      1 // This file is part of Eigen, a lightweight C++ template library
      2 // for linear algebra.
      3 //
      4 // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud (at) inria.fr>
      5 // Copyright (C) 2010 Jitse Niesen <jitse (at) maths.leeds.ac.uk>
      6 //
      7 // This Source Code Form is subject to the terms of the Mozilla
      8 // Public License v. 2.0. If a copy of the MPL was not distributed
      9 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
     10 
     11 #ifndef EIGEN_SELFADJOINTEIGENSOLVER_H
     12 #define EIGEN_SELFADJOINTEIGENSOLVER_H
     13 
     14 #include "./Tridiagonalization.h"
     15 
     16 namespace Eigen {
     17 
     18 template<typename _MatrixType>
     19 class GeneralizedSelfAdjointEigenSolver;
     20 
     21 namespace internal {
     22 template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues;
     23 template<typename MatrixType, typename DiagType, typename SubDiagType>
     24 ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec);
     25 }
     26 
     27 /** \eigenvalues_module \ingroup Eigenvalues_Module
     28   *
     29   *
     30   * \class SelfAdjointEigenSolver
     31   *
     32   * \brief Computes eigenvalues and eigenvectors of selfadjoint matrices
     33   *
     34   * \tparam _MatrixType the type of the matrix of which we are computing the
     35   * eigendecomposition; this is expected to be an instantiation of the Matrix
     36   * class template.
     37   *
     38   * A matrix \f$ A \f$ is selfadjoint if it equals its adjoint. For real
     39   * matrices, this means that the matrix is symmetric: it equals its
     40   * transpose. This class computes the eigenvalues and eigenvectors of a
     41   * selfadjoint matrix. These are the scalars \f$ \lambda \f$ and vectors
     42   * \f$ v \f$ such that \f$ Av = \lambda v \f$.  The eigenvalues of a
     43   * selfadjoint matrix are always real. If \f$ D \f$ is a diagonal matrix with
     44   * the eigenvalues on the diagonal, and \f$ V \f$ is a matrix with the
     45   * eigenvectors as its columns, then \f$ A = V D V^{-1} \f$ (for selfadjoint
     46   * matrices, the matrix \f$ V \f$ is always invertible). This is called the
     47   * eigendecomposition.
     48   *
     49   * The algorithm exploits the fact that the matrix is selfadjoint, making it
     50   * faster and more accurate than the general purpose eigenvalue algorithms
     51   * implemented in EigenSolver and ComplexEigenSolver.
     52   *
     53   * Only the \b lower \b triangular \b part of the input matrix is referenced.
     54   *
     55   * Call the function compute() to compute the eigenvalues and eigenvectors of
     56   * a given matrix. Alternatively, you can use the
     57   * SelfAdjointEigenSolver(const MatrixType&, int) constructor which computes
     58   * the eigenvalues and eigenvectors at construction time. Once the eigenvalue
     59   * and eigenvectors are computed, they can be retrieved with the eigenvalues()
     60   * and eigenvectors() functions.
     61   *
     62   * The documentation for SelfAdjointEigenSolver(const MatrixType&, int)
     63   * contains an example of the typical use of this class.
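          *
          * For a quick illustration, a minimal usage sketch (names are arbitrary and headers are
          * omitted; the referenced example files remain the authoritative examples) could look like:
          * \code
          * Eigen::Matrix3d A = Eigen::Matrix3d::Random();
          * A = (A + A.transpose()).eval();                        // symmetrize, so A is selfadjoint
          * Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> es(A);  // eigenvalues and eigenvectors
          * if (es.info() == Eigen::Success) {
          *   Eigen::Vector3d lambda = es.eigenvalues();           // sorted in increasing order
          *   Eigen::Matrix3d V      = es.eigenvectors();          // columns are the eigenvectors
          * }
          * \endcode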
     64   *
     65   * To solve the \em generalized eigenvalue problem \f$ Av = \lambda Bv \f$ and
     66   * the like, see the class GeneralizedSelfAdjointEigenSolver.
     67   *
     68   * \sa MatrixBase::eigenvalues(), class EigenSolver, class ComplexEigenSolver
     69   */
     70 template<typename _MatrixType> class SelfAdjointEigenSolver
     71 {
     72   public:
     73 
     74     typedef _MatrixType MatrixType;
     75     enum {
     76       Size = MatrixType::RowsAtCompileTime,
     77       ColsAtCompileTime = MatrixType::ColsAtCompileTime,
     78       Options = MatrixType::Options,
     79       MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
     80     };
     81 
     82     /** \brief Scalar type for matrices of type \p _MatrixType. */
     83     typedef typename MatrixType::Scalar Scalar;
     84     typedef Eigen::Index Index; ///< \deprecated since Eigen 3.3
     85 
     86     typedef Matrix<Scalar,Size,Size,ColMajor,MaxColsAtCompileTime,MaxColsAtCompileTime> EigenvectorsType;
     87 
     88     /** \brief Real scalar type for \p _MatrixType.
     89       *
     90       * This is just \c Scalar if #Scalar is real (e.g., \c float or
     91       * \c double), and the type of the real part of \c Scalar if #Scalar is
     92       * complex.
     93       */
     94     typedef typename NumTraits<Scalar>::Real RealScalar;
     95 
     96     friend struct internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>;
     97 
     98     /** \brief Type for vector of eigenvalues as returned by eigenvalues().
     99       *
    100       * This is a column vector with entries of type #RealScalar.
    101       * The length of the vector is the size of \p _MatrixType.
    102       */
    103     typedef typename internal::plain_col_type<MatrixType, RealScalar>::type RealVectorType;
    104     typedef Tridiagonalization<MatrixType> TridiagonalizationType;
    105     typedef typename TridiagonalizationType::SubDiagonalType SubDiagonalType;
    106 
    107     /** \brief Default constructor for fixed-size matrices.
    108       *
    109       * The default constructor is useful in cases in which the user intends to
    110       * perform decompositions via compute(). This constructor
    111       * can only be used if \p _MatrixType is a fixed-size matrix; use
    112       * SelfAdjointEigenSolver(Index) for dynamic-size matrices.
    113       *
    114       * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver.cpp
    115       * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver.out
    116       */
    117     EIGEN_DEVICE_FUNC
    118     SelfAdjointEigenSolver()
    119         : m_eivec(),
    120           m_eivalues(),
    121           m_subdiag(),
    122           m_isInitialized(false)
    123     { }
    124 
    125     /** \brief Constructor, pre-allocates memory for dynamic-size matrices.
    126       *
    127       * \param [in]  size  Positive integer, size of the matrix whose
    128       * eigenvalues and eigenvectors will be computed.
    129       *
    130       * This constructor is useful for dynamic-size matrices, when the user
    131       * intends to perform decompositions via compute(). The \p size
    132       * parameter is only used as a hint. It is not an error to give a wrong
    133       * \p size, but it may impair performance.
    134       *
    135       * \sa compute() for an example
    136       */
    137     EIGEN_DEVICE_FUNC
    138     explicit SelfAdjointEigenSolver(Index size)
    139         : m_eivec(size, size),
    140           m_eivalues(size),
    141           m_subdiag(size > 1 ? size - 1 : 1),
    142           m_isInitialized(false)
    143     {}
    144 
    145     /** \brief Constructor; computes eigendecomposition of given matrix.
    146       *
    147       * \param[in]  matrix  Selfadjoint matrix whose eigendecomposition is to
    148       *    be computed. Only the lower triangular part of the matrix is referenced.
    149       * \param[in]  options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
    150       *
    151       * This constructor calls compute(const MatrixType&, int) to compute the
    152       * eigenvalues of the matrix \p matrix. The eigenvectors are computed if
    153       * \p options equals #ComputeEigenvectors.
    154       *
    155       * Example: \include SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.cpp
    156       * Output: \verbinclude SelfAdjointEigenSolver_SelfAdjointEigenSolver_MatrixType.out
    157       *
    158       * \sa compute(const MatrixType&, int)
    159       */
    160     template<typename InputType>
    161     EIGEN_DEVICE_FUNC
    162     explicit SelfAdjointEigenSolver(const EigenBase<InputType>& matrix, int options = ComputeEigenvectors)
    163       : m_eivec(matrix.rows(), matrix.cols()),
    164         m_eivalues(matrix.cols()),
    165         m_subdiag(matrix.rows() > 1 ? matrix.rows() - 1 : 1),
    166         m_isInitialized(false)
    167     {
    168       compute(matrix.derived(), options);
    169     }
    170 
    171     /** \brief Computes eigendecomposition of given matrix.
    172       *
    173       * \param[in]  matrix  Selfadjoint matrix whose eigendecomposition is to
    174       *    be computed. Only the lower triangular part of the matrix is referenced.
    175       * \param[in]  options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
    176       * \returns    Reference to \c *this
    177       *
    178       * This function computes the eigenvalues of \p matrix.  The eigenvalues()
    179       * function can be used to retrieve them.  If \p options equals #ComputeEigenvectors,
    180       * then the eigenvectors are also computed and can be retrieved by
    181       * calling eigenvectors().
    182       *
    183       * This implementation uses a symmetric QR algorithm. The matrix is first
    184       * reduced to tridiagonal form using the Tridiagonalization class. The
    185       * tridiagonal matrix is then brought to diagonal form with implicit
    186       * symmetric QR steps with Wilkinson shift. Details can be found in
    187       * Section 8.3 of Golub \& Van Loan, <i>%Matrix Computations</i>.
    188       *
    189       * The cost of the computation is about \f$ 9n^3 \f$ if the eigenvectors
    190       * are required and \f$ 4n^3/3 \f$ if they are not required.
    191       *
    192       * This method reuses the memory in the SelfAdjointEigenSolver object that
    193       * was allocated when the object was constructed, if the size of the
    194       * matrix does not change.
    195       *
    196       * Example: \include SelfAdjointEigenSolver_compute_MatrixType.cpp
    197       * Output: \verbinclude SelfAdjointEigenSolver_compute_MatrixType.out
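               *
               * For instance, a solver can be reused across several matrices of the same size. An
               * illustrative sketch (\c A1 and \c A2 are assumed to be selfadjoint MatrixXd matrices of size \c n):
               * \code
               * Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(n);  // pre-allocates workspace for n x n problems
               * es.compute(A1, Eigen::EigenvaluesOnly);                // eigenvalues of A1, no eigenvectors
               * es.compute(A2);                                        // eigenvalues and eigenvectors of A2, memory reused
               * \endcode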
    198       *
    199       * \sa SelfAdjointEigenSolver(const MatrixType&, int)
    200       */
    201     template<typename InputType>
    202     EIGEN_DEVICE_FUNC
    203     SelfAdjointEigenSolver& compute(const EigenBase<InputType>& matrix, int options = ComputeEigenvectors);
    204 
    205     /** \brief Computes eigendecomposition of given matrix using a closed-form algorithm
    206       *
    207       * This is a variant of compute(const MatrixType&, int options) which
    208       * directly solves the underlying polynomial equation.
    209       *
     210       * Currently, only 2x2 and 3x3 matrices whose size is known at compile time are supported (e.g., Matrix3d).
    211       *
    212       * This method is usually significantly faster than the QR iterative algorithm
    213       * but it might also be less accurate. It is also worth noting that
    214       * for 3x3 matrices it involves trigonometric operations which are
    215       * not necessarily available for all scalar types.
    216       *
     217       * For the 3x3 case, we observed the following worst-case relative errors on the eigenvalues:
    218       *   - double: 1e-8
    219       *   - float:  1e-3
    220       *
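               * An illustrative sketch (a fixed-size matrix type is required):
               * \code
               * Eigen::Matrix3f A = Eigen::Matrix3f::Random();
               * A = (A + A.transpose()).eval();     // make A selfadjoint
               * Eigen::SelfAdjointEigenSolver<Eigen::Matrix3f> es;
               * es.computeDirect(A);                // closed-form path for the 3x3 case
               * \endcode
               *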
    221       * \sa compute(const MatrixType&, int options)
    222       */
    223     EIGEN_DEVICE_FUNC
    224     SelfAdjointEigenSolver& computeDirect(const MatrixType& matrix, int options = ComputeEigenvectors);
    225 
    226     /**
     227       * \brief Computes the eigendecomposition from a tridiagonal symmetric matrix
    228       *
    229       * \param[in] diag The vector containing the diagonal of the matrix.
    230       * \param[in] subdiag The subdiagonal of the matrix.
    231       * \param[in] options Can be #ComputeEigenvectors (default) or #EigenvaluesOnly.
    232       * \returns Reference to \c *this
    233       *
    234       * This function assumes that the matrix has been reduced to tridiagonal form.
    235       *
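               * A minimal sketch, assuming the tridiagonal data is already at hand (the numeric
               * values below are arbitrary):
               * \code
               * Eigen::VectorXd diag(4), subdiag(3);
               * diag    << 2, 3, 4, 5;              // main diagonal
               * subdiag << 1, 1, 1;                 // sub-diagonal, one entry shorter
               * Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es;
               * es.computeFromTridiagonal(diag, subdiag, Eigen::ComputeEigenvectors);
               * \endcode
               *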
    236       * \sa compute(const MatrixType&, int) for more information
    237       */
    238     SelfAdjointEigenSolver& computeFromTridiagonal(const RealVectorType& diag, const SubDiagonalType& subdiag , int options=ComputeEigenvectors);
    239 
    240     /** \brief Returns the eigenvectors of given matrix.
    241       *
    242       * \returns  A const reference to the matrix whose columns are the eigenvectors.
    243       *
    244       * \pre The eigenvectors have been computed before.
    245       *
    246       * Column \f$ k \f$ of the returned matrix is an eigenvector corresponding
    247       * to eigenvalue number \f$ k \f$ as returned by eigenvalues().  The
    248       * eigenvectors are normalized to have (Euclidean) norm equal to one. If
    249       * this object was used to solve the eigenproblem for the selfadjoint
    250       * matrix \f$ A \f$, then the matrix returned by this function is the
    251       * matrix \f$ V \f$ in the eigendecomposition \f$ A = V D V^{-1} \f$.
    252       *
    253       * Example: \include SelfAdjointEigenSolver_eigenvectors.cpp
    254       * Output: \verbinclude SelfAdjointEigenSolver_eigenvectors.out
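               *
               * For instance, the defining relation can be checked numerically (sketch; \c es is assumed
               * to hold the decomposition of a selfadjoint MatrixXd \c A, and \c k to be a valid index):
               * \code
               * Eigen::VectorXd v      = es.eigenvectors().col(k);
               * double          lambda = es.eigenvalues()(k);
               * double residual        = (A * v - lambda * v).norm();  // should be close to zero
               * \endcode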
    255       *
    256       * \sa eigenvalues()
    257       */
    258     EIGEN_DEVICE_FUNC
    259     const EigenvectorsType& eigenvectors() const
    260     {
    261       eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
    262       eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
    263       return m_eivec;
    264     }
    265 
    266     /** \brief Returns the eigenvalues of given matrix.
    267       *
    268       * \returns A const reference to the column vector containing the eigenvalues.
    269       *
    270       * \pre The eigenvalues have been computed before.
    271       *
    272       * The eigenvalues are repeated according to their algebraic multiplicity,
    273       * so there are as many eigenvalues as rows in the matrix. The eigenvalues
    274       * are sorted in increasing order.
    275       *
    276       * Example: \include SelfAdjointEigenSolver_eigenvalues.cpp
    277       * Output: \verbinclude SelfAdjointEigenSolver_eigenvalues.out
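               *
               * Since the eigenvalues are sorted in increasing order, the extremal ones are simply the
               * first and last entries (sketch; \c es is assumed to hold a computed decomposition):
               * \code
               * double lambdaMin = es.eigenvalues()(0);
               * double lambdaMax = es.eigenvalues()(es.eigenvalues().size()-1);
               * \endcode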
    278       *
    279       * \sa eigenvectors(), MatrixBase::eigenvalues()
    280       */
    281     EIGEN_DEVICE_FUNC
    282     const RealVectorType& eigenvalues() const
    283     {
    284       eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
    285       return m_eivalues;
    286     }
    287 
    288     /** \brief Computes the positive-definite square root of the matrix.
    289       *
    290       * \returns the positive-definite square root of the matrix
    291       *
    292       * \pre The eigenvalues and eigenvectors of a positive-definite matrix
    293       * have been computed before.
    294       *
    295       * The square root of a positive-definite matrix \f$ A \f$ is the
    296       * positive-definite matrix whose square equals \f$ A \f$. This function
    297       * uses the eigendecomposition \f$ A = V D V^{-1} \f$ to compute the
    298       * square root as \f$ A^{1/2} = V D^{1/2} V^{-1} \f$.
    299       *
    300       * Example: \include SelfAdjointEigenSolver_operatorSqrt.cpp
    301       * Output: \verbinclude SelfAdjointEigenSolver_operatorSqrt.out
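               *
               * Sketch of the defining property (\c es is assumed to hold the decomposition of a
               * positive-definite MatrixXd \c A):
               * \code
               * Eigen::MatrixXd S = es.operatorSqrt();
               * double err = (S * S - A).norm();    // close to zero for positive-definite A
               * \endcode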
    302       *
    303       * \sa operatorInverseSqrt(), <a href="unsupported/group__MatrixFunctions__Module.html">MatrixFunctions Module</a>
    304       */
    305     EIGEN_DEVICE_FUNC
    306     MatrixType operatorSqrt() const
    307     {
    308       eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
    309       eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
    310       return m_eivec * m_eivalues.cwiseSqrt().asDiagonal() * m_eivec.adjoint();
    311     }
    312 
    313     /** \brief Computes the inverse square root of the matrix.
    314       *
    315       * \returns the inverse positive-definite square root of the matrix
    316       *
    317       * \pre The eigenvalues and eigenvectors of a positive-definite matrix
    318       * have been computed before.
    319       *
    320       * This function uses the eigendecomposition \f$ A = V D V^{-1} \f$ to
    321       * compute the inverse square root as \f$ V D^{-1/2} V^{-1} \f$. This is
    322       * cheaper than first computing the square root with operatorSqrt() and
    323       * then its inverse with MatrixBase::inverse().
    324       *
    325       * Example: \include SelfAdjointEigenSolver_operatorInverseSqrt.cpp
    326       * Output: \verbinclude SelfAdjointEigenSolver_operatorInverseSqrt.out
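               *
               * Sketch (same assumptions as for operatorSqrt()):
               * \code
               * Eigen::MatrixXd Sinv = es.operatorInverseSqrt();      // V * D^(-1/2) * V^(-1)
               * Eigen::MatrixXd ref  = es.operatorSqrt().inverse();   // equivalent, but more expensive
               * \endcode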
    327       *
    328       * \sa operatorSqrt(), MatrixBase::inverse(), <a href="unsupported/group__MatrixFunctions__Module.html">MatrixFunctions Module</a>
    329       */
    330     EIGEN_DEVICE_FUNC
    331     MatrixType operatorInverseSqrt() const
    332     {
    333       eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
    334       eigen_assert(m_eigenvectorsOk && "The eigenvectors have not been computed together with the eigenvalues.");
    335       return m_eivec * m_eivalues.cwiseInverse().cwiseSqrt().asDiagonal() * m_eivec.adjoint();
    336     }
    337 
    338     /** \brief Reports whether previous computation was successful.
    339       *
     340       * \returns \c Success if computation was successful, \c NoConvergence otherwise.
    341       */
    342     EIGEN_DEVICE_FUNC
    343     ComputationInfo info() const
    344     {
    345       eigen_assert(m_isInitialized && "SelfAdjointEigenSolver is not initialized.");
    346       return m_info;
    347     }
    348 
    349     /** \brief Maximum number of iterations.
    350       *
    351       * The algorithm terminates if it does not converge within m_maxIterations * n iterations, where n
    352       * denotes the size of the matrix. This value is currently set to 30 (copied from LAPACK).
    353       */
    354     static const int m_maxIterations = 30;
    355 
    356   protected:
    357     static void check_template_parameters()
    358     {
    359       EIGEN_STATIC_ASSERT_NON_INTEGER(Scalar);
    360     }
    361 
    362     EigenvectorsType m_eivec;
    363     RealVectorType m_eivalues;
    364     typename TridiagonalizationType::SubDiagonalType m_subdiag;
    365     ComputationInfo m_info;
    366     bool m_isInitialized;
    367     bool m_eigenvectorsOk;
    368 };
    369 
    370 namespace internal {
    371 /** \internal
    372   *
    373   * \eigenvalues_module \ingroup Eigenvalues_Module
    374   *
    375   * Performs a QR step on a tridiagonal symmetric matrix represented as a
    376   * pair of two vectors \a diag and \a subdiag.
    377   *
    378   * \param diag the diagonal part of the input selfadjoint tridiagonal matrix
    379   * \param subdiag the sub-diagonal part of the input selfadjoint tridiagonal matrix
    380   * \param start starting index of the submatrix to work on
    381   * \param end last+1 index of the submatrix to work on
    382   * \param matrixQ pointer to the column-major matrix holding the eigenvectors, can be 0
    383   * \param n size of the input matrix
    384   *
     385   * For compilation efficiency reasons, this procedure does not use Eigen expressions
     386   * for its arguments.
    387   *
    388   * Implemented from Golub's "Matrix Computations", algorithm 8.3.2:
    389   * "implicit symmetric QR step with Wilkinson shift"
    390   */
    391 template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
    392 EIGEN_DEVICE_FUNC
    393 static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n);
    394 }
    395 
    396 template<typename MatrixType>
    397 template<typename InputType>
    398 EIGEN_DEVICE_FUNC
    399 SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
    400 ::compute(const EigenBase<InputType>& a_matrix, int options)
    401 {
    402   check_template_parameters();
    403 
    404   const InputType &matrix(a_matrix.derived());
    405 
    406   using std::abs;
    407   eigen_assert(matrix.cols() == matrix.rows());
    408   eigen_assert((options&~(EigVecMask|GenEigMask))==0
    409           && (options&EigVecMask)!=EigVecMask
    410           && "invalid option parameter");
    411   bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
    412   Index n = matrix.cols();
    413   m_eivalues.resize(n,1);
    414 
    415   if(n==1)
    416   {
    417     m_eivec = matrix;
    418     m_eivalues.coeffRef(0,0) = numext::real(m_eivec.coeff(0,0));
    419     if(computeEigenvectors)
    420       m_eivec.setOnes(n,n);
    421     m_info = Success;
    422     m_isInitialized = true;
    423     m_eigenvectorsOk = computeEigenvectors;
    424     return *this;
    425   }
    426 
    427   // declare some aliases
    428   RealVectorType& diag = m_eivalues;
    429   EigenvectorsType& mat = m_eivec;
    430 
    431   // map the matrix coefficients to [-1:1] to avoid over- and underflow.
    432   mat = matrix.template triangularView<Lower>();
    433   RealScalar scale = mat.cwiseAbs().maxCoeff();
    434   if(scale==RealScalar(0)) scale = RealScalar(1);
    435   mat.template triangularView<Lower>() /= scale;
    436   m_subdiag.resize(n-1);
    437   internal::tridiagonalization_inplace(mat, diag, m_subdiag, computeEigenvectors);
    438 
    439   m_info = internal::computeFromTridiagonal_impl(diag, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);
    440 
     441   // scale back the eigenvalues
    442   m_eivalues *= scale;
    443 
    444   m_isInitialized = true;
    445   m_eigenvectorsOk = computeEigenvectors;
    446   return *this;
    447 }
    448 
    449 template<typename MatrixType>
    450 SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
    451 ::computeFromTridiagonal(const RealVectorType& diag, const SubDiagonalType& subdiag , int options)
    452 {
    453   //TODO : Add an option to scale the values beforehand
    454   bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
    455 
    456   m_eivalues = diag;
    457   m_subdiag = subdiag;
    458   if (computeEigenvectors)
    459   {
    460     m_eivec.setIdentity(diag.size(), diag.size());
    461   }
    462   m_info = internal::computeFromTridiagonal_impl(m_eivalues, m_subdiag, m_maxIterations, computeEigenvectors, m_eivec);
    463 
    464   m_isInitialized = true;
    465   m_eigenvectorsOk = computeEigenvectors;
    466   return *this;
    467 }
    468 
    469 namespace internal {
    470 /**
    471   * \internal
    472   * \brief Compute the eigendecomposition from a tridiagonal matrix
    473   *
    474   * \param[in,out] diag : On input, the diagonal of the matrix, on output the eigenvalues
    475   * \param[in,out] subdiag : The subdiagonal part of the matrix (entries are modified during the decomposition)
    476   * \param[in] maxIterations : the maximum number of iterations
    477   * \param[in] computeEigenvectors : whether the eigenvectors have to be computed or not
    478   * \param[out] eivec : The matrix to store the eigenvectors if computeEigenvectors==true. Must be allocated on input.
    479   * \returns \c Success or \c NoConvergence
    480   */
    481 template<typename MatrixType, typename DiagType, typename SubDiagType>
    482 ComputationInfo computeFromTridiagonal_impl(DiagType& diag, SubDiagType& subdiag, const Index maxIterations, bool computeEigenvectors, MatrixType& eivec)
    483 {
    484   using std::abs;
    485 
    486   ComputationInfo info;
    487   typedef typename MatrixType::Scalar Scalar;
    488 
    489   Index n = diag.size();
    490   Index end = n-1;
    491   Index start = 0;
    492   Index iter = 0; // total number of iterations
    493 
    494   typedef typename DiagType::RealScalar RealScalar;
    495   const RealScalar considerAsZero = (std::numeric_limits<RealScalar>::min)();
    496   const RealScalar precision = RealScalar(2)*NumTraits<RealScalar>::epsilon();
    497 
    498   while (end>0)
    499   {
    500     for (Index i = start; i<end; ++i)
    501       if (internal::isMuchSmallerThan(abs(subdiag[i]),(abs(diag[i])+abs(diag[i+1])),precision) || abs(subdiag[i]) <= considerAsZero)
    502         subdiag[i] = 0;
    503 
    504     // find the largest unreduced block
    505     while (end>0 && subdiag[end-1]==RealScalar(0))
    506     {
    507       end--;
    508     }
    509     if (end<=0)
    510       break;
    511 
    512     // if we spent too many iterations, we give up
    513     iter++;
    514     if(iter > maxIterations * n) break;
    515 
    516     start = end - 1;
    517     while (start>0 && subdiag[start-1]!=0)
    518       start--;
    519 
    520     internal::tridiagonal_qr_step<MatrixType::Flags&RowMajorBit ? RowMajor : ColMajor>(diag.data(), subdiag.data(), start, end, computeEigenvectors ? eivec.data() : (Scalar*)0, n);
    521   }
    522   if (iter <= maxIterations * n)
    523     info = Success;
    524   else
    525     info = NoConvergence;
    526 
    527   // Sort eigenvalues and corresponding vectors.
    528   // TODO make the sort optional ?
    529   // TODO use a better sort algorithm !!
    530   if (info == Success)
    531   {
    532     for (Index i = 0; i < n-1; ++i)
    533     {
    534       Index k;
    535       diag.segment(i,n-i).minCoeff(&k);
    536       if (k > 0)
    537       {
    538         std::swap(diag[i], diag[k+i]);
    539         if(computeEigenvectors)
    540           eivec.col(i).swap(eivec.col(k+i));
    541       }
    542     }
    543   }
    544   return info;
    545 }
    546 
    547 template<typename SolverType,int Size,bool IsComplex> struct direct_selfadjoint_eigenvalues
    548 {
    549   EIGEN_DEVICE_FUNC
    550   static inline void run(SolverType& eig, const typename SolverType::MatrixType& A, int options)
    551   { eig.compute(A,options); }
    552 };
    553 
    554 template<typename SolverType> struct direct_selfadjoint_eigenvalues<SolverType,3,false>
    555 {
    556   typedef typename SolverType::MatrixType MatrixType;
    557   typedef typename SolverType::RealVectorType VectorType;
    558   typedef typename SolverType::Scalar Scalar;
    559   typedef typename SolverType::EigenvectorsType EigenvectorsType;
    560 
    561 
    562   /** \internal
    563    * Computes the roots of the characteristic polynomial of \a m.
    564    * For numerical stability m.trace() should be near zero and to avoid over- or underflow m should be normalized.
    565    */
    566   EIGEN_DEVICE_FUNC
    567   static inline void computeRoots(const MatrixType& m, VectorType& roots)
    568   {
    569     EIGEN_USING_STD_MATH(sqrt)
    570     EIGEN_USING_STD_MATH(atan2)
    571     EIGEN_USING_STD_MATH(cos)
    572     EIGEN_USING_STD_MATH(sin)
    573     const Scalar s_inv3 = Scalar(1)/Scalar(3);
    574     const Scalar s_sqrt3 = sqrt(Scalar(3));
    575 
    576     // The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0.  The
    577     // eigenvalues are the roots to this equation, all guaranteed to be
    578     // real-valued, because the matrix is symmetric.
    579     Scalar c0 = m(0,0)*m(1,1)*m(2,2) + Scalar(2)*m(1,0)*m(2,0)*m(2,1) - m(0,0)*m(2,1)*m(2,1) - m(1,1)*m(2,0)*m(2,0) - m(2,2)*m(1,0)*m(1,0);
    580     Scalar c1 = m(0,0)*m(1,1) - m(1,0)*m(1,0) + m(0,0)*m(2,2) - m(2,0)*m(2,0) + m(1,1)*m(2,2) - m(2,1)*m(2,1);
    581     Scalar c2 = m(0,0) + m(1,1) + m(2,2);
    582 
    583     // Construct the parameters used in classifying the roots of the equation
    584     // and in solving the equation for the roots in closed form.
    585     Scalar c2_over_3 = c2*s_inv3;
    586     Scalar a_over_3 = (c2*c2_over_3 - c1)*s_inv3;
    587     a_over_3 = numext::maxi(a_over_3, Scalar(0));
    588 
    589     Scalar half_b = Scalar(0.5)*(c0 + c2_over_3*(Scalar(2)*c2_over_3*c2_over_3 - c1));
    590 
    591     Scalar q = a_over_3*a_over_3*a_over_3 - half_b*half_b;
    592     q = numext::maxi(q, Scalar(0));
    593 
    594     // Compute the eigenvalues by solving for the roots of the polynomial.
    595     Scalar rho = sqrt(a_over_3);
    596     Scalar theta = atan2(sqrt(q),half_b)*s_inv3;  // since sqrt(q) > 0, atan2 is in [0, pi] and theta is in [0, pi/3]
    597     Scalar cos_theta = cos(theta);
    598     Scalar sin_theta = sin(theta);
    599     // roots are already sorted, since cos is monotonically decreasing on [0, pi]
    600     roots(0) = c2_over_3 - rho*(cos_theta + s_sqrt3*sin_theta); // == 2*rho*cos(theta+2pi/3)
    601     roots(1) = c2_over_3 - rho*(cos_theta - s_sqrt3*sin_theta); // == 2*rho*cos(theta+ pi/3)
    602     roots(2) = c2_over_3 + Scalar(2)*rho*cos_theta;
    603   }
    604 
    605   EIGEN_DEVICE_FUNC
    606   static inline bool extract_kernel(MatrixType& mat, Ref<VectorType> res, Ref<VectorType> representative)
    607   {
    608     using std::abs;
    609     Index i0;
     610     // Find non-zero column i0 (by construction, there must exist a non-zero coefficient on the diagonal):
    611     mat.diagonal().cwiseAbs().maxCoeff(&i0);
    612     // mat.col(i0) is a good candidate for an orthogonal vector to the current eigenvector,
    613     // so let's save it:
    614     representative = mat.col(i0);
    615     Scalar n0, n1;
    616     VectorType c0, c1;
    617     n0 = (c0 = representative.cross(mat.col((i0+1)%3))).squaredNorm();
    618     n1 = (c1 = representative.cross(mat.col((i0+2)%3))).squaredNorm();
    619     if(n0>n1) res = c0/std::sqrt(n0);
    620     else      res = c1/std::sqrt(n1);
    621 
    622     return true;
    623   }
    624 
    625   EIGEN_DEVICE_FUNC
    626   static inline void run(SolverType& solver, const MatrixType& mat, int options)
    627   {
    628     eigen_assert(mat.cols() == 3 && mat.cols() == mat.rows());
    629     eigen_assert((options&~(EigVecMask|GenEigMask))==0
    630             && (options&EigVecMask)!=EigVecMask
    631             && "invalid option parameter");
    632     bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
    633 
    634     EigenvectorsType& eivecs = solver.m_eivec;
    635     VectorType& eivals = solver.m_eivalues;
    636 
    637     // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
    638     Scalar shift = mat.trace() / Scalar(3);
    639     // TODO Avoid this copy. Currently it is necessary to suppress bogus values when determining maxCoeff and for computing the eigenvectors later
    640     MatrixType scaledMat = mat.template selfadjointView<Lower>();
    641     scaledMat.diagonal().array() -= shift;
    642     Scalar scale = scaledMat.cwiseAbs().maxCoeff();
    643     if(scale > 0) scaledMat /= scale;   // TODO for scale==0 we could save the remaining operations
    644 
    645     // compute the eigenvalues
    646     computeRoots(scaledMat,eivals);
    647 
    648     // compute the eigenvectors
    649     if(computeEigenvectors)
    650     {
    651       if((eivals(2)-eivals(0))<=Eigen::NumTraits<Scalar>::epsilon())
    652       {
    653         // All three eigenvalues are numerically the same
    654         eivecs.setIdentity();
    655       }
    656       else
    657       {
    658         MatrixType tmp;
    659         tmp = scaledMat;
    660 
    661         // Compute the eigenvector of the most distinct eigenvalue
    662         Scalar d0 = eivals(2) - eivals(1);
    663         Scalar d1 = eivals(1) - eivals(0);
    664         Index k(0), l(2);
    665         if(d0 > d1)
    666         {
    667           numext::swap(k,l);
    668           d0 = d1;
    669         }
    670 
    671         // Compute the eigenvector of index k
    672         {
    673           tmp.diagonal().array () -= eivals(k);
    674           // By construction, 'tmp' is of rank 2, and its kernel corresponds to the respective eigenvector.
    675           extract_kernel(tmp, eivecs.col(k), eivecs.col(l));
    676         }
    677 
    678         // Compute eigenvector of index l
    679         if(d0<=2*Eigen::NumTraits<Scalar>::epsilon()*d1)
    680         {
    681           // If d0 is too small, then the two other eigenvalues are numerically the same,
    682           // and thus we only have to ortho-normalize the near orthogonal vector we saved above.
     683           eivecs.col(l) -= eivecs.col(k).dot(eivecs.col(l))*eivecs.col(k); // Gram-Schmidt: remove the component along eivecs.col(k)
    684           eivecs.col(l).normalize();
    685         }
    686         else
    687         {
    688           tmp = scaledMat;
    689           tmp.diagonal().array () -= eivals(l);
    690 
    691           VectorType dummy;
    692           extract_kernel(tmp, eivecs.col(l), dummy);
    693         }
    694 
    695         // Compute last eigenvector from the other two
    696         eivecs.col(1) = eivecs.col(2).cross(eivecs.col(0)).normalized();
    697       }
    698     }
    699 
    700     // Rescale back to the original size.
    701     eivals *= scale;
    702     eivals.array() += shift;
    703 
    704     solver.m_info = Success;
    705     solver.m_isInitialized = true;
    706     solver.m_eigenvectorsOk = computeEigenvectors;
    707   }
    708 };
    709 
     710 // 2x2 direct eigenvalue decomposition, code from Hauke Heibel
    711 template<typename SolverType>
    712 struct direct_selfadjoint_eigenvalues<SolverType,2,false>
    713 {
    714   typedef typename SolverType::MatrixType MatrixType;
    715   typedef typename SolverType::RealVectorType VectorType;
    716   typedef typename SolverType::Scalar Scalar;
    717   typedef typename SolverType::EigenvectorsType EigenvectorsType;
    718 
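           // Closed-form eigenvalues of the symmetric 2x2 matrix [[a, b], [b, c]]:
           //   lambda_{min,max} = (a + c)/2 -/+ sqrt( ((a - c)/2)^2 + b^2 ),
           // which is exactly what t1 -/+ t0 computes in computeRoots() below.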
    719   EIGEN_DEVICE_FUNC
    720   static inline void computeRoots(const MatrixType& m, VectorType& roots)
    721   {
    722     using std::sqrt;
    723     const Scalar t0 = Scalar(0.5) * sqrt( numext::abs2(m(0,0)-m(1,1)) + Scalar(4)*numext::abs2(m(1,0)));
    724     const Scalar t1 = Scalar(0.5) * (m(0,0) + m(1,1));
    725     roots(0) = t1 - t0;
    726     roots(1) = t1 + t0;
    727   }
    728 
    729   EIGEN_DEVICE_FUNC
    730   static inline void run(SolverType& solver, const MatrixType& mat, int options)
    731   {
    732     EIGEN_USING_STD_MATH(sqrt);
    733     EIGEN_USING_STD_MATH(abs);
    734 
    735     eigen_assert(mat.cols() == 2 && mat.cols() == mat.rows());
    736     eigen_assert((options&~(EigVecMask|GenEigMask))==0
    737             && (options&EigVecMask)!=EigVecMask
    738             && "invalid option parameter");
    739     bool computeEigenvectors = (options&ComputeEigenvectors)==ComputeEigenvectors;
    740 
    741     EigenvectorsType& eivecs = solver.m_eivec;
    742     VectorType& eivals = solver.m_eivalues;
    743 
    744     // Shift the matrix to the mean eigenvalue and map the matrix coefficients to [-1:1] to avoid over- and underflow.
    745     Scalar shift = mat.trace() / Scalar(2);
    746     MatrixType scaledMat = mat;
    747     scaledMat.coeffRef(0,1) = mat.coeff(1,0);
    748     scaledMat.diagonal().array() -= shift;
    749     Scalar scale = scaledMat.cwiseAbs().maxCoeff();
    750     if(scale > Scalar(0))
    751       scaledMat /= scale;
    752 
    753     // Compute the eigenvalues
    754     computeRoots(scaledMat,eivals);
    755 
     756     // compute the eigenvectors
    757     if(computeEigenvectors)
    758     {
    759       if((eivals(1)-eivals(0))<=abs(eivals(1))*Eigen::NumTraits<Scalar>::epsilon())
    760       {
    761         eivecs.setIdentity();
    762       }
    763       else
    764       {
    765         scaledMat.diagonal().array () -= eivals(1);
    766         Scalar a2 = numext::abs2(scaledMat(0,0));
    767         Scalar c2 = numext::abs2(scaledMat(1,1));
    768         Scalar b2 = numext::abs2(scaledMat(1,0));
    769         if(a2>c2)
    770         {
    771           eivecs.col(1) << -scaledMat(1,0), scaledMat(0,0);
    772           eivecs.col(1) /= sqrt(a2+b2);
    773         }
    774         else
    775         {
    776           eivecs.col(1) << -scaledMat(1,1), scaledMat(1,0);
    777           eivecs.col(1) /= sqrt(c2+b2);
    778         }
    779 
    780         eivecs.col(0) << eivecs.col(1).unitOrthogonal();
    781       }
    782     }
    783 
    784     // Rescale back to the original size.
    785     eivals *= scale;
    786     eivals.array() += shift;
    787 
    788     solver.m_info = Success;
    789     solver.m_isInitialized = true;
    790     solver.m_eigenvectorsOk = computeEigenvectors;
    791   }
    792 };
    793 
    794 }
    795 
    796 template<typename MatrixType>
    797 EIGEN_DEVICE_FUNC
    798 SelfAdjointEigenSolver<MatrixType>& SelfAdjointEigenSolver<MatrixType>
    799 ::computeDirect(const MatrixType& matrix, int options)
    800 {
    801   internal::direct_selfadjoint_eigenvalues<SelfAdjointEigenSolver,Size,NumTraits<Scalar>::IsComplex>::run(*this,matrix,options);
    802   return *this;
    803 }
    804 
    805 namespace internal {
    806 template<int StorageOrder,typename RealScalar, typename Scalar, typename Index>
    807 EIGEN_DEVICE_FUNC
    808 static void tridiagonal_qr_step(RealScalar* diag, RealScalar* subdiag, Index start, Index end, Scalar* matrixQ, Index n)
    809 {
    810   using std::abs;
    811   RealScalar td = (diag[end-1] - diag[end])*RealScalar(0.5);
    812   RealScalar e = subdiag[end-1];
     813   // Note that, thanks to scaling, e^2 and td^2 cannot overflow. However, they can still
     814   // underflow, leading to inf/NaN values when using the following commented code:
    815 //   RealScalar e2 = numext::abs2(subdiag[end-1]);
    816 //   RealScalar mu = diag[end] - e2 / (td + (td>0 ? 1 : -1) * sqrt(td*td + e2));
     817   // This explains the following, somewhat more complicated, version:
    818   RealScalar mu = diag[end];
    819   if(td==RealScalar(0))
    820     mu -= abs(e);
    821   else
    822   {
    823     RealScalar e2 = numext::abs2(subdiag[end-1]);
    824     RealScalar h = numext::hypot(td,e);
    825     if(e2==RealScalar(0)) mu -= (e / (td + (td>RealScalar(0) ? RealScalar(1) : RealScalar(-1)))) * (e / h);
    826     else                  mu -= e2 / (td + (td>RealScalar(0) ? h : -h));
    827   }
    828 
    829   RealScalar x = diag[start] - mu;
    830   RealScalar z = subdiag[start];
    831   for (Index k = start; k < end; ++k)
    832   {
    833     JacobiRotation<RealScalar> rot;
    834     rot.makeGivens(x, z);
    835 
    836     // do T = G' T G
    837     RealScalar sdk = rot.s() * diag[k] + rot.c() * subdiag[k];
    838     RealScalar dkp1 = rot.s() * subdiag[k] + rot.c() * diag[k+1];
    839 
    840     diag[k] = rot.c() * (rot.c() * diag[k] - rot.s() * subdiag[k]) - rot.s() * (rot.c() * subdiag[k] - rot.s() * diag[k+1]);
    841     diag[k+1] = rot.s() * sdk + rot.c() * dkp1;
    842     subdiag[k] = rot.c() * sdk - rot.s() * dkp1;
    843 
    844 
    845     if (k > start)
    846       subdiag[k - 1] = rot.c() * subdiag[k-1] - rot.s() * z;
    847 
    848     x = subdiag[k];
    849 
    850     if (k < end - 1)
    851     {
    852       z = -rot.s() * subdiag[k+1];
    853       subdiag[k + 1] = rot.c() * subdiag[k+1];
    854     }
    855 
     856     // apply the Givens rotation to the matrix of eigenvectors: Q = Q * G
    857     if (matrixQ)
    858     {
    859       // FIXME if StorageOrder == RowMajor this operation is not very efficient
    860       Map<Matrix<Scalar,Dynamic,Dynamic,StorageOrder> > q(matrixQ,n,n);
    861       q.applyOnTheRight(k,k+1,rot);
    862     }
    863   }
    864 }
    865 
    866 } // end namespace internal
    867 
    868 } // end namespace Eigen
    869 
    870 #endif // EIGEN_SELFADJOINTEIGENSOLVER_H
    871