// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012 Désiré Nuentsa-Wakam <desire.nuentsa_wakam@inria.fr>
// Copyright (C) 2012 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.


#ifndef EIGEN_SPARSE_LU_H
#define EIGEN_SPARSE_LU_H

namespace Eigen {

template <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename _MatrixType::Index> > class SparseLU;
template <typename MappedSparseMatrixType> struct SparseLUMatrixLReturnType;
template <typename MatrixLType, typename MatrixUType> struct SparseLUMatrixUReturnType;

/** \ingroup SparseLU_Module
  * \class SparseLU
  *
  * \brief Sparse supernodal LU factorization for general matrices
  *
  * This class implements the supernodal LU factorization for general matrices.
  * It uses the main techniques from the sequential SuperLU package
  * (http://crd-legacy.lbl.gov/~xiaoye/SuperLU/). It transparently handles real
  * and complex arithmetic, in single and double precision, depending on the
  * scalar type of your input matrix.
  * The code has been optimized to provide BLAS-3 operations during supernode-panel updates.
  * It benefits directly from the built-in high-performance Eigen BLAS routines.
  * Moreover, when the size of a supernode is very small, the BLAS calls are avoided to
  * enable better optimization by the compiler. For best performance,
  * you should compile with the NDEBUG flag to avoid the numerous bounds checks on vectors.
  *
  * An important parameter of this class is the ordering method. It is used to reorder the columns
  * (and possibly the rows) of the matrix to reduce the number of new elements (fill-in) that are created during
  * numerical factorization. The cheapest method available is COLAMD.
  * See \link OrderingMethods_Module the OrderingMethods module \endlink for the list of
  * built-in and external ordering methods.
  *
  * Simple example with key steps:
  * \code
  * VectorXd x(n), b(n);
  * SparseMatrix<double, ColMajor> A;
  * SparseLU<SparseMatrix<double, ColMajor>, COLAMDOrdering<int> > solver;
  * // fill A and b;
  * // Compute the ordering permutation vector from the structural pattern of A
  * solver.analyzePattern(A);
  * // Compute the numerical factorization
  * solver.factorize(A);
  * // Use the factors to solve the linear system
  * x = solver.solve(b);
  * \endcode
  *
  * \warning The input matrix A should be in a \b compressed and \b column-major form.
  * Otherwise an expensive copy will be made. You can call the inexpensive makeCompressed() to get a compressed matrix.
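  * For instance, a small sketch (assuming A was left uncompressed after assembly):
  * \code
  * A.makeCompressed();   // inexpensive, and avoids the internal copy
  * SparseLU<SparseMatrix<double, ColMajor> > lu(A);
  * \endcode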
  *
  * \note Unlike the initial SuperLU implementation, there is no step to equilibrate the matrix.
  * For badly scaled matrices, this step can be useful to reduce the pivoting during factorization.
  * If this is the case for your matrices, you can try the basic scaling method at
  *  "unsupported/Eigen/src/IterativeSolvers/Scaling.h"
  *
  * \tparam _MatrixType The type of the sparse matrix. It must be a column-major SparseMatrix<>
  * \tparam _OrderingType The ordering method to use, either AMD, COLAMD or METIS. Default is COLAMD
  *
  *
  * \sa \ref TutorialSparseDirectSolvers
  * \sa \ref OrderingMethods_Module
  */
template <typename _MatrixType, typename _OrderingType>
class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::Index>
{
  public:
    typedef _MatrixType MatrixType;
    typedef _OrderingType OrderingType;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::RealScalar RealScalar;
    typedef typename MatrixType::Index Index;
    typedef SparseMatrix<Scalar,ColMajor,Index> NCMatrix;
    typedef internal::MappedSuperNodalMatrix<Scalar, Index> SCMatrix;
    typedef Matrix<Scalar,Dynamic,1> ScalarVector;
    typedef Matrix<Index,Dynamic,1> IndexVector;
    typedef PermutationMatrix<Dynamic, Dynamic, Index> PermutationType;
    typedef internal::SparseLUImpl<Scalar, Index> Base;

  public:
    SparseLU():m_isInitialized(true),m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
    {
      initperfvalues();
    }
    SparseLU(const MatrixType& matrix):m_isInitialized(true),m_lastError(""),m_Ustore(0,0,0,0,0,0),m_symmetricmode(false),m_diagpivotthresh(1.0),m_detPermR(1)
    {
      initperfvalues();
      compute(matrix);
    }

    ~SparseLU()
    {
      // Free all explicit dynamic pointers
    }

    void analyzePattern (const MatrixType& matrix);
    void factorize (const MatrixType& matrix);
    void simplicialfactorize(const MatrixType& matrix);

    /**
      * Compute the symbolic and numeric factorization of the input sparse matrix.
      * The input matrix should be in column-major storage.
      */
    void compute (const MatrixType& matrix)
    {
      // Analyze
      analyzePattern(matrix);
      // Factorize
      factorize(matrix);
    }

    inline Index rows() const { return m_mat.rows(); }
    inline Index cols() const { return m_mat.cols(); }
    /** Indicate that the pattern of the input matrix is symmetric */
    void isSymmetric(bool sym)
    {
      m_symmetricmode = sym;
    }

    /** \returns an expression of the matrix L, internally stored as supernodes.
      * The only operation available with this expression is the triangular solve
      * \code
      * y = b; matrixL().solveInPlace(y);
      * \endcode
      */
    SparseLUMatrixLReturnType<SCMatrix> matrixL() const
    {
      return SparseLUMatrixLReturnType<SCMatrix>(m_Lstore);
    }
    /** \returns an expression of the matrix U.
      * The only operation available with this expression is the triangular solve
      * \code
      * y = b; matrixU().solveInPlace(y);
      * \endcode
      */
    SparseLUMatrixUReturnType<SCMatrix,MappedSparseMatrix<Scalar,ColMajor,Index> > matrixU() const
    {
      return SparseLUMatrixUReturnType<SCMatrix, MappedSparseMatrix<Scalar,ColMajor,Index> >(m_Lstore, m_Ustore);
    }

    /**
      * \returns a reference to the row permutation matrix \f$ P_r \f$ such that \f$P_r A P_c^T = L U\f$
      * \sa colsPermutation()
      */
    inline const PermutationType& rowsPermutation() const
    {
      return m_perm_r;
    }
    /**
      * \returns a reference to the column permutation matrix \f$ P_c \f$ such that \f$P_r A P_c^T = L U\f$
      * \sa rowsPermutation()
      */
    inline const PermutationType& colsPermutation() const
    {
      return m_perm_c;
    }
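
    // Illustrative sketch (hypothetical names `lu` and `b`): given a factorized SparseLU `lu`,
    // solve(b) amounts to applying the permutations and triangular factors as in _solve() below:
    //   VectorXd y = lu.rowsPermutation() * b;            // y = Pr * b
    //   lu.matrixL().solveInPlace(y);                     // forward substitution with L
    //   lu.matrixU().solveInPlace(y);                     // backward substitution with U
    //   VectorXd x = lu.colsPermutation().inverse() * y;  // undo the column permutation
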
    /** Set the threshold used for a diagonal entry to be an acceptable pivot. */
    void setPivotThreshold(const RealScalar& thresh)
    {
      m_diagpivotthresh = thresh;
    }

    /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
      *
      * \warning the destination matrix X in X = this->solve(B) must be column-major.
      *
      * \sa compute()
      */
    template<typename Rhs>
    inline const internal::solve_retval<SparseLU, Rhs> solve(const MatrixBase<Rhs>& B) const
    {
      eigen_assert(m_factorizationIsOk && "SparseLU is not initialized.");
      eigen_assert(rows()==B.rows()
                    && "SparseLU::solve(): invalid number of rows of the right hand side matrix B");
      return internal::solve_retval<SparseLU, Rhs>(*this, B.derived());
    }

    /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
      *
      * \sa compute()
      */
    template<typename Rhs>
    inline const internal::sparse_solve_retval<SparseLU, Rhs> solve(const SparseMatrixBase<Rhs>& B) const
    {
      eigen_assert(m_factorizationIsOk && "SparseLU is not initialized.");
      eigen_assert(rows()==B.rows()
                    && "SparseLU::solve(): invalid number of rows of the right hand side matrix B");
      return internal::sparse_solve_retval<SparseLU, Rhs>(*this, B.derived());
    }
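
    // Usage sketch (hypothetical names `lu` and `n`): solve() also accepts a multi-column
    // right-hand side; the destination must be column-major, as warned above.
    //   MatrixXd B(n, 3), X(n, 3);   // three right-hand sides, column-major by default
    //   X = lu.solve(B);             // solves A*X = B for all columns at once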

    /** \brief Reports whether previous computation was successful.
      *
      * \returns \c Success if computation was successful,
      *          \c NumericalIssue if the LU factorization reports a problem, for instance a zero diagonal entry,
      *          \c InvalidInput if the input matrix is invalid
      *
      * \sa lastErrorMessage()
      */
    ComputationInfo info() const
    {
      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
      return m_info;
    }

    /**
      * \returns A string describing the type of error
      */
    std::string lastErrorMessage() const
    {
      return m_lastError;
    }
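
    // Error-handling sketch (hypothetical names `lu` and `A`): check info() after each step and
    // use lastErrorMessage() for a human-readable diagnostic.
    //   lu.compute(A);
    //   if (lu.info() != Eigen::Success)
    //     std::cerr << "SparseLU failed: " << lu.lastErrorMessage() << std::endl;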

    template<typename Rhs, typename Dest>
    bool _solve(const MatrixBase<Rhs> &B, MatrixBase<Dest> &X_base) const
    {
      Dest& X(X_base.derived());
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first");
      EIGEN_STATIC_ASSERT((Dest::Flags&RowMajorBit)==0,
                        THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);

      // Permute the right hand side to form X = Pr*B
      // on return, X is overwritten by the computed solution
      X.resize(B.rows(),B.cols());

      // this ugly const_cast_derived() helps to detect aliasing when applying the permutations
      for(Index j = 0; j < B.cols(); ++j)
        X.col(j) = rowsPermutation() * B.const_cast_derived().col(j);

      // Forward substitution with L
      this->matrixL().solveInPlace(X);
      // Backward substitution with U
      this->matrixU().solveInPlace(X);

      // Permute back the solution
      for (Index j = 0; j < B.cols(); ++j)
        X.col(j) = colsPermutation().inverse() * X.col(j);

      return true;
    }

    /**
      * \returns the absolute value of the determinant of the matrix of which
      * *this is the LU decomposition.
      *
      * \warning a determinant can be very big or small, so for matrices
      * of large enough dimension, there is a risk of overflow/underflow.
      * One way to work around that is to use logAbsDeterminant() instead.
      *
      * \sa logAbsDeterminant(), signDeterminant()
      */
    Scalar absDeterminant()
    {
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
      // Initialize with the determinant of the row matrix
      Scalar det = Scalar(1.);
      // Note that the diagonal blocks of U are stored in supernodes,
      // which are available in the L part :)
      for (Index j = 0; j < this->cols(); ++j)
      {
        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
        {
          if(it.index() == j)
          {
            using std::abs;
            det *= abs(it.value());
            break;
          }
        }
      }
      return det;
    }

    /** \returns the natural log of the absolute value of the determinant of the matrix
      * of which *this is the LU decomposition.
      *
      * \note This method is useful to work around the risk of overflow/underflow that's
      * inherent to the determinant computation.
      *
      * \sa absDeterminant(), signDeterminant()
      */
    Scalar logAbsDeterminant() const
    {
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
      Scalar det = Scalar(0.);
      for (Index j = 0; j < this->cols(); ++j)
      {
        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
        {
          if(it.row() < j) continue;
          if(it.row() == j)
          {
            using std::log; using std::abs;
            det += log(abs(it.value()));
            break;
          }
        }
      }
      return det;
    }

    /** \returns A number representing the sign of the determinant
      *
      * \sa absDeterminant(), logAbsDeterminant()
      */
    Scalar signDeterminant()
    {
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
      // Initialize with the determinant of the row matrix
      Index det = 1;
      // Note that the diagonal blocks of U are stored in supernodes,
      // which are available in the L part :)
      for (Index j = 0; j < this->cols(); ++j)
      {
        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
        {
          if(it.index() == j)
          {
            if(it.value()<0)
              det = -det;
            else if(it.value()==0)
              return 0;
            break;
          }
        }
      }
      return det * m_detPermR * m_detPermC;
    }

    /** \returns The determinant of the matrix.
      *
      * \sa absDeterminant(), logAbsDeterminant()
      */
    Scalar determinant()
    {
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first.");
      // Initialize with the determinant of the row matrix
      Scalar det = Scalar(1.);
      // Note that the diagonal blocks of U are stored in supernodes,
      // which are available in the L part :)
      for (Index j = 0; j < this->cols(); ++j)
      {
        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
        {
          if(it.index() == j)
          {
            det *= it.value();
            break;
          }
        }
      }
      return det * Scalar(m_detPermR * m_detPermC);
    }
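
    // Sketch (hypothetical `lu`): for large matrices the determinant can easily overflow or
    // underflow, so it may be safer to combine signDeterminant() and logAbsDeterminant():
    //   double logAbsDet = lu.logAbsDeterminant();
    //   double det = lu.signDeterminant() * std::exp(logAbsDet);  // beware: exp() may still overflow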

  protected:
    // Functions
    void initperfvalues()
    {
      m_perfv.panel_size = 16;
      m_perfv.relax = 1;
      m_perfv.maxsuper = 128;
      m_perfv.rowblk = 16;
      m_perfv.colblk = 8;
      m_perfv.fillfactor = 20;
    }

    // Variables
    mutable ComputationInfo m_info;
    bool m_isInitialized;
    bool m_factorizationIsOk;
    bool m_analysisIsOk;
    std::string m_lastError;
    NCMatrix m_mat; // The input (permuted) matrix
    SCMatrix m_Lstore; // The lower triangular matrix (supernodal)
    MappedSparseMatrix<Scalar,ColMajor,Index> m_Ustore; // The upper triangular matrix
    PermutationType m_perm_c; // Column permutation
    PermutationType m_perm_r; // Row permutation
    IndexVector m_etree; // Column elimination tree

    typename Base::GlobalLU_t m_glu;

    // SparseLU options
    bool m_symmetricmode;
    // values for performance
    internal::perfvalues<Index> m_perfv;
    RealScalar m_diagpivotthresh; // Specifies the threshold used for a diagonal entry to be an acceptable pivot
    Index m_nnzL, m_nnzU; // Nonzeros in L and U factors
    Index m_detPermR, m_detPermC; // Determinants of the permutation matrices
  private:
    // Disable copy constructor
    SparseLU (const SparseLU& );

}; // End class SparseLU


// Functions needed by the analysis phase
/**
  * Compute the column permutation to minimize the fill-in
  *
  *  - Apply this permutation to the input matrix
  *
  *  - Compute the column elimination tree on the permuted matrix
  *
  *  - Postorder the elimination tree and the column permutation
  *
  */
template <typename MatrixType, typename OrderingType>
void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
{

  // TODO: As in SuperLU, it is possible to compute row and column scaling vectors to equilibrate the matrix mat.

  OrderingType ord;
  ord(mat,m_perm_c);

  // Apply the permutation to the columns of the input matrix
  // First copy the whole input matrix.
  m_mat = mat;
  if (m_perm_c.size()) {
    m_mat.uncompress(); // NOTE: The only effect of this call is to create the InnerNonzeros pointers. FIXME: This vector is filled but not subsequently used.
    // Then, permute only the column pointers
    const Index * outerIndexPtr;
    if (mat.isCompressed()) outerIndexPtr = mat.outerIndexPtr();
    else
    {
      Index *outerIndexPtr_t = new Index[mat.cols()+1];
      for(Index i = 0; i <= mat.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
      outerIndexPtr = outerIndexPtr_t;
    }
    for (Index i = 0; i < mat.cols(); i++)
    {
      m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
      m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
    }
    if(!mat.isCompressed()) delete[] outerIndexPtr;
  }
  // Compute the column elimination tree of the permuted matrix
  IndexVector firstRowElt;
  internal::coletree(m_mat, m_etree, firstRowElt);

  // In symmetric mode, do not do postorder here
  if (!m_symmetricmode) {
    IndexVector post, iwork;
    // Post order etree
    internal::treePostorder(m_mat.cols(), m_etree, post);


    // Renumber etree in postorder
    Index m = m_mat.cols();
    iwork.resize(m+1);
    for (Index i = 0; i < m; ++i) iwork(post(i)) = post(m_etree(i));
    m_etree = iwork;

    // Postmultiply A*Pc by post, i.e., reorder the matrix according to the postorder of the etree
    PermutationType post_perm(m);
    for (Index i = 0; i < m; i++)
      post_perm.indices()(i) = post(i);

    // Combine the two permutations: postorder the permutation for future use
    if(m_perm_c.size()) {
      m_perm_c = post_perm * m_perm_c;
    }

  } // end postordering

  m_analysisIsOk = true;
}

// Functions needed by the numerical factorization phase


/**
  *  - Numerical factorization
  *  - Interleaved with the symbolic factorization
  * On exit, info is
  *
  *    = 0: successful factorization
  *
  *    > 0: if info = i, and i is
  *
  *       <= A->ncol: U(i,i) is exactly zero. The factorization has
  *          been completed, but the factor U is exactly singular,
  *          and division by zero will occur if it is used to solve a
  *          system of equations.
  *
  *       > A->ncol: number of bytes allocated when memory allocation
  *         failure occurred, plus A->ncol. If lwork = -1, it is
  *         the estimated amount of space needed, plus A->ncol.
  */
template <typename MatrixType, typename OrderingType>
void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
{
  using internal::emptyIdxLU;
  eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
  eigen_assert((matrix.rows() == matrix.cols()) && "Only for square matrices");

  typedef typename IndexVector::Scalar Index;


  // Apply the column permutation computed in analyzePattern()
  //   m_mat = matrix * m_perm_c.inverse();
  m_mat = matrix;
  if (m_perm_c.size())
  {
    m_mat.uncompress(); // NOTE: The only effect of this call is to create the InnerNonzeros pointers.
    // Then, permute only the column pointers
    const Index * outerIndexPtr;
    if (matrix.isCompressed()) outerIndexPtr = matrix.outerIndexPtr();
    else
    {
      Index* outerIndexPtr_t = new Index[matrix.cols()+1];
      for(Index i = 0; i <= matrix.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
      outerIndexPtr = outerIndexPtr_t;
    }
    for (Index i = 0; i < matrix.cols(); i++)
    {
      m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
      m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
    }
    if(!matrix.isCompressed()) delete[] outerIndexPtr;
  }
  else
  { // FIXME: This should not be needed if the empty permutation is handled transparently
    m_perm_c.resize(matrix.cols());
    for(Index i = 0; i < matrix.cols(); ++i) m_perm_c.indices()(i) = i;
  }

  Index m = m_mat.rows();
  Index n = m_mat.cols();
  Index nnz = m_mat.nonZeros();
  Index maxpanel = m_perfv.panel_size * m;
  // Allocate working storage common to the factor routines
  Index lwork = 0;
  Index info = Base::memInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu);
  if (info)
  {
    m_lastError = "UNABLE TO ALLOCATE WORKING MEMORY\n\n";
    m_factorizationIsOk = false;
    return;
  }

  // Set up pointers for integer working arrays
  IndexVector segrep(m); segrep.setZero();
  IndexVector parent(m); parent.setZero();
  IndexVector xplore(m); xplore.setZero();
  IndexVector repfnz(maxpanel);
  IndexVector panel_lsub(maxpanel);
  IndexVector xprune(n); xprune.setZero();
  IndexVector marker(m*internal::LUNoMarker); marker.setZero();

  repfnz.setConstant(-1);
  panel_lsub.setConstant(-1);

  // Set up pointers for scalar working arrays
  ScalarVector dense;
  dense.setZero(maxpanel);
  ScalarVector tempv;
  tempv.setZero(internal::LUnumTempV(m, m_perfv.panel_size, m_perfv.maxsuper, /*m_perfv.rowblk*/m));

  // Compute the inverse of perm_c
  PermutationType iperm_c(m_perm_c.inverse());

  // Identify initial relaxed snodes
  IndexVector relax_end(n);
  if (m_symmetricmode == true)
    Base::heap_relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);
  else
    Base::relax_snode(n, m_etree, m_perfv.relax, marker, relax_end);


  m_perm_r.resize(m);
  m_perm_r.indices().setConstant(-1);
  marker.setConstant(-1);
  m_detPermR = 1; // Record the determinant of the row permutation

  m_glu.supno(0) = emptyIdxLU; m_glu.xsup.setConstant(0);
  m_glu.xsup(0) = m_glu.xlsub(0) = m_glu.xusub(0) = m_glu.xlusup(0) = Index(0);

  // Work on one 'panel' at a time. A panel is one of the following:
  //  (a) a relaxed supernode at the bottom of the etree, or
  //  (b) panel_size contiguous columns, <panel_size> defined by the user
  Index jcol;
  IndexVector panel_histo(n);
  Index pivrow; // Pivotal row number in the original row matrix
  Index nseg1; // Number of segments in U-column above panel row jcol
  Index nseg; // Number of segments in each U-column
  Index irep;
  Index i, k, jj;
  for (jcol = 0; jcol < n; )
  {
    // Adjust panel size so that a panel won't overlap with the next relaxed snode.
    Index panel_size = m_perfv.panel_size; // upper bound on panel width
    for (k = jcol + 1; k < (std::min)(jcol+panel_size, n); k++)
    {
      if (relax_end(k) != emptyIdxLU)
      {
        panel_size = k - jcol;
        break;
      }
    }
    if (k == n)
      panel_size = n - jcol;

    // Symbolic outer factorization on a panel of columns
    Base::panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub, segrep, repfnz, xprune, marker, parent, xplore, m_glu);

    // Numeric sup-panel updates in topological order
    Base::panel_bmod(m, panel_size, jcol, nseg1, dense, tempv, segrep, repfnz, m_glu);

    // Sparse LU within the panel, and below the panel diagonal
    for (jj = jcol; jj < jcol + panel_size; jj++)
    {
      k = (jj - jcol) * m; // Column index for w-wide arrays

      nseg = nseg1; // begin after all the panel segments
      // Depth-first-search for the current column
      VectorBlock<IndexVector> panel_lsubk(panel_lsub, k, m);
      VectorBlock<IndexVector> repfnz_k(repfnz, k, m);
      info = Base::column_dfs(m, jj, m_perm_r.indices(), m_perfv.maxsuper, nseg, panel_lsubk, segrep, repfnz_k, xprune, marker, parent, xplore, m_glu);
      if (info)
      {
        m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_DFS() ";
        m_info = NumericalIssue;
        m_factorizationIsOk = false;
        return;
      }
      // Numeric updates to this column
      VectorBlock<ScalarVector> dense_k(dense, k, m);
      VectorBlock<IndexVector> segrep_k(segrep, nseg1, m-nseg1);
      info = Base::column_bmod(jj, (nseg - nseg1), dense_k, tempv, segrep_k, repfnz_k, jcol, m_glu);
      if (info)
      {
        m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_BMOD() ";
        m_info = NumericalIssue;
        m_factorizationIsOk = false;
        return;
      }

      // Copy the U-segments to ucol(*)
      info = Base::copy_to_ucol(jj, nseg, segrep, repfnz_k, m_perm_r.indices(), dense_k, m_glu);
      if (info)
      {
        m_lastError = "UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() ";
        m_info = NumericalIssue;
        m_factorizationIsOk = false;
        return;
      }

      // Form the L-segment
      info = Base::pivotL(jj, m_diagpivotthresh, m_perm_r.indices(), iperm_c.indices(), pivrow, m_glu);
      if (info)
      {
        m_lastError = "THE MATRIX IS STRUCTURALLY SINGULAR ... ZERO COLUMN AT ";
        std::ostringstream returnInfo;
        returnInfo << info;
        m_lastError += returnInfo.str();
        m_info = NumericalIssue;
        m_factorizationIsOk = false;
        return;
      }

      // Update the determinant of the row permutation matrix
      // FIXME: the following test is not correct, we should probably take iperm_c into account and pivrow is not directly the row pivot.
      if (pivrow != jj) m_detPermR = -m_detPermR;

      // Prune columns (0:jj-1) using column jj
      Base::pruneL(jj, m_perm_r.indices(), pivrow, nseg, segrep, repfnz_k, xprune, m_glu);

      // Reset repfnz for this column
      for (i = 0; i < nseg; i++)
      {
        irep = segrep(i);
        repfnz_k(irep) = emptyIdxLU;
      }
    } // end SparseLU within the panel
    jcol += panel_size; // Move to the next panel
  } // end for -- end elimination

  m_detPermR = m_perm_r.determinant();
  m_detPermC = m_perm_c.determinant();

  // Count the number of nonzeros in factors
  Base::countnz(n, m_nnzL, m_nnzU, m_glu);
  // Apply permutation to the L subscripts
  Base::fixupL(n, m_perm_r.indices(), m_glu);

  // Create supernode matrix L
  m_Lstore.setInfos(m, n, m_glu.lusup, m_glu.xlusup, m_glu.lsub, m_glu.xlsub, m_glu.supno, m_glu.xsup);
  // Create the column major upper sparse matrix U
  new (&m_Ustore) MappedSparseMatrix<Scalar, ColMajor, Index> (m, n, m_nnzU, m_glu.xusub.data(), m_glu.usub.data(), m_glu.ucol.data());

  m_info = Success;
  m_factorizationIsOk = true;
}
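
// Usage sketch (hypothetical names): when solving a sequence of systems whose matrices share
// the same sparsity pattern but have different numerical values, the symbolic analysis can be
// performed once and only factorize() repeated:
//   SparseLU<SparseMatrix<double, ColMajor> > lu;
//   lu.analyzePattern(A);          // symbolic step, depends only on the pattern of A
//   for (int t = 0; t < nsteps; ++t)
//   {
//     updateValues(A);             // hypothetical routine: keeps the pattern, changes the values
//     lu.factorize(A);             // numerical step
//     x = lu.solve(b);
//   }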

template<typename MappedSupernodalType>
struct SparseLUMatrixLReturnType : internal::no_assignment_operator
{
  typedef typename MappedSupernodalType::Index Index;
  typedef typename MappedSupernodalType::Scalar Scalar;
  SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL)
  { }
  Index rows() { return m_mapL.rows(); }
  Index cols() { return m_mapL.cols(); }
  template<typename Dest>
  void solveInPlace(MatrixBase<Dest> &X) const
  {
    m_mapL.solveInPlace(X);
  }
  const MappedSupernodalType& m_mapL;
};

template<typename MatrixLType, typename MatrixUType>
struct SparseLUMatrixUReturnType : internal::no_assignment_operator
{
  typedef typename MatrixLType::Index Index;
  typedef typename MatrixLType::Scalar Scalar;
  SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU)
  : m_mapL(mapL), m_mapU(mapU)
  { }
  Index rows() { return m_mapL.rows(); }
  Index cols() { return m_mapL.cols(); }

  template<typename Dest> void solveInPlace(MatrixBase<Dest> &X) const
  {
    Index nrhs = X.cols();
    Index n = X.rows();
    // Backward solve with U
    for (Index k = m_mapL.nsuper(); k >= 0; k--)
    {
      Index fsupc = m_mapL.supToCol()[k];
      Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc]; // leading dimension
      Index nsupc = m_mapL.supToCol()[k+1] - fsupc;
      Index luptr = m_mapL.colIndexPtr()[fsupc];

      if (nsupc == 1)
      {
        // 1x1 supernode: the diagonal block reduces to a scalar division
        for (Index j = 0; j < nrhs; j++)
        {
          X(fsupc, j) /= m_mapL.valuePtr()[luptr];
        }
      }
      else
      {
        // Dense triangular solve with the diagonal block of the supernode (stored in the L part)
        Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A( &(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda) );
        Map< Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > U (&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n) );
        U = A.template triangularView<Upper>().solve(U);
      }

      // Update the right-hand side with the off-diagonal entries of U stored in m_mapU
      for (Index j = 0; j < nrhs; ++j)
      {
        for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)
        {
          typename MatrixUType::InnerIterator it(m_mapU, jcol);
          for ( ; it; ++it)
          {
            Index irow = it.index();
            X(irow, j) -= X(jcol, j) * it.value();
          }
        }
      }
    } // End For U-solve
  }
  const MatrixLType& m_mapL;
  const MatrixUType& m_mapU;
};

namespace internal {

template<typename _MatrixType, typename Derived, typename Rhs>
struct solve_retval<SparseLU<_MatrixType,Derived>, Rhs>
  : solve_retval_base<SparseLU<_MatrixType,Derived>, Rhs>
{
  typedef SparseLU<_MatrixType,Derived> Dec;
  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)

  template<typename Dest> void evalTo(Dest& dst) const
  {
    dec()._solve(rhs(),dst);
  }
};

template<typename _MatrixType, typename Derived, typename Rhs>
struct sparse_solve_retval<SparseLU<_MatrixType,Derived>, Rhs>
  : sparse_solve_retval_base<SparseLU<_MatrixType,Derived>, Rhs>
{
  typedef SparseLU<_MatrixType,Derived> Dec;
  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)

  template<typename Dest> void evalTo(Dest& dst) const
  {
    this->defaultEvalTo(dst);
  }
};
} // end namespace internal

} // End namespace Eigen

#endif