#ifndef EIGEN_SPARSE_LU_H
#define EIGEN_SPARSE_LU_H

// ...

template <typename _MatrixType, typename _OrderingType = COLAMDOrdering<typename _MatrixType::Index> >
class SparseLU;
// ...

template <typename _MatrixType, typename _OrderingType>
class SparseLU : public internal::SparseLUImpl<typename _MatrixType::Scalar, typename _MatrixType::Index>
{
  public:
    typedef _MatrixType MatrixType;
    typedef _OrderingType OrderingType;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::RealScalar RealScalar;
    typedef typename MatrixType::Index Index;
    // ...

    SparseLU()
      : m_isInitialized(true), m_lastError(""), m_Ustore(0,0,0,0,0,0),
        m_symmetricmode(false), m_diagpivotthresh(1.0), m_detPermR(1)
    {
      initperfvalues();
    }

    SparseLU(const MatrixType& matrix)
      : m_isInitialized(true), m_lastError(""), m_Ustore(0,0,0,0,0,0),
        m_symmetricmode(false), m_diagpivotthresh(1.0), m_detPermR(1)
    {
      initperfvalues();
      compute(matrix);
    }

    // ...
    void factorize(const MatrixType& matrix);
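
    // Usage sketch (illustrative; A, b and x stand for user data, not for members of this class):
    // \code
    //   SparseMatrix<double, ColMajor> A;   // square, filled and compressed by the caller
    //   VectorXd b, x;
    //   SparseLU<SparseMatrix<double, ColMajor>, COLAMDOrdering<int> > solver;
    //   solver.analyzePattern(A);           // symbolic step: ordering and elimination tree
    //   solver.factorize(A);                // numeric step: computes the L and U factors
    //   x = solver.solve(b);                // solves A*x = b
    // \endcode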
    // ...
    template<typename Rhs>
    inline const internal::solve_retval<SparseLU, Rhs> solve(const MatrixBase<Rhs>& B) const
    {
      eigen_assert(rows() == B.rows()
                   && "SparseLU::solve(): invalid number of rows of the right hand side matrix B");
      return internal::solve_retval<SparseLU, Rhs>(*this, B.derived());
    }

    template<typename Rhs>
    inline const internal::sparse_solve_retval<SparseLU, Rhs> solve(const SparseMatrixBase<Rhs>& B) const
    {
      eigen_assert(rows() == B.rows()
                   && "SparseLU::solve(): invalid number of rows of the right hand side matrix B");
      return internal::sparse_solve_retval<SparseLU, Rhs>(*this, B.derived());
    }
    // ...
    template<typename Rhs, typename Dest>
    bool _solve(const MatrixBase<Rhs>& B, MatrixBase<Dest>& _X) const
    {
      Dest& X(_X.derived());
      eigen_assert(m_factorizationIsOk && "The matrix should be factorized first");
      EIGEN_STATIC_ASSERT((Dest::Flags & RowMajorBit) == 0,
                          THIS_METHOD_IS_ONLY_FOR_COLUMN_MAJOR_MATRICES);

      // Permute the right-hand side: X = Pr * B. On return X holds the solution.
      X.resize(B.rows(), B.cols());
      for(Index j = 0; j < B.cols(); ++j)
        X.col(j) = rowsPermutation() * B.col(j);

      // Forward substitution with L, then backward substitution with U.
      this->matrixL().solveInPlace(X);
      this->matrixU().solveInPlace(X);

      // Undo the column permutation.
      for (Index j = 0; j < B.cols(); ++j)
        X.col(j) = colsPermutation().inverse() * X.col(j);

      return true;
    }
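
    // In matrix terms, the steps above compute
    //   x = Pc^(-1) * U^(-1) * L^(-1) * Pr * b,
    // where Pr = rowsPermutation() and Pc = colsPermutation(); the solve() overloads
    // further up expose this for both dense (MatrixBase) and sparse (SparseMatrixBase)
    // right-hand sides.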
    // ...
    Scalar absDeterminant()
    {
      Scalar det = Scalar(1.);
      for (Index j = 0; j < this->cols(); ++j)
      {
        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
        {
          if(it.row() < j) continue;
          if(it.row() == j) { det *= (std::abs)(it.value()); break; }
        }
      }
      return det;
    }

    Scalar logAbsDeterminant() const
    {
      Scalar det = Scalar(0.);
      for (Index j = 0; j < this->cols(); ++j)
      {
        for (typename SCMatrix::InnerIterator it(m_Lstore, j); it; ++it)
        {
          if(it.row() < j) continue;
          if(it.row() == j) { det += (std::log)((std::abs)(it.value())); break; }
        }
      }
      return det;
    }
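
    // Usage sketch (illustrative): both functions read the diagonal of U out of the
    // supernodal storage; the log variant avoids overflow and underflow for large matrices.
    // \code
    //   SparseLU<SparseMatrix<double> > lu(A);        // A: square SparseMatrix<double>
    //   double absDet    = lu.absDeterminant();       // |det(A)|
    //   double logAbsDet = lu.logAbsDeterminant();    // log(|det(A)|)
    // \endcode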
// ...

template <typename MatrixType, typename OrderingType>
void SparseLU<MatrixType, OrderingType>::analyzePattern(const MatrixType& mat)
{
  // ...
  // Apply the fill-reducing column permutation by remapping the outer index pointers.
  const Index * outerIndexPtr;
  if (mat.isCompressed()) outerIndexPtr = mat.outerIndexPtr();
  else
  {
    Index* outerIndexPtr_t = new Index[mat.cols()+1];
    for(Index i = 0; i <= mat.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
    outerIndexPtr = outerIndexPtr_t;
  }
  for (Index i = 0; i < mat.cols(); i++)
  {
    m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
    m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
  }
  if(!mat.isCompressed()) delete[] outerIndexPtr;

  // ...
  // Renumber the column elimination tree in postorder.
  for (Index i = 0; i < m; ++i) iwork(post(i)) = post(m_etree(i));
  // ...
  // Build the postorder permutation and compose it with the column ordering.
  for (Index i = 0; i < m; i++)
    post_perm.indices()(i) = post(i);
  // ...
}
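
// Usage sketch (illustrative; solver, A, b1, b2, x1, x2 are placeholders): the analysis
// above depends only on the sparsity pattern, so it can be reused across factorizations
// that share the same structure but carry different numerical values:
// \code
//   solver.analyzePattern(A);     // once per sparsity pattern
//   solver.factorize(A);          // once per set of coefficient values
//   x1 = solver.solve(b1);
//   // ... change the values stored in A, keeping its pattern ...
//   solver.factorize(A);          // reuses the existing analysis
//   x2 = solver.solve(b2);
// \endcode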
// ...

template <typename MatrixType, typename OrderingType>
void SparseLU<MatrixType, OrderingType>::factorize(const MatrixType& matrix)
{
  eigen_assert(m_analysisIsOk && "analyzePattern() should be called first");
  eigen_assert((matrix.rows() == matrix.cols()) && "Only for squared matrices");
  // ...
  // Apply the column permutation computed by analyzePattern() to the outer pointers.
  const Index * outerIndexPtr;
  if (matrix.isCompressed()) outerIndexPtr = matrix.outerIndexPtr();
  else
  {
    Index* outerIndexPtr_t = new Index[matrix.cols()+1];
    for(Index i = 0; i <= matrix.cols(); i++) outerIndexPtr_t[i] = m_mat.outerIndexPtr()[i];
    outerIndexPtr = outerIndexPtr_t;
  }
  for (Index i = 0; i < matrix.cols(); i++)
  {
    m_mat.outerIndexPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i];
    m_mat.innerNonZeroPtr()[m_perm_c.indices()(i)] = outerIndexPtr[i+1] - outerIndexPtr[i];
  }
  if(!matrix.isCompressed()) delete[] outerIndexPtr;

  // ...
  Index m = m_mat.rows();
  Index n = m_mat.cols();
  Index nnz = m_mat.nonZeros();
  Index maxpanel = m_perfv.panel_size * m;
  // Allocate the working storage shared by the factorization kernels.
  Index lwork = 0;
  Index info = Base::memInit(m, n, nnz, lwork, m_perfv.fillfactor, m_perfv.panel_size, m_glu);
  if (info)
  {
    m_lastError = "UNABLE TO ALLOCATE WORKING MEMORY\n\n";
    m_factorizationIsOk = false;
    return;
  }
  // ...
  // Eliminate the matrix one panel of columns at a time.
  for (jcol = 0; jcol < n; )
  {
    // Shrink the panel so that it does not overlap the next relaxed supernode.
    Index panel_size = m_perfv.panel_size;
    for (k = jcol + 1; k < (std::min)(jcol+panel_size, n); k++)
    {
      if (relax_end(k) != emptyIdxLU) { panel_size = k - jcol; break; }
    }
    if (k == n) panel_size = n - jcol;

    // Symbolic factorization on the whole panel of columns.
    Base::panel_dfs(m, panel_size, jcol, m_mat, m_perm_r.indices(), nseg1, dense, panel_lsub,
                    segrep, repfnz, xprune, marker, parent, xplore, m_glu);
    // ...
    // Sparse LU, column by column, within the panel and below its diagonal.
    for (jj = jcol; jj < jcol + panel_size; jj++)
    {
      // Depth-first search on the current column.
      info = Base::column_dfs(m, jj, m_perm_r.indices(), m_perfv.maxsuper, nseg, panel_lsubk,
                              segrep, repfnz_k, xprune, marker, parent, xplore, m_glu);
      if (info)
      {
        m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_DFS() ";
        // ...
      }
      // ... (column_bmod)
      if (info)
      {
        m_lastError = "UNABLE TO EXPAND MEMORY IN COLUMN_BMOD() ";
        // ...
      }
      // ... (copy_to_ucol)
      if (info)
      {
        m_lastError = "UNABLE TO EXPAND MEMORY IN COPY_TO_UCOL() ";
        // ...
      }
      // ... (pivotL)
      if (info)
      {
        m_lastError = "THE MATRIX IS STRUCTURALLY SINGULAR ... ZERO COLUMN AT ";
        std::ostringstream returnInfo;
        returnInfo << info;
        m_lastError += returnInfo.str();
        // ...
      }
      // Reset repfnz for the current column.
      for (i = 0; i < nseg; i++)
      {
        // ...
      }
    }
    // ...
  }
  // ...
}
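
// Usage sketch (illustrative): the error strings assigned above are exposed through
// lastErrorMessage(), and info() reports whether the factorization succeeded:
// \code
//   solver.factorize(A);
//   if (solver.info() != Success)
//     std::cerr << solver.lastErrorMessage() << std::endl;
// \endcode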
// ...

template<typename MappedSupernodalType>
struct SparseLUMatrixLReturnType : internal::no_assignment_operator
{
  typedef typename MappedSupernodalType::Index Index;
  typedef typename MappedSupernodalType::Scalar Scalar;
  SparseLUMatrixLReturnType(const MappedSupernodalType& mapL) : m_mapL(mapL)
  { }
  Index rows() { return m_mapL.rows(); }
  Index cols() { return m_mapL.cols(); }
  template<typename Dest>
  void solveInPlace(MatrixBase<Dest>& X) const
  {
    m_mapL.solveInPlace(X);
  }
  const MappedSupernodalType& m_mapL;
};
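
// Usage sketch (illustrative; lu and B are placeholders): matrixL() and matrixU() return
// these lightweight views rather than plain sparse matrices, and are meant to be applied
// through solveInPlace(), mirroring the steps of SparseLU::_solve():
// \code
//   MatrixXd X = lu.rowsPermutation() * B;                // apply the row permutation
//   lu.matrixL().solveInPlace(X);                         // forward substitution with L
//   lu.matrixU().solveInPlace(X);                         // backward substitution with U
//   MatrixXd sol = lu.colsPermutation().inverse() * X;    // undo the column permutation
// \endcode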
// ...

template<typename MatrixLType, typename MatrixUType>
struct SparseLUMatrixUReturnType : internal::no_assignment_operator
{
  typedef typename MatrixLType::Index Index;
  typedef typename MatrixLType::Scalar Scalar;
  SparseLUMatrixUReturnType(const MatrixLType& mapL, const MatrixUType& mapU)
    : m_mapL(mapL), m_mapU(mapU)
  { }
  Index rows() { return m_mapL.rows(); }
  Index cols() { return m_mapL.cols(); }

  template<typename Dest>
  void solveInPlace(MatrixBase<Dest>& X) const
  {
    Index nrhs = X.cols();
    Index n = X.rows();
    // Backward solve with U, one supernode at a time.
    for (Index k = m_mapL.nsuper(); k >= 0; k--)
    {
      Index fsupc = m_mapL.supToCol()[k];
      Index lda = m_mapL.colIndexPtr()[fsupc+1] - m_mapL.colIndexPtr()[fsupc]; // leading dimension
      Index nsupc = m_mapL.supToCol()[k+1] - fsupc;
      Index luptr = m_mapL.colIndexPtr()[fsupc];

      if (nsupc == 1)
      {
        for (Index j = 0; j < nrhs; j++)
          X(fsupc, j) /= m_mapL.valuePtr()[luptr];
      }
      else
      {
        // Dense triangular solve with the diagonal block of the supernode.
        Map<const Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > A(&(m_mapL.valuePtr()[luptr]), nsupc, nsupc, OuterStride<>(lda));
        Map<Matrix<Scalar,Dynamic,Dynamic>, 0, OuterStride<> > U(&(X(fsupc,0)), nsupc, nrhs, OuterStride<>(n));
        U = A.template triangularView<Upper>().solve(U);
      }

      // Update the remaining right-hand side with the off-diagonal part of U.
      for (Index j = 0; j < nrhs; ++j)
      {
        for (Index jcol = fsupc; jcol < fsupc + nsupc; jcol++)
        {
          typename MatrixUType::InnerIterator it(m_mapU, jcol);
          for (; it; ++it)
          {
            Index irow = it.index();
            X(irow, j) -= X(jcol, j) * it.value();
          }
        }
      }
    }
  }
  const MatrixLType& m_mapL;
  const MatrixUType& m_mapU;
};
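
// Sketch (illustrative): the Map/OuterStride pair above views the nsupc x nsupc diagonal
// block of a supernode in place, inside the packed value array of L, without copying.
// On a plain array the same mechanism looks like this:
// \code
//   double storage[12] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 };   // column-major, lda = 4
//   Map<const Matrix<double,Dynamic,Dynamic>, 0, OuterStride<> >
//       blk(storage, 3, 3, OuterStride<>(4));   // blk(i,j) == storage[i + 4*j]
// \endcode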
// ...
namespace internal {

template<typename _MatrixType, typename Derived, typename Rhs>
struct solve_retval<SparseLU<_MatrixType,Derived>, Rhs>
  : solve_retval_base<SparseLU<_MatrixType,Derived>, Rhs>
{
  typedef SparseLU<_MatrixType,Derived> Dec;
  EIGEN_MAKE_SOLVE_HELPERS(Dec,Rhs)

  template<typename Dest> void evalTo(Dest& dst) const
  {
    dec()._solve(rhs(),dst);
  }
};

template<typename _MatrixType, typename Derived, typename Rhs>
struct sparse_solve_retval<SparseLU<_MatrixType,Derived>, Rhs>
  : sparse_solve_retval_base<SparseLU<_MatrixType,Derived>, Rhs>
{
  typedef SparseLU<_MatrixType,Derived> Dec;
  EIGEN_MAKE_SPARSE_SOLVE_HELPERS(Dec,Rhs)

  template<typename Dest> void evalTo(Dest& dst) const
  {
    this->defaultEvalTo(dst);
  }
};

} // end namespace internal

// ...
#endif // EIGEN_SPARSE_LU_H
 