#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

namespace Eigen {

namespace internal {

template<typename _Scalar, int _Options, typename _Index>
struct traits<SparseMatrix<_Scalar, _Options, _Index> >
{
  typedef _Scalar Scalar;
  typedef _Index Index;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = _Options | NestByRefBit | LvalueBit,
    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
{
  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
  typedef typename nested<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;

  typedef _Scalar Scalar;
  typedef Dense StorageKind;
  typedef _Index Index;
  typedef MatrixXpr XprKind;

  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = 1,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = 1,
    Flags = 0,
    CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
  };
};

} // end namespace internal

/** \class SparseMatrix
  *
  * \brief A versatile sparse matrix representation.
  *
  * This class implements a more versatile variant of the widely-used Compressed Column (or Row)
  * Storage scheme. Each inner vector is stored as a contiguous block of (value, inner index)
  * pairs. In the default "compressed" mode these blocks are packed back to back, while the
  * optional "uncompressed" mode keeps per-vector free room (tracked by m_innerNonZeros) so that
  * random insertions stay cheap.
  *
  * \tparam _Scalar the scalar type, i.e. the type of the coefficients
  * \tparam _Options either \c ColMajor or \c RowMajor, the default being \c ColMajor
  * \tparam _Index the type of the indices
  */
template<typename _Scalar, int _Options, typename _Index>
class SparseMatrix
  : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
{
  public:
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)

    typedef MappedSparseMatrix<Scalar,Flags> Map;
    using Base::IsRowMajor;
    typedef internal::CompressedStorage<Scalar,Index> Storage;
    enum {
      Options = _Options
    };

  protected:

    typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;

    Index m_outerSize;       // number of columns (resp. rows) for a column-major (resp. row-major) matrix
    Index m_innerSize;       // number of rows (resp. columns)
    Index* m_outerIndex;     // array of outerSize()+1 positions into m_data
    Index* m_innerNonZeros;  // optional array of per-vector counts; null means compressed mode
    Storage m_data;          // the (value, inner index) pairs

    Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
    const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }

  public:

    /** \returns whether \c *this is in compressed form. */
    inline bool isCompressed() const { return m_innerNonZeros==0; }

    /** \returns the number of rows of the matrix */
    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    /** \returns the number of columns of the matrix */
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }

    /** \returns the number of rows (resp. columns) of the matrix if the storage order is column major (resp. row major) */
    inline Index innerSize() const { return m_innerSize; }
    /** \returns the number of columns (resp. rows) of the matrix if the storage order is column major (resp. row major) */
    inline Index outerSize() const { return m_outerSize; }

    /** \returns a const pointer to the array of values */
    inline const Scalar* valuePtr() const { return &m_data.value(0); }
    /** \returns a non-const pointer to the array of values */
    inline Scalar* valuePtr() { return &m_data.value(0); }

    /** \returns a const pointer to the array of inner indices */
    inline const Index* innerIndexPtr() const { return &m_data.index(0); }
    /** \returns a non-const pointer to the array of inner indices */
    inline Index* innerIndexPtr() { return &m_data.index(0); }

    /** \returns a const pointer to the array of the starting positions of the inner vectors */
    inline const Index* outerIndexPtr() const { return m_outerIndex; }
    /** \returns a non-const pointer to the array of the starting positions of the inner vectors */
    inline Index* outerIndexPtr() { return m_outerIndex; }

    /** \returns a const pointer to the array of the number of non zeros of the inner vectors;
      * this array is null when the matrix is in compressed mode */
    inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
    /** \returns a non-const pointer to the array of the number of non zeros of the inner vectors */
    inline Index* innerNonZeroPtr() { return m_innerNonZeros; }

    /** \internal */
    inline Storage& data() { return m_data; }
    /** \internal */
    inline const Storage& data() const { return m_data; }
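
    /* Editor's illustration of the compressed (CCS) layout, assuming a 4x4
     * column-major matrix holding the non zeros {(0,0)=4, (2,0)=1, (1,2)=5, (3,3)=7}:
     *
     *   valuePtr()      -> 4 1 5 7
     *   innerIndexPtr() -> 0 2 1 3
     *   outerIndexPtr() -> 0 2 2 3 4   (outerSize()+1 entries)
     *
     * In uncompressed mode, outerIndexPtr()[j+1]-outerIndexPtr()[j] only bounds the
     * room reserved for inner vector j, and innerNonZeroPtr()[j] holds the actual
     * number of stored coefficients.
     */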

    /** \returns the value of the matrix at position \a (row, col).
      * This function returns Scalar(0) if the coefficient is not stored. */
    inline Scalar coeff(Index row, Index col) const
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      return m_data.atInRange(m_outerIndex[outer], end, inner);
    }

    /** \returns a non-const reference to the value of the matrix at position \a (row, col).
      * If the coefficient does not exist yet, it is inserted via insert(Index,Index),
      * which turns the matrix into uncompressed mode if it was not already. */
    inline Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index start = m_outerIndex[outer];
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
      if(end<=start)
        return insert(row,col);
      const Index p = m_data.searchLowerIndex(start,end-1,inner);
      if((p<end) && (m_data.index(p)==inner))
        return m_data.value(p);
      else
        return insert(row,col);
    }

    /** \returns a reference to a newly inserted non zero coefficient at position \a (row, col).
      * The coefficient must \b not already exist. If the matrix was in compressed mode, it is
      * turned into uncompressed mode with some room reserved in each inner vector. */
    Scalar& insert(Index row, Index col)
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      if(isCompressed())
      {
        reserve(Matrix<Index,Dynamic,1>::Constant(outerSize(), 2));
      }
      return insertUncompressed(row,col);
    }
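
    /* Typical random-insertion pattern (editor's sketch; the matrix size and the
     * per-column estimate of 6 non zeros are illustrative only):
     * \code
     *   SparseMatrix<double> A(1000,1000);
     *   A.reserve(VectorXi::Constant(1000,6)); // ~6 non zeros expected per column
     *   A.insert(i,j) = v_ij;                  // (i,j) must not already exist
     *   A.makeCompressed();                    // optional: back to compressed mode
     * \endcode
     */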

  public:

    class InnerIterator;
    class ReverseInnerIterator;

    /** Removes all non zeros but keeps the allocated memory. */
    inline void setZero()
    {
      m_data.clear();
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
      if(m_innerNonZeros)
        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
    }

    /** \returns the number of non zero coefficients */
    inline Index nonZeros() const
    {
      if(m_innerNonZeros)
        return innerNonZeros().sum();
      return static_cast<Index>(m_data.size());
    }

    /** Preallocates \a reserveSize non zeros.
      * Precondition: the matrix must be in compressed mode. */
    inline void reserve(Index reserveSize)
    {
      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
      m_data.reserve(reserveSize);
    }

#ifdef EIGEN_PARSED_BY_DOXYGEN
    /** Preallocates \a reserveSizes[\a j] non zeros for each column (resp. row) \a j. */
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);
#else
    // The two overloads below dispatch on whether SizesType exposes a value_type
    // (e.g. std::vector) or a Scalar typedef (e.g. Eigen dense vectors); the dummy
    // default argument acts as a poor man's enable_if.
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif =
    #if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC older than 2008 chokes on the 'typename' here
        typename
    #endif
        SizesType::Scalar())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
#endif // EIGEN_PARSED_BY_DOXYGEN
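
    /* Editor's sketch of a per-vector reservation, assuming you can estimate the
     * number of non zeros of each column (resp. row) j:
     * \code
     *   VectorXi sizes(A.outerSize());
     *   // ... fill sizes(j) with an estimate for inner vector j ...
     *   A.reserve(sizes);   // works in both compressed and uncompressed mode
     * \endcode
     */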
  protected:
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {
      if(isCompressed())
      {
        std::size_t totalReserveSize = 0;
        // turn the matrix into non-compressed mode
        m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();

        // temporarily use m_innerNonZeros to hold the new starting positions
        Index* newOuterIndex = m_innerNonZeros;

        Index count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        }
        m_data.reserve(totalReserveSize);
        // move the existing data from the back so that each inner vector gets its reserved room
        Index previousOuterIndex = m_outerIndex[m_outerSize];
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          Index innerNNZ = previousOuterIndex - m_outerIndex[j];
          for(Index i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
          previousOuterIndex = m_outerIndex[j];
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        }
        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);
      }
      else
      {
        Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index)));
        if (!newOuterIndex) internal::throw_std_bad_alloc();

        Index count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
          Index toReserve = std::max<Index>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        }
        newOuterIndex[m_outerSize] = count;

        m_data.resize(count);
        // move the existing data from the back to their new positions
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          Index offset = newOuterIndex[j] - m_outerIndex[j];
          if(offset>0)
          {
            Index innerNNZ = m_innerNonZeros[j];
            for(Index i=innerNNZ-1; i>=0; --i)
            {
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
            }
          }
        }

        std::swap(m_outerIndex, newOuterIndex);
        std::free(newOuterIndex);
      }

    }
  public:

    //--- low level purely coherent filling ---

    /** \internal
      * \sa insertBackByOuterInner, startVec */
    inline Scalar& insertBack(Index row, Index col)
    {
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }

    /** \internal
      * \sa insertBack, insertBackByOuterInnerUnordered */
    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(0, inner);
      return m_data.value(p);
    }

    /** \internal
      * \warning use it only if you know what you are doing */
    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(0, inner);
      return m_data.value(p);
    }

    /** \internal
      * \sa insertBack, insertBackByOuterInner */
    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }

    /** \internal
      * Must be called after inserting a set of non zeros via the low level startVec()/insertBack()
      * API; fills the remaining outer indices so the matrix is left in a coherent compressed state. */
    inline void finalize()
    {
      if(isCompressed())
      {
        Index size = static_cast<Index>(m_data.size());
        Index i = m_outerSize;
        // find the last filled inner vector
        while (i>=0 && m_outerIndex[i]==0)
          --i;
        ++i;
        while (i<=m_outerSize)
        {
          m_outerIndex[i] = size;
          ++i;
        }
      }
    }
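
    /* Editor's sketch of the low level sequential-fill protocol, assuming the
     * default column-major order (each inner vector must be filled in order,
     * with strictly increasing inner indices):
     * \code
     *   SparseMatrix<double> A(rows,cols);
     *   A.reserve(nnzEstimate);
     *   for(Index j=0; j<A.outerSize(); ++j)
     *   {
     *     A.startVec(j);
     *     // for each non zero (i,j) of column j, with increasing i:
     *     A.insertBack(i,j) = v_ij;
     *   }
     *   A.finalize();
     * \endcode
     */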

    //---

    /** Fills \c *this with the list of \em triplets defined by the iterator range \a begin - \a end;
      * see the documentation and example given with the out-of-line definition below. */
    template<typename InputIterators>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end);

    /** \internal Sums up duplicate entries left by an unordered fill; see setFromTriplets(). */
    void sumupDuplicates();

    //---

    /** \internal
      * same as insert(Index,Index) except that the indices are given relative to the storage order */
    Scalar& insertByOuterInner(Index j, Index i)
    {
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
    }

    /** Turns the matrix into the \em compressed format: the inner vectors are packed back to back
      * and the per-vector free room is released. */
    void makeCompressed()
    {
      if(isCompressed())
        return;

      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
      {
        Index nextOldStart = m_outerIndex[j+1];
        Index offset = oldStart - m_outerIndex[j];
        if(offset>0)
        {
          for(Index k=0; k<m_innerNonZeros[j]; ++k)
          {
            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
          }
        }
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      }
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
      m_data.resize(m_outerIndex[m_outerSize]);
      m_data.squeeze();
    }

    /** Turns the matrix into the uncompressed mode. */
    void uncompress()
    {
      if(m_innerNonZeros != 0)
        return;
      m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
      if (!m_innerNonZeros) internal::throw_std_bad_alloc();
      for (Index i = 0; i < m_outerSize; i++)
      {
        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }
    }

    /** Suppresses all non zeros which are \em much \em smaller than \a reference under the tolerance \a epsilon. */
    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      prune(default_prunning_func(reference,epsilon));
    }

    /** Turns the matrix into compressed format and suppresses all non zeros which do not satisfy
      * the predicate \a keep. The functor type \a KeepFunc must implement the following operator:
      * \code
      * bool operator() (const Index& row, const Index& col, const Scalar& value) const;
      * \endcode
      */
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      // in the uncompressed case the data is moved twice (once by makeCompressed, once by the pruning pass)
      makeCompressed();

      Index k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
        {
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k,0);
    }
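
    /* Editor's sketch of a custom pruning predicate; the KeepAboveThreshold
     * functor shown here is illustrative only:
     * \code
     *   struct KeepAboveThreshold {
     *     double thr;
     *     KeepAboveThreshold(double t) : thr(t) {}
     *     template<typename Index>
     *     bool operator()(const Index&, const Index&, const double& value) const
     *     { return std::abs(value) > thr; }
     *   };
     *   A.prune(KeepAboveThreshold(1e-12)); // drop tiny stored coefficients
     * \endcode
     */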

    /** Resizes the matrix to a \a rows x \a cols matrix while keeping the existing non zeros
      * that still fall inside the new bounds.
      * \sa resize(Index,Index) */
    void conservativeResize(Index rows, Index cols)
    {
      // no change in the dimensions: nothing to do
      if (this->rows() == rows && this->cols() == cols) return;

      // if one dimension becomes null, there is nothing to be preserved
      if(rows==0 || cols==0) return resize(rows,cols);

      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
      Index newInnerSize = IsRowMajor ? cols : rows;

      // deal with the inner non zeros
      if (m_innerNonZeros)
      {
        // already in uncompressed mode: grow the m_innerNonZeros array if needed
        Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
        m_innerNonZeros = newInnerNonZeros;

        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
          m_innerNonZeros[i] = 0;
      }
      else if (innerChange < 0)
      {
        // compressed mode, but some non zeros may have to be dropped:
        // switch to uncompressed mode so that they can be removed per vector
        m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
        for(Index i = 0; i < m_outerSize; i++)
          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }

      // drop the non zeros that fall outside the new inner size
      if (m_innerNonZeros && innerChange < 0)
      {
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
        {
          Index &n = m_innerNonZeros[i];
          Index start = m_outerIndex[i];
          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
        }
      }

      m_innerSize = newInnerSize;

      // deal with the outer index
      if (outerChange == 0)
        return;

      Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
      if (!newOuterIndex) internal::throw_std_bad_alloc();
      m_outerIndex = newOuterIndex;
      if (outerChange > 0)
      {
        Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
          m_outerIndex[i] = last;
      }
      m_outerSize += outerChange;
    }
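
    /* Editor's note: resize() discards all non zeros whereas conservativeResize()
     * keeps the ones that still fit, e.g.:
     * \code
     *   A.conservativeResize(A.rows()+1, A.cols()); // existing entries preserved
     *   A.resize(m, n);                             // A is now an empty m x n matrix
     * \endcode
     */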

    /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero.
      * \sa conservativeResize(Index,Index) */
    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        std::free(m_outerIndex);
        m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index)));
        if (!m_outerIndex) internal::throw_std_bad_alloc();

        m_outerSize = outerSize;
      }
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
    }

    /** \internal
      * Resizes the internal value/index buffers to hold \a size non zeros. */
    void resizeNonZeros(Index size)
    {
      m_data.resize(size);
    }

    /** \returns a const expression of the diagonal coefficients. */
    const Diagonal<const SparseMatrix> diagonal() const { return *this; }

    /** Default constructor yielding an empty \c 0 \c x \c 0 matrix. */
    inline SparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(0, 0);
    }

    /** Constructs a \a rows \c x \a cols empty matrix. */
    inline SparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(rows, cols);
    }

    /** Constructs a sparse matrix from the sparse expression \a other. */
    template<typename OtherDerived>
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
      check_template_parameters();
      *this = other.derived();
    }

    /** Constructs a sparse matrix from the sparse selfadjoint view \a other. */
    template<typename OtherDerived, unsigned int UpLo>
    inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other;
    }

    /** Copy constructor (performs a deep copy). */
    inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    /** \brief Copy constructor with in-place evaluation. */
    template<typename OtherDerived>
    SparseMatrix(const ReturnByValue<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      initAssignment(other);
      other.evalTo(*this);
    }

    /** Swaps the content of two sparse matrices of the same type.
      * This is a fast operation that simply swaps the underlying pointers and parameters. */
    inline void swap(SparseMatrix& other)
    {
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      std::swap(m_innerNonZeros, other.m_innerNonZeros);
      m_data.swap(other.m_data);
    }

    /** Sets \c *this to the identity matrix. */
    inline void setIdentity()
    {
      eigen_assert(rows() == cols() && "ONLY FOR SQUARE MATRICES");
      this->m_data.resize(rows());
      Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
      Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
      Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
    }

    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else if(this!=&other)
      {
        initAssignment(other);
        if(other.isCompressed())
        {
          memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
          m_data = other.m_data;
        }
        else
        {
          Base::operator=(other);
        }
      }
      return *this;
    }

#ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename Lhs, typename Rhs>
    inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
    { return Base::operator=(product); }

    template<typename OtherDerived>
    inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
    {
      initAssignment(other);
      return Base::operator=(other.derived());
    }

    template<typename OtherDerived>
    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    { return Base::operator=(other.derived()); }
#endif

    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);

    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    {
      EIGEN_DBG_SPARSE(
        s << "Nonzero entries:\n";
        if(m.isCompressed())
          for (Index i=0; i<m.nonZeros(); ++i)
            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
        else
          for (Index i=0; i<m.outerSize(); ++i)
          {
            Index p = m.m_outerIndex[i];
            Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
            Index k=p;
            for (; k<pe; ++k)
              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
            for (; k<m.m_outerIndex[i+1]; ++k)
              s << "(_,_) ";
          }
        s << std::endl;
        s << std::endl;
        s << "Outer pointers:\n";
        for (Index i=0; i<m.outerSize(); ++i)
          s << m.m_outerIndex[i] << " ";
        s << " $" << std::endl;
        if(!m.isCompressed())
        {
          s << "Inner non zeros:\n";
          for (Index i=0; i<m.outerSize(); ++i)
            s << m.m_innerNonZeros[i] << " ";
          s << " $" << std::endl;
        }
        s << std::endl;
      );
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      return s;
    }

    /** Destructor */
    inline ~SparseMatrix()
    {
      std::free(m_outerIndex);
      std::free(m_innerNonZeros);
    }

#ifndef EIGEN_PARSED_BY_DOXYGEN
    /** Overloaded for performance */
    Scalar sum() const;
#endif

#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
#     include EIGEN_SPARSEMATRIX_PLUGIN
#   endif

  protected:

    /** \internal Resizes to the dimensions of \a other and makes sure \c *this is left in compressed mode. */
    template<typename Other>
    void initAssignment(const Other& other)
    {
      resize(other.rows(), other.cols());
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
    }

    /** \internal
      * \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);

    /** \internal
      * A vector object that is equal to 0 everywhere but \a v at position \a i */
    class SingletonVector
    {
        Index m_index;
        Index m_value;
      public:
        typedef Index value_type;
        SingletonVector(Index i, Index v)
          : m_index(i), m_value(v)
        {}

        Index operator[](Index i) const { return i==m_index ? m_value : 0; }
    };

    /** \internal
      * \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);

  public:
    /** \internal
      * Appends a non zero at the end of inner vector \a outer of an uncompressed matrix,
      * consuming one slot of its reserved room.
      * \sa insert(Index,Index) */
    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      eigen_assert(!isCompressed());
      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
      m_data.index(p) = inner;
      return (m_data.value(p) = 0);
    }

  private:
    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
      EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
    }

    struct default_prunning_func {
      default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
      inline bool operator() (const Index&, const Index&, const Scalar& value) const
      {
        return !internal::isMuchSmallerThan(value, reference, epsilon);
      }
      Scalar reference;
      RealScalar epsilon;
    };
};

template<typename Scalar, int _Options, typename _Index>
class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
{
  public:
    InnerIterator(const SparseMatrix& mat, Index outer)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
    {
      if(mat.isCompressed())
        m_end = mat.m_outerIndex[outer+1];
      else
        m_end = m_id + mat.m_innerNonZeros[outer];
    }

    inline InnerIterator& operator++() { m_id++; return *this; }

    inline const Scalar& value() const { return m_values[m_id]; }
    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }

    inline Index index() const { return m_indices[m_id]; }
    inline Index outer() const { return m_outer; }
    inline Index row() const { return IsRowMajor ? m_outer : index(); }
    inline Index col() const { return IsRowMajor ? index() : m_outer; }

    inline operator bool() const { return (m_id < m_end); }

  protected:
    const Scalar* m_values;
    const Index* m_indices;
    const Index m_outer;
    Index m_id;
    Index m_end;
};

template<typename Scalar, int _Options, typename _Index>
class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
{
  public:
    ReverseInnerIterator(const SparseMatrix& mat, Index outer)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
    {
      if(mat.isCompressed())
        m_id = mat.m_outerIndex[outer+1];
      else
        m_id = m_start + mat.m_innerNonZeros[outer];
    }

    inline ReverseInnerIterator& operator--() { --m_id; return *this; }

    inline const Scalar& value() const { return m_values[m_id-1]; }
    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }

    inline Index index() const { return m_indices[m_id-1]; }
    inline Index outer() const { return m_outer; }
    inline Index row() const { return IsRowMajor ? m_outer : index(); }
    inline Index col() const { return IsRowMajor ? index() : m_outer; }

    inline operator bool() const { return (m_id > m_start); }

  protected:
    const Scalar* m_values;
    const Index* m_indices;
    const Index m_outer;
    Index m_id;
    const Index m_start;
};

namespace internal {

template<typename InputIterator, typename SparseMatrixType>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
{
  EIGEN_UNUSED_VARIABLE(Options);
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::Index Index;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,Index> trMat(mat.rows(),mat.cols());

  if(begin!=end)
  {
    // pass 1: count the number of non zeros per inner vector of trMat
    Matrix<Index,Dynamic,1> wi(trMat.outerSize());
    wi.setZero();
    for(InputIterator it(begin); it!=end; ++it)
    {
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ? it->col() : it->row())++;
    }

    // pass 2: insert all the elements into trMat
    trMat.reserve(wi);
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

    // pass 3: sum up the duplicates
    trMat.sumupDuplicates();
  }

  // trMat has the opposite storage order, so this assignment performs the transposing copy that sorts the entries
  mat = trMat;
}

} // end namespace internal

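/** Fills \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
  *
  * A \em triplet is a tuple (i,j,value) describing a non zero entry. The triplets may be listed
  * in any order and may contain duplicated entries, which are summed up; the result is a
  * \b sorted and \b compressed sparse matrix. Usage sketch (editor's illustration, modelled on
  * the Eigen tutorial and assuming the Eigen::Triplet helper class):
  * \code
  *   typedef Triplet<double> T;
  *   std::vector<T> tripletList;
  *   tripletList.reserve(estimation_of_entries);
  *   // ... fill tripletList with T(i, j, v_ij) entries ...
  *   SparseMatrix<double> m(rows, cols);
  *   m.setFromTriplets(tripletList.begin(), tripletList.end());
  *   // m is ready to go!
  * \endcode
  */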
template<typename Scalar, int _Options, typename _Index>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  internal::set_from_triplets(begin, end, *this);
}

/** \internal */
template<typename Scalar, int _Options, typename _Index>
void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
{
  eigen_assert(!isCompressed());
  // for each inner vector, wi(inner_index) holds the position in m_data where the
  // coefficient at inner_index was first written (or -1 if not seen yet)
  Matrix<Index,Dynamic,1> wi(innerSize());
  wi.fill(-1);
  Index count = 0;

  for(Index j=0; j<outerSize(); ++j)
  {
    Index start = count;
    Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // inner index i already met in this vector: accumulate
        m_data.value(wi(i)) += m_data.value(k);
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}

template<typename Scalar, int _Options, typename _Index>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
  if (needToTranspose)
  {
    // two passes algorithm:
    //  1 - compute the number of coeffs per dest inner vector
    //  2 - do the actual copy/eval
    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
    typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
    OtherCopy otherCopy(other.derived());

    SparseMatrix dest(other.rows(),other.cols());
    Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();

    // pass 1
    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
        ++dest.m_outerIndex[it.index()];

    // prefix sum
    Index count = 0;
    Matrix<Index,Dynamic,1> positions(dest.outerSize());
    for (Index j=0; j<dest.outerSize(); ++j)
    {
      Index tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;
    // alloc
    dest.m_data.resize(count);
    // pass 2
    for (Index j=0; j<otherCopy.outerSize(); ++j)
    {
      for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
      {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    this->swap(dest);
    return *this;
  }
  else
  {
    if(other.isRValue())
      initAssignment(other.derived());
    // no special optimization in the same-storage-order case
    return Base::operator=(other.derived());
  }
}

template<typename _Scalar, int _Options, typename _Index>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
{
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  Index innerNNZ = m_innerNonZeros[outer];
  if(innerNNZ>=room)
  {
    // this inner vector is full: reserve more room for it (reallocates the whole buffer)
    reserve(SingletonVector(outer,std::max<Index>(2,innerNNZ)));
  }

  // shift the tail of the inner vector to keep the inner indices sorted
  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  return (m_data.value(p) = 0);
}

template<typename _Scalar, int _Options, typename _Index>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
{
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
  {
    // we start a new inner vector: fill the skipped outer indices
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
    {
      m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
      --previousOuter;
    }
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

  // here we have to handle the tricky case where the outerIndex array starts with
  // [ 0 0 0 0 0 1 ...] and we are inserting in, e.g., the 2nd inner vector
  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (size_t(m_outerIndex[outer+1]) == m_data.size());

  size_t startId = m_outerIndex[outer];
  size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  double reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
  {
    // if there is no preallocated memory, let's reserve a minimum of 32 elements
    if (m_data.size()==0)
    {
      m_data.reserve(32);
    }
    else
    {
      // we need to reallocate the data: to limit the number of reallocations
      // we use a growth ratio estimated from the current filling ratio;
      // doubles are used to avoid integer overflows
      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
      // the ratio is bounded to:
      //   1) avoid multiple minor reallocations when the matrix is almost filled
      //   2) avoid allocating too much memory when the matrix is almost empty
      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
    }
  }
  m_data.resize(m_data.size()+1,reallocRatio);

  if (!isLastVec)
  {
    if (previousOuter==-1)
    {
      // oops, wrong guess: correct the outer offsets
      for (Index k=0; k<=(outer+1); ++k)
        m_outerIndex[k] = 0;
      Index k=outer+1;
      while(m_outerIndex[k]==0)
        m_outerIndex[k++] = 1;
      while (k<=m_outerSize && m_outerIndex[k]!=0)
        m_outerIndex[k++]++;
      p = 0;
      --k;
      k = m_outerIndex[k]-1;
      while (k>0)
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
    else
    {
      // we are not inserting into the last inner vector:
      // update the following outer indices
      Index j = outer+2;
      while (j<=m_outerSize && m_outerIndex[j]!=0)
        m_outerIndex[j++]++;
      --j;
      // and shift the data of the following vectors
      Index k = m_outerIndex[j]-1;
      while (k>=Index(p))
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
  }

  // finally, shift the tail of the current inner vector to keep it sorted
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }

  m_data.index(p) = inner;
  return (m_data.value(p) = 0);
}

} // end namespace Eigen

#endif // EIGEN_SPARSEMATRIX_H