#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

namespace Eigen {
namespace internal {

template<typename _Scalar, int _Options, typename _Index>
struct traits<SparseMatrix<_Scalar, _Options, _Index> >
{
  typedef _Scalar Scalar;
  typedef _Index Index;
  typedef Sparse StorageKind;
  typedef MatrixXpr XprKind;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = Dynamic,
    Flags = _Options | NestByRefBit | LvalueBit,
    CoeffReadCost = NumTraits<Scalar>::ReadCost,
    SupportedAccessPatterns = InnerRandomAccessPattern
  };
};

template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
{
  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
  typedef typename nested<MatrixType>::type MatrixTypeNested;
  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;

  typedef _Scalar Scalar;
  typedef Dense StorageKind;
  typedef _Index Index;
  typedef MatrixXpr XprKind;

  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = 1,
    MaxRowsAtCompileTime = Dynamic,
    MaxColsAtCompileTime = 1,
    Flags = 0,
    CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
  };
};

} // end namespace internal
/** \class SparseMatrix
  *
  * A versatile sparse matrix representation, storing the coefficients in
  * compressed column (resp. row) storage, optionally augmented with a per
  * inner-vector count of non zeros allowing fast random insertions.
  *
  * \tparam _Scalar  the scalar type, e.g. float or double
  * \tparam _Options either ColMajor (the default) or RowMajor
  * \tparam _Index   the type of the indices; it must be a \em signed integer type
  */
template<typename _Scalar, int _Options, typename _Index>
class SparseMatrix
  : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
{
  public:
    EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
    EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)

    typedef MappedSparseMatrix<Scalar,Flags> Map;
    using Base::IsRowMajor;
    typedef internal::CompressedStorage<Scalar,Index> Storage;
    enum {
      Options = _Options
    };

  protected:

    typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;

    Index m_outerSize;        // number of columns (resp. rows)
    Index m_innerSize;        // number of rows (resp. columns)
    Index* m_outerIndex;      // starts of the inner vectors (size m_outerSize+1)
    Index* m_innerNonZeros;   // number of non zeros per inner vector; null in compressed mode
    Storage m_data;           // the values and the inner indices

    Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
    const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }

  public:

    /** \returns whether \c *this is in compressed (CCS/CRS) mode */
    inline bool isCompressed() const { return m_innerNonZeros==0; }

    /** \returns the number of rows of the matrix */
    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    /** \returns the number of columns of the matrix */
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }

    /** \returns the number of rows (resp. columns) for a column-major (resp. row-major) matrix */
    inline Index innerSize() const { return m_innerSize; }
    /** \returns the number of columns (resp. rows) for a column-major (resp. row-major) matrix */
    inline Index outerSize() const { return m_outerSize; }

    /** \returns a const pointer to the array of values */
    inline const Scalar* valuePtr() const { return &m_data.value(0); }
    /** \returns a non-const pointer to the array of values */
    inline Scalar* valuePtr() { return &m_data.value(0); }

    /** \returns a const pointer to the array of inner indices */
    inline const Index* innerIndexPtr() const { return &m_data.index(0); }
    /** \returns a non-const pointer to the array of inner indices */
    inline Index* innerIndexPtr() { return &m_data.index(0); }

    /** \returns a const pointer to the array of outer starts */
    inline const Index* outerIndexPtr() const { return m_outerIndex; }
    /** \returns a non-const pointer to the array of outer starts */
    inline Index* outerIndexPtr() { return m_outerIndex; }

    /** \returns a const pointer to the per inner-vector non-zero counts, or 0 in compressed mode */
    inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
    /** \returns a non-const pointer to the per inner-vector non-zero counts, or 0 in compressed mode */
    inline Index* innerNonZeroPtr() { return m_innerNonZeros; }

    /** \internal */
    inline Storage& data() { return m_data; }
    /** \internal */
    inline const Storage& data() const { return m_data; }
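
    // Illustrative sketch of the compressed layout behind the accessors above
    // (not part of the API): for the column-major matrix
    //
    //   [  0  3  0  0 ]
    //   [ 22  0  0  0 ]
    //   [  0  5  0  1 ]
    //
    // the three arrays contain:
    //
    //   valuePtr()      : 22  3  5  1
    //   innerIndexPtr() :  1  0  2  2
    //   outerIndexPtr() :  0  1  3  3  4
    //
    // i.e. outerIndexPtr()[j] is the position of the first entry of column j
    // inside valuePtr()/innerIndexPtr(), and outerIndexPtr()[outerSize()]
    // equals nonZeros().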

    /** \returns the value of the matrix at position \a row, \a col.
      * This function returns Scalar(0) if the element is an explicit \em zero. */
    inline Scalar coeff(Index row, Index col) const
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      return m_data.atInRange(m_outerIndex[outer], end, inner);
    }

    /** \returns a non-const reference to the value of the matrix at position \a row, \a col.
      * If the element does not already exist, it is inserted via insert(Index,Index),
      * which might be expensive. */
    inline Scalar& coeffRef(Index row, Index col)
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      Index start = m_outerIndex[outer];
      Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
      eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
      if(end<=start)
        return insert(row,col);
      const Index p = m_data.searchLowerIndex(start,end-1,inner);
      if((p<end) && (m_data.index(p)==inner))
        return m_data.value(p);
      else
        return insert(row,col);
    }

    /** Inserts a new element at position \a row, \a col, assuming it does not already exist.
      * If \c *this is in compressed mode, it is first turned into uncompressed mode with a
      * default reservation of two entries per inner vector. */
    Scalar& insert(Index row, Index col)
    {
      eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());

      if(isCompressed())
      {
        reserve(VectorXi::Constant(outerSize(), 2));
      }
      return insertUncompressed(row,col);
    }
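
    // Typical random-insertion pattern (an illustrative sketch;
    // `estimatedNnzPerCol` is a hypothetical name for whatever bound the
    // application knows):
    //
    //   SparseMatrix<double> m(rows,cols);
    //   m.reserve(VectorXi::Constant(cols, estimatedNnzPerCol));
    //   // for each nonzero (i,j,v):
    //   m.insert(i,j) = v;        // or m.coeffRef(i,j) += v; if duplicates may occur
    //   m.makeCompressed();       // optional: pack back into compressed mode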

  public:

    class InnerIterator;
    class ReverseInnerIterator;

    /** Removes all non zeros but keeps the allocated memory */
    inline void setZero()
    {
      m_data.clear();
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
      if(m_innerNonZeros)
        memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
    }

    /** \returns the number of non zero coefficients */
    inline Index nonZeros() const
    {
      if(m_innerNonZeros)
        return innerNonZeros().sum();
      return static_cast<Index>(m_data.size());
    }

    /** Preallocates \a reserveSize non zeros.
      * Precondition: the matrix must be in compressed mode. */
    inline void reserve(Index reserveSize)
    {
      eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
      m_data.reserve(reserveSize);
    }

#ifdef EIGEN_PARSED_BY_DOXYGEN
    /** Preallocates \a reserveSizes[j] non zeros for each inner vector j */
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);
#else
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif =
#if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC prior to 2008 rejects the extra 'typename' here
      typename
#endif
      SizesType::Scalar())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
#endif // EIGEN_PARSED_BY_DOXYGEN
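
    // Illustrative sketch of the per-vector reserve API (assuming a
    // column-major matrix; for a row-major one the sizes are per row):
    //
    //   SparseMatrix<double> m(1000,1000);
    //   VectorXi sizes = VectorXi::Constant(1000, 4);  // ~4 nonzeros expected per column
    //   m.reserve(sizes);                              // switches to uncompressed mode
    //
    // Any class providing operator[] plus a value_type (or Scalar) typedef,
    // e.g. std::vector<int>, can be used in place of VectorXi.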
  protected:
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {
      if(isCompressed())
      {
        std::size_t totalReserveSize = 0;
        // turn the matrix into non-compressed mode
        m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();

        // temporarily use m_innerNonZeros to hold the new outer starting points
        Index* newOuterIndex = m_innerNonZeros;

        Index count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        }
        m_data.reserve(totalReserveSize);
        // move the existing data backwards, starting from the last inner vector
        Index previousOuterIndex = m_outerIndex[m_outerSize];
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          Index innerNNZ = previousOuterIndex - m_outerIndex[j];
          for(Index i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
          previousOuterIndex = m_outerIndex[j];
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        }
        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);
      }
      else
      {
        Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index)));
        if (!newOuterIndex) internal::throw_std_bad_alloc();

        Index count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
          Index toReserve = std::max<Index>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        }
        newOuterIndex[m_outerSize] = count;

        m_data.resize(count);
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          Index offset = newOuterIndex[j] - m_outerIndex[j];
          if(offset>0)
          {
            Index innerNNZ = m_innerNonZeros[j];
            for(Index i=innerNNZ-1; i>=0; --i)
            {
              m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
              m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
            }
          }
        }

        // keep the new outer index array and release the old one
        std::swap(m_outerIndex, newOuterIndex);
        std::free(newOuterIndex);
      }

    }
  public:

    //--- low level purely coherent filling ---

    /** \returns a reference to the non zero coefficient at position \a row, \a col assuming that:
      * - the nonzero does not already exist
      * - the new coefficient is the last one according to the storage order
      *
      * Before filling a given inner vector you must call the startVec(Index) function. */
    inline Scalar& insertBack(Index row, Index col)
    {
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }

    /** \sa insertBack, startVec */
    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(0, inner);
      return m_data.value(p);
    }

    /** \internal like insertBackByOuterInner, but the inner indices are not assumed to be sorted */
    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(0, inner);
      return m_data.value(p);
    }

    /** Starts filling the inner vector \a outer; \sa insertBack, insertBackByOuterInner */
    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }

    /** Must be called after inserting a set of non zeros via the coherent filling API
      * (startVec / insertBack); it fills the remaining untouched outer indices. */
    inline void finalize()
    {
      if(isCompressed())
      {
        Index size = static_cast<Index>(m_data.size());
        Index i = m_outerSize;
        // find the last filled inner vector
        while (i>=0 && m_outerIndex[i]==0)
          --i;
        ++i;
        while (i<=m_outerSize)
        {
          m_outerIndex[i] = size;
          ++i;
        }
      }
    }
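
    // Illustrative sketch of the coherent (in-order) filling API: every inner
    // vector must be started in storage order, and entries inserted with
    // strictly increasing inner indices:
    //
    //   SparseMatrix<double> m(rows,cols);
    //   m.reserve(nnz);                    // nnz: expected total, a hypothetical bound
    //   for(Index j=0; j<cols; ++j)        // column-major: one startVec per column
    //   {
    //     m.startVec(j);
    //     m.insertBack(i,j) = v;           // i strictly increasing within column j
    //   }
    //   m.finalize();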

    //---

    template<typename InputIterators>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end);

    void sumupDuplicates();

    //---

    /** \internal same as insert(Index,Index) but expressed in outer/inner indices */
    Scalar& insertByOuterInner(Index j, Index i)
    {
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
    }

    /** Turns the matrix into the \em compressed format */
    void makeCompressed()
    {
      if(isCompressed())
        return;

      Index oldStart = m_outerIndex[1];
      m_outerIndex[1] = m_innerNonZeros[0];
      for(Index j=1; j<m_outerSize; ++j)
      {
        Index nextOldStart = m_outerIndex[j+1];
        Index offset = oldStart - m_outerIndex[j];
        if(offset>0)
        {
          // pack the j-th inner vector right after the previous one
          for(Index k=0; k<m_innerNonZeros[j]; ++k)
          {
            m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
            m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
          }
        }
        m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
        oldStart = nextOldStart;
      }
      std::free(m_innerNonZeros);
      m_innerNonZeros = 0;
      m_data.resize(m_outerIndex[m_outerSize]);
      m_data.squeeze();
    }
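
    // Sketch of the mode transitions (illustrative): random insertions leave
    // the matrix in uncompressed mode, with unused holes between inner
    // vectors; makeCompressed() packs the data and frees m_innerNonZeros,
    // which code consuming the raw *Ptr() arrays typically expects:
    //
    //   m.insert(i,j) = v;     // m.isCompressed() is now false
    //   m.makeCompressed();    // m.isCompressed() is true again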

    /** Turns the matrix into the uncompressed mode */
    void uncompress()
    {
      if(m_innerNonZeros != 0)
        return;
      m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
      if (!m_innerNonZeros) internal::throw_std_bad_alloc();
      for (Index i = 0; i < m_outerSize; i++)
      {
        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }
    }

    /** Suppresses all nonzeros which are \em much \em smaller than \a reference under the tolerance \a epsilon */
    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      prune(default_prunning_func(reference,epsilon));
    }

    /** Turns the matrix into compressed format and suppresses all nonzeros which do not satisfy the predicate \a keep.
      * The functor type \a KeepFunc must implement the following operator:
      * \code bool operator() (const Index& row, const Index& col, const Scalar& value) const; \endcode */
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      // the pruning loop below assumes compressed storage
      makeCompressed();

      Index k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        for(Index i=previousStart; i<end; ++i)
        {
          if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
          {
            m_data.value(k) = m_data.value(i);
            m_data.index(k) = m_data.index(i);
            ++k;
          }
        }
      }
      m_outerIndex[m_outerSize] = k;
      m_data.resize(k,0);
    }
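
    // Illustrative use of the predicate overload (a sketch; the KeepDiag
    // functor below is hypothetical, any callable with the (row, col, value)
    // signature works):
    //
    //   struct KeepDiag {
    //     inline bool operator() (const int& row, const int& col, const double&) const
    //     { return row == col; }
    //   };
    //   m.prune(KeepDiag());     // drop all off-diagonal entries
    //   m.prune(1.0, 1e-12);     // or: drop entries much smaller than 1.0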

    /** Resizes the matrix to a \a rows x \a cols matrix while keeping the current values;
      * entries that fall outside the new size are discarded. */
    void conservativeResize(Index rows, Index cols)
    {
      // if the size stays the same there is nothing to do
      if (this->rows() == rows && this->cols() == cols) return;

      // if one dimension is null we can use the regular (destructive) resize
      if(rows==0 || cols==0) return resize(rows,cols);

      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
      Index newInnerSize = IsRowMajor ? cols : rows;

      // deal with the inner non-zero counters first
      if (m_innerNonZeros)
      {
        // uncompressed mode: resize the counter array and zero the new entries
        Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
        if (!newInnerNonZeros) internal::throw_std_bad_alloc();
        m_innerNonZeros = newInnerNonZeros;

        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
          m_innerNonZeros[i] = 0;
      }
      else if (innerChange < 0)
      {
        // the inner size shrinks: explicit counters are needed to drop
        // out-of-range entries, even though the matrix was compressed
        m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
        if (!m_innerNonZeros) internal::throw_std_bad_alloc();
        for(Index i = 0; i < m_outerSize; i++)
          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }

      // drop the entries whose inner index is now out of range
      if (m_innerNonZeros && innerChange < 0)
      {
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
        {
          Index &n = m_innerNonZeros[i];
          Index start = m_outerIndex[i];
          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
        }
      }

      m_innerSize = newInnerSize;

      // deal with the outer index array
      if (outerChange == 0)
        return;

      Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
      if (!newOuterIndex) internal::throw_std_bad_alloc();
      m_outerIndex = newOuterIndex;
      if (outerChange > 0)
      {
        Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
          m_outerIndex[i] = last;
      }
      m_outerSize += outerChange;
    }
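
    // Sketch: conservativeResize keeps the existing entries while resize()
    // discards them:
    //
    //   SparseMatrix<double> m(3,3);
    //   m.insert(0,0) = 1.0;
    //   m.conservativeResize(4,4);   // m.coeff(0,0) is still 1.0
    //   m.resize(4,4);               // m is now empty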

    /** Resizes the matrix to a \a rows x \a cols matrix and initializes it to zero */
    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        std::free(m_outerIndex);
        m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index)));
        if (!m_outerIndex) internal::throw_std_bad_alloc();

        m_outerSize = outerSize;
      }
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
    }

    /** \internal Resizes the non zeros array to \a size */
    void resizeNonZeros(Index size)
    {
      m_data.resize(size);
    }

    /** \returns a const expression of the diagonal coefficients */
    const Diagonal<const SparseMatrix> diagonal() const { return *this; }

    /** Default constructor yielding an empty \c 0 \c x \c 0 matrix */
    inline SparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(0, 0);
    }

    /** Constructs a \a rows \c x \a cols empty matrix */
    inline SparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      resize(rows, cols);
    }

    /** Constructs a sparse matrix from the sparse expression \a other */
    template<typename OtherDerived>
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
      check_template_parameters();
      *this = other.derived();
    }

    /** Constructs a sparse matrix from the sparse selfadjoint view \a other */
    template<typename OtherDerived, unsigned int UpLo>
    inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other;
    }

    /** Copy constructor (performs a deep copy) */
    inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      *this = other.derived();
    }

    /** \brief Copy constructor with in-place evaluation */
    template<typename OtherDerived>
    SparseMatrix(const ReturnByValue<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      initAssignment(other);
      other.evalTo(*this);
    }

    /** Swaps the content of two sparse matrices of the same type.
      * This is a fast operation that simply swaps the underlying pointers and parameters. */
    inline void swap(SparseMatrix& other)
    {
      std::swap(m_outerIndex, other.m_outerIndex);
      std::swap(m_innerSize, other.m_innerSize);
      std::swap(m_outerSize, other.m_outerSize);
      std::swap(m_innerNonZeros, other.m_innerNonZeros);
      m_data.swap(other.m_data);
    }

    /** Sets \c *this to the identity matrix */
    inline void setIdentity()
    {
      eigen_assert(rows() == cols() && "ONLY FOR SQUARE MATRICES");
      this->m_data.resize(rows());
      Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
      Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
      Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
    }
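
    // Sketch:
    //
    //   SparseMatrix<double> I(n,n);
    //   I.setIdentity();   // n nonzeros, one 1.0 per diagonal entry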
    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      if (other.isRValue())
      {
        swap(other.const_cast_derived());
      }
      else if(this!=&other)
      {
        initAssignment(other);
        if(other.isCompressed())
        {
          memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
          m_data = other.m_data;
        }
        else
        {
          Base::operator=(other);
        }
      }
      return *this;
    }

#ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename Lhs, typename Rhs>
    inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
    { return Base::operator=(product); }

    template<typename OtherDerived>
    inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
    {
      initAssignment(other);
      return Base::operator=(other.derived());
    }

    template<typename OtherDerived>
    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    { return Base::operator=(other.derived()); }
#endif

    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);

    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    {
      EIGEN_DBG_SPARSE(
        s << "Nonzero entries:\n";
        if(m.isCompressed())
          for (Index i=0; i<m.nonZeros(); ++i)
            s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
        else
          for (Index i=0; i<m.outerSize(); ++i)
          {
            Index p = m.m_outerIndex[i];
            Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
            Index k=p;
            for (; k<pe; ++k)
              s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
            for (; k<m.m_outerIndex[i+1]; ++k)
              s << "(_,_) ";
          }
        s << std::endl;
        s << std::endl;
        s << "Outer pointers:\n";
        for (Index i=0; i<m.outerSize(); ++i)
          s << m.m_outerIndex[i] << " ";
        s << " $" << std::endl;
        if(!m.isCompressed())
        {
          s << "Inner non zeros:\n";
          for (Index i=0; i<m.outerSize(); ++i)
            s << m.m_innerNonZeros[i] << " ";
          s << " $" << std::endl;
        }
        s << std::endl;
      );
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      return s;
    }

    /** Destructor */
    inline ~SparseMatrix()
    {
      std::free(m_outerIndex);
      std::free(m_innerNonZeros);
    }

#ifndef EIGEN_PARSED_BY_DOXYGEN
    /** Overloaded for performance */
    Scalar sum() const;
#endif

#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
#     include EIGEN_SPARSEMATRIX_PLUGIN
#   endif

  protected:

    /** \internal Resizes \c *this to the size of \a other and releases the inner
      * non-zero counters, so that the assignment starts from compressed mode. */
    template<typename Other>
    void initAssignment(const Other& other)
    {
      resize(other.rows(), other.cols());
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
    }

    /** \internal \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);

    /** \internal A vector which is all zeros except for a single entry:
      * operator[](m_index) returns m_value. Used to grow a single inner vector via reserve(). */
    class SingletonVector
    {
        Index m_index;
        Index m_value;
      public:
        typedef Index value_type;
        SingletonVector(Index i, Index v)
          : m_index(i), m_value(v)
        {}

        Index operator[](Index i) const { return i==m_index ? m_value : 0; }
    };

    /** \internal \sa insert(Index,Index) */
    EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);

  public:
    /** \internal Appends the inner index at the end of the inner vector given by
      * \a row (resp. \a col), assuming enough room was reserved; \sa insert(Index,Index) */
    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      eigen_assert(!isCompressed());
      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
      m_data.index(p) = inner;
      return (m_data.value(p) = 0);
    }

  private:
    static void check_template_parameters()
    {
      EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
      EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
    }

    struct default_prunning_func {
      default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
      inline bool operator() (const Index&, const Index&, const Scalar& value) const
      {
        return !internal::isMuchSmallerThan(value, reference, epsilon);
      }
      Scalar reference;
      RealScalar epsilon;
    };
};

template<typename Scalar, int _Options, typename _Index>
class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
{
  public:
    InnerIterator(const SparseMatrix& mat, Index outer)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
    {
      if(mat.isCompressed())
        m_end = mat.m_outerIndex[outer+1];
      else
        m_end = m_id + mat.m_innerNonZeros[outer];
    }

    inline InnerIterator& operator++() { m_id++; return *this; }

    inline const Scalar& value() const { return m_values[m_id]; }
    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }

    inline Index index() const { return m_indices[m_id]; }
    inline Index outer() const { return m_outer; }
    inline Index row() const { return IsRowMajor ? m_outer : index(); }
    inline Index col() const { return IsRowMajor ? index() : m_outer; }

    inline operator bool() const { return (m_id < m_end); }

  protected:
    const Scalar* m_values;
    const Index* m_indices;
    const Index m_outer;
    Index m_id;
    Index m_end;
};
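
// Illustrative iteration pattern over all nonzeros (a sketch; `mat` is any
// SparseMatrix<double> and process() is hypothetical):
//
//   for (int j=0; j<mat.outerSize(); ++j)
//     for (SparseMatrix<double>::InnerIterator it(mat,j); it; ++it)
//       process(it.row(), it.col(), it.value());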

template<typename Scalar, int _Options, typename _Index>
class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
{
  public:
    ReverseInnerIterator(const SparseMatrix& mat, Index outer)
      : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
    {
      if(mat.isCompressed())
        m_id = mat.m_outerIndex[outer+1];
      else
        m_id = m_start + mat.m_innerNonZeros[outer];
    }

    inline ReverseInnerIterator& operator--() { --m_id; return *this; }

    inline const Scalar& value() const { return m_values[m_id-1]; }
    inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }

    inline Index index() const { return m_indices[m_id-1]; }
    inline Index outer() const { return m_outer; }
    inline Index row() const { return IsRowMajor ? m_outer : index(); }
    inline Index col() const { return IsRowMajor ? index() : m_outer; }

    inline operator bool() const { return (m_id > m_start); }

  protected:
    const Scalar* m_values;
    const Index* m_indices;
    const Index m_outer;
    Index m_id;
    const Index m_start;
};

namespace internal {

template<typename InputIterator, typename SparseMatrixType>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
{
  EIGEN_UNUSED_VARIABLE(Options);
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());

  if(begin!=end)
  {
    // pass 1: count the number of triplets per inner vector of the transposed matrix
    VectorXi wi(trMat.outerSize());
    wi.setZero();
    for(InputIterator it(begin); it!=end; ++it)
    {
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ? it->col() : it->row())++;
    }

    // pass 2: insert all the elements into trMat
    trMat.reserve(wi);
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

    // pass 3: collapse duplicates
    trMat.sumupDuplicates();
  }

  // pass 4: transpose the whole thing
  mat = trMat;
}

} // end namespace internal

/** Fills \c *this with the list of \em triplets defined by the iterator range \a begin - \a end.
  *
  * A \em triplet provides the row(), col() and value() of an element to insert
  * (e.g. Eigen::Triplet<Scalar>). The list may contain duplicated entries, which are
  * summed up, and it does not have to be sorted. The initial content of \c *this is
  * destroyed, and the result is left in compressed mode.
  */
template<typename Scalar, int _Options, typename _Index>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  internal::set_from_triplets(begin, end, *this);
}
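
// Illustrative sketch of the triplet-based fill (coefficients and sizes are
// made up for the example):
//
//   typedef Eigen::Triplet<double> T;
//   std::vector<T> triplets;
//   triplets.push_back(T(0,0, 3.0));
//   triplets.push_back(T(1,2,-1.0));
//   triplets.push_back(T(1,2, 2.0));   // duplicate: will be summed to 1.0
//   SparseMatrix<double> m(4,4);
//   m.setFromTriplets(triplets.begin(), triplets.end());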

/** \internal Sums up all duplicated entries; assumes uncompressed mode. */
template<typename Scalar, int _Options, typename _Index>
void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
{
  eigen_assert(!isCompressed());
  // within the current inner vector, wi(inner) holds the position in m_data of
  // the first entry seen with inner index "inner", or a value < start otherwise
  VectorXi wi(innerSize());
  wi.fill(-1);
  Index count = 0;

  for(Index j=0; j<outerSize(); ++j)
  {
    Index start = count;
    Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // we already met this entry => accumulate it
        m_data.value(wi(i)) += m_data.value(k);
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}

template<typename Scalar, int _Options, typename _Index>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
  if (needToTranspose)
  {
    // two-pass algorithm:
    //  1 - compute the number of coeffs per dest inner vector,
    //  2 - do the actual copy/eval.
    // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed.
    typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
    typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
    OtherCopy otherCopy(other.derived());

    SparseMatrix dest(other.rows(),other.cols());
    Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();

    // pass 1: count the nonzeros per dest inner vector
    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
        ++dest.m_outerIndex[it.index()];

    // prefix sum: turn the counts into starting positions
    Index count = 0;
    VectorXi positions(dest.outerSize());
    for (Index j=0; j<dest.outerSize(); ++j)
    {
      Index tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;
    // pass 2: allocate and do the actual copy
    dest.m_data.resize(count);
    for (Index j=0; j<otherCopy.outerSize(); ++j)
    {
      for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
      {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    this->swap(dest);
    return *this;
  }
  else
  {
    if(other.isRValue())
      initAssignment(other.derived());
    // no transposition needed: fall back to the generic assignment
    return Base::operator=(other.derived());
  }
}

template<typename _Scalar, int _Options, typename _Index>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
{
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  Index innerNNZ = m_innerNonZeros[outer];
  if(innerNNZ>=room)
  {
    // this inner vector is full: grow it via a one-vector reserve
    reserve(SingletonVector(outer,std::max<Index>(2,innerNNZ)));
  }

  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists; call coeffRef instead");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  return (m_data.value(p) = 0);
}

template<typename _Scalar, int _Options, typename _Index>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
{
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
  {
    // we start a new inner vector: set the still untouched outer starts to the current end
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
    {
      m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
      --previousOuter;
    }
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

  // the target vector is the last filled one iff m_outerIndex[outer+1] already
  // points to the end of the data (mind the tricky case of an outer index
  // array still starting with [ 0 0 0 0 0 1 ...])
  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (size_t(m_outerIndex[outer+1]) == m_data.size());

  size_t startId = m_outerIndex[outer];
  size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  float reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
  {
    // the data buffer is full and must be reallocated
    if (m_data.size()==0)
    {
      m_data.reserve(32);
    }
    else
    {
      // estimate the final number of non zeros from the current filling ratio;
      // float arithmetic is used to avoid integer overflows
      float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
      reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
      // bound the ratio to avoid many small reallocations when the matrix is
      // almost filled, and over-allocation when it is almost empty
      reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
    }
  }
  m_data.resize(m_data.size()+1,reallocRatio);

  if (!isLastVec)
  {
    if (previousOuter==-1)
    {
      // the whole outer index array was still zero: rebuild it so that every
      // vector up to outer is empty and the following ones are shifted by one
      for (Index k=0; k<=(outer+1); ++k)
        m_outerIndex[k] = 0;
      Index k=outer+1;
      while(m_outerIndex[k]==0)
        m_outerIndex[k++] = 1;
      while (k<=m_outerSize && m_outerIndex[k]!=0)
        m_outerIndex[k++]++;
      p = 0;
      --k;
      k = m_outerIndex[k]-1;
      while (k>0)
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
    else
    {
      // shift the outer starts of the following vectors by one, then shift
      // their data one position to the right to make room for the new element
      Index j = outer+2;
      while (j<=m_outerSize && m_outerIndex[j]!=0)
        m_outerIndex[j++]++;
      --j;
      Index k = m_outerIndex[j]-1;
      while (k>=Index(p))
      {
        m_data.index(k) = m_data.index(k-1);
        m_data.value(k) = m_data.value(k-1);
        k--;
      }
    }
  }

  // finally, move the tail of the target inner vector to keep it sorted
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }

  m_data.index(p) = inner;
  return (m_data.value(p) = 0);
}

} // end namespace Eigen

#endif // EIGEN_SPARSEMATRIX_H