SparseMatrix.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2010 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSEMATRIX_H
11 #define EIGEN_SPARSEMATRIX_H
12 
13 namespace Eigen {
14 
41 namespace internal {
42 template<typename _Scalar, int _Options, typename _Index>
43 struct traits<SparseMatrix<_Scalar, _Options, _Index> >
44 {
45  typedef _Scalar Scalar;
46  typedef _Index Index;
47  typedef Sparse StorageKind;
48  typedef MatrixXpr XprKind;
49  enum {
50  RowsAtCompileTime = Dynamic,
51  ColsAtCompileTime = Dynamic,
52  MaxRowsAtCompileTime = Dynamic,
53  MaxColsAtCompileTime = Dynamic,
54  Flags = _Options | NestByRefBit | LvalueBit,
55  CoeffReadCost = NumTraits<Scalar>::ReadCost,
56  SupportedAccessPatterns = InnerRandomAccessPattern
57  };
58 };
59 
60 template<typename _Scalar, int _Options, typename _Index, int DiagIndex>
61 struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _Index>, DiagIndex> >
62 {
63  typedef SparseMatrix<_Scalar, _Options, _Index> MatrixType;
64  typedef typename nested<MatrixType>::type MatrixTypeNested;
65  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
66 
67  typedef _Scalar Scalar;
68  typedef Dense StorageKind;
69  typedef _Index Index;
70  typedef MatrixXpr XprKind;
71 
72  enum {
73  RowsAtCompileTime = Dynamic,
74  ColsAtCompileTime = 1,
75  MaxRowsAtCompileTime = Dynamic,
76  MaxColsAtCompileTime = 1,
77  Flags = 0,
78  CoeffReadCost = _MatrixTypeNested::CoeffReadCost*10
79  };
80 };
81 
82 } // end namespace internal
83 
84 template<typename _Scalar, int _Options, typename _Index>
85 class SparseMatrix
86  : public SparseMatrixBase<SparseMatrix<_Scalar, _Options, _Index> >
87 {
88  public:
89  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
90  EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, +=)
91  EIGEN_SPARSE_INHERIT_ASSIGNMENT_OPERATOR(SparseMatrix, -=)
92 
93  typedef MappedSparseMatrix<Scalar,Flags> Map;
94  using Base::IsRowMajor;
95  typedef internal::CompressedStorage<Scalar,Index> Storage;
96  enum {
97  Options = _Options
98  };
99 
100  protected:
101 
102  typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
103 
104  Index m_outerSize;
105  Index m_innerSize;
106  Index* m_outerIndex;
107  Index* m_innerNonZeros; // optional, if null then the data is compressed
108  Storage m_data;
109 
110  Eigen::Map<Matrix<Index,Dynamic,1> > innerNonZeros() { return Eigen::Map<Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
111  const Eigen::Map<const Matrix<Index,Dynamic,1> > innerNonZeros() const { return Eigen::Map<const Matrix<Index,Dynamic,1> >(m_innerNonZeros, m_innerNonZeros?m_outerSize:0); }
112 
113  public:
114 
116  inline bool isCompressed() const { return m_innerNonZeros==0; }
117 
119  inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
121  inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
122 
124  inline Index innerSize() const { return m_innerSize; }
126  inline Index outerSize() const { return m_outerSize; }
127 
131  inline const Scalar* valuePtr() const { return &m_data.value(0); }
135  inline Scalar* valuePtr() { return &m_data.value(0); }
136 
140  inline const Index* innerIndexPtr() const { return &m_data.index(0); }
144  inline Index* innerIndexPtr() { return &m_data.index(0); }
145 
149  inline const Index* outerIndexPtr() const { return m_outerIndex; }
153  inline Index* outerIndexPtr() { return m_outerIndex; }
154 
158  inline const Index* innerNonZeroPtr() const { return m_innerNonZeros; }
162  inline Index* innerNonZeroPtr() { return m_innerNonZeros; }
163 
165  inline Storage& data() { return m_data; }
167  inline const Storage& data() const { return m_data; }
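  // [Illustrative usage sketch; not part of the original Eigen sources.]
  // The pointers above expose the raw compressed column/row storage. Assuming a filled,
  // compressed (see isCompressed()), column-major SparseMatrix<double> named A:
  //
  //   const double* vals  = A.valuePtr();       // the nonzero values
  //   const int*    inner = A.innerIndexPtr();  // row index of each stored value
  //   const int*    outer = A.outerIndexPtr();  // cols()+1 entries, column start offsets
  //   for(int j = 0; j < A.cols(); ++j)
  //     for(int p = outer[j]; p < outer[j+1]; ++p)
  //       std::cout << "(" << inner[p] << "," << j << ") = " << vals[p] << "\n";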
168 
171  inline Scalar coeff(Index row, Index col) const
172  {
173  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
174 
175  const Index outer = IsRowMajor ? row : col;
176  const Index inner = IsRowMajor ? col : row;
177  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
178  return m_data.atInRange(m_outerIndex[outer], end, inner);
179  }
180 
189  inline Scalar& coeffRef(Index row, Index col)
190  {
191  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
192 
193  const Index outer = IsRowMajor ? row : col;
194  const Index inner = IsRowMajor ? col : row;
195 
196  Index start = m_outerIndex[outer];
197  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
198  eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
199  if(end<=start)
200  return insert(row,col);
201  const Index p = m_data.searchLowerIndex(start,end-1,inner);
202  if((p<end) && (m_data.index(p)==inner))
203  return m_data.value(p);
204  else
205  return insert(row,col);
206  }
207 
220  Scalar& insert(Index row, Index col)
221  {
222  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
223 
224  if(isCompressed())
225  {
226  reserve(VectorXi::Constant(outerSize(), 2));
227  }
228  return insertUncompressed(row,col);
229  }
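  // [Illustrative usage sketch; not part of the original Eigen sources.]
  // Typical random-order filling: reserve a per-column estimate so that insert() rarely
  // reallocates, then compress once filling is done. A, n, i, j, value and the estimate of
  // 6 nonzeros per column are assumed names/values:
  //
  //   Eigen::SparseMatrix<double> A(n,n);            // column-major by default
  //   A.reserve(Eigen::VectorXi::Constant(n, 6));    // ~6 nonzeros expected per column
  //   A.insert(i,j) = value;                         // (i,j) must not be stored yet
  //   A.makeCompressed();                            // squeeze out unused room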
230 
231  public:
232 
233  class InnerIterator;
234  class ReverseInnerIterator;
235 
237  inline void setZero()
238  {
239  m_data.clear();
240  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
241  if(m_innerNonZeros)
242  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(Index));
243  }
244 
246  inline Index nonZeros() const
247  {
248  if(m_innerNonZeros)
249  return innerNonZeros().sum();
250  return static_cast<Index>(m_data.size());
251  }
252 
256  inline void reserve(Index reserveSize)
257  {
258  eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
259  m_data.reserve(reserveSize);
260  }
261 
262  #ifdef EIGEN_PARSED_BY_DOXYGEN
263 
266  template<class SizesType>
267  inline void reserve(const SizesType& reserveSizes);
268  #else
269  template<class SizesType>
270  inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif = typename SizesType::value_type())
271  {
272  EIGEN_UNUSED_VARIABLE(enableif);
273  reserveInnerVectors(reserveSizes);
274  }
275  template<class SizesType>
276  inline void reserve(const SizesType& reserveSizes, const typename SizesType::Scalar& enableif =
277  #if (!defined(_MSC_VER)) || (_MSC_VER>=1500) // MSVC 2005 fails to compile with this typename
278  typename
279  #endif
280  SizesType::Scalar())
281  {
282  EIGEN_UNUSED_VARIABLE(enableif);
283  reserveInnerVectors(reserveSizes);
284  }
285  #endif // EIGEN_PARSED_BY_DOXYGEN
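  // [Illustrative usage sketch; not part of the original Eigen sources.]
  // reserveSizes may be any vector-like object indexable by the outer index, e.g. a
  // per-column nonzero count computed in a first pass (counts and A are assumed names):
  //
  //   Eigen::VectorXi counts(A.outerSize());
  //   // ... fill counts[j] with the expected number of nonzeros of inner vector j ...
  //   A.reserve(counts);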
286  protected:
287  template<class SizesType>
288  inline void reserveInnerVectors(const SizesType& reserveSizes)
289  {
290  if(isCompressed())
291  {
292  std::size_t totalReserveSize = 0;
293  // turn the matrix into non-compressed mode
294  m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
295  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
296 
297  // temporarily use m_innerNonZeros to hold the new starting points.
298  Index* newOuterIndex = m_innerNonZeros;
299 
300  Index count = 0;
301  for(Index j=0; j<m_outerSize; ++j)
302  {
303  newOuterIndex[j] = count;
304  count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
305  totalReserveSize += reserveSizes[j];
306  }
307  m_data.reserve(totalReserveSize);
308  Index previousOuterIndex = m_outerIndex[m_outerSize];
309  for(Index j=m_outerSize-1; j>=0; --j)
310  {
311  Index innerNNZ = previousOuterIndex - m_outerIndex[j];
312  for(Index i=innerNNZ-1; i>=0; --i)
313  {
314  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
315  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
316  }
317  previousOuterIndex = m_outerIndex[j];
318  m_outerIndex[j] = newOuterIndex[j];
319  m_innerNonZeros[j] = innerNNZ;
320  }
321  m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
322 
323  m_data.resize(m_outerIndex[m_outerSize]);
324  }
325  else
326  {
327  Index* newOuterIndex = static_cast<Index*>(std::malloc((m_outerSize+1)*sizeof(Index)));
328  if (!newOuterIndex) internal::throw_std_bad_alloc();
329 
330  Index count = 0;
331  for(Index j=0; j<m_outerSize; ++j)
332  {
333  newOuterIndex[j] = count;
334  Index alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
335  Index toReserve = std::max<Index>(reserveSizes[j], alreadyReserved);
336  count += toReserve + m_innerNonZeros[j];
337  }
338  newOuterIndex[m_outerSize] = count;
339 
340  m_data.resize(count);
341  for(Index j=m_outerSize-1; j>=0; --j)
342  {
343  Index offset = newOuterIndex[j] - m_outerIndex[j];
344  if(offset>0)
345  {
346  Index innerNNZ = m_innerNonZeros[j];
347  for(Index i=innerNNZ-1; i>=0; --i)
348  {
349  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
350  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
351  }
352  }
353  }
354 
355  std::swap(m_outerIndex, newOuterIndex);
356  std::free(newOuterIndex);
357  }
358 
359  }
360  public:
361 
362  //--- low level purely coherent filling ---
363 
374  inline Scalar& insertBack(Index row, Index col)
375  {
376  return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
377  }
378 
381  inline Scalar& insertBackByOuterInner(Index outer, Index inner)
382  {
383  eigen_assert(size_t(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
384  eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
385  Index p = m_outerIndex[outer+1];
386  ++m_outerIndex[outer+1];
387  m_data.append(0, inner);
388  return m_data.value(p);
389  }
390 
393  inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
394  {
395  Index p = m_outerIndex[outer+1];
396  ++m_outerIndex[outer+1];
397  m_data.append(0, inner);
398  return m_data.value(p);
399  }
400 
403  inline void startVec(Index outer)
404  {
405  eigen_assert(m_outerIndex[outer]==int(m_data.size()) && "You must call startVec for each inner vector sequentially");
406  eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
407  m_outerIndex[outer+1] = m_outerIndex[outer];
408  }
409 
413  inline void finalize()
414  {
415  if(isCompressed())
416  {
417  Index size = static_cast<Index>(m_data.size());
418  Index i = m_outerSize;
419  // find the last filled column
420  while (i>=0 && m_outerIndex[i]==0)
421  --i;
422  ++i;
423  while (i<=m_outerSize)
424  {
425  m_outerIndex[i] = size;
426  ++i;
427  }
428  }
429  }
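  // [Illustrative usage sketch; not part of the original Eigen sources.]
  // The low-level filling API above must be fed in strictly increasing (outer, inner)
  // order; here a tridiagonal matrix is built column by column (A and n are assumed names):
  //
  //   Eigen::SparseMatrix<double> A(n,n);
  //   A.reserve(3*n);                               // optional, avoids reallocations
  //   for(int j = 0; j < n; ++j)
  //   {
  //     A.startVec(j);                              // once per inner vector, in order
  //     if(j > 0)   A.insertBack(j-1, j) = -1.0;    // inner indices strictly increasing
  //     A.insertBack(j, j) = 2.0;
  //     if(j+1 < n) A.insertBack(j+1, j) = -1.0;
  //   }
  //   A.finalize();                                 // fill the trailing outer indices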
430 
431  //---
432 
433  template<typename InputIterators>
434  void setFromTriplets(const InputIterators& begin, const InputIterators& end);
435 
436  void sumupDuplicates();
437 
438  //---
439 
442  Scalar& insertByOuterInner(Index j, Index i)
443  {
444  return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
445  }
446 
449  void makeCompressed()
450  {
451  if(isCompressed())
452  return;
453 
454  Index oldStart = m_outerIndex[1];
455  m_outerIndex[1] = m_innerNonZeros[0];
456  for(Index j=1; j<m_outerSize; ++j)
457  {
458  Index nextOldStart = m_outerIndex[j+1];
459  Index offset = oldStart - m_outerIndex[j];
460  if(offset>0)
461  {
462  for(Index k=0; k<m_innerNonZeros[j]; ++k)
463  {
464  m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
465  m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
466  }
467  }
468  m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
469  oldStart = nextOldStart;
470  }
471  std::free(m_innerNonZeros);
472  m_innerNonZeros = 0;
473  m_data.resize(m_outerIndex[m_outerSize]);
474  m_data.squeeze();
475  }
476 
478  void uncompress()
479  {
480  if(m_innerNonZeros != 0)
481  return;
482  m_innerNonZeros = static_cast<Index*>(std::malloc(m_outerSize * sizeof(Index)));
483  for (int i = 0; i < m_outerSize; i++)
484  {
485  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
486  }
487  }
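  // [Illustrative usage sketch; not part of the original Eigen sources.]
  // insert() and reserve(sizes) switch the matrix to the uncompressed mode, which keeps
  // free room inside the buffers; makeCompressed() squeezes that room out again, and
  // uncompress() re-enables the per-vector bookkeeping (A, i, j, value are assumed names):
  //
  //   A.insert(i,j) = value;      // matrix is now in uncompressed mode
  //   A.makeCompressed();         // isCompressed() == true, plain CCS/CRS again
  //   A.uncompress();             // back to uncompressed mode (no extra room added)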
488 
490  void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
491  {
492  prune(default_prunning_func(reference,epsilon));
493  }
494 
502  template<typename KeepFunc>
503  void prune(const KeepFunc& keep = KeepFunc())
504  {
505  // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
506  // TODO also implement a unit test
507  makeCompressed();
508 
509  Index k = 0;
510  for(Index j=0; j<m_outerSize; ++j)
511  {
512  Index previousStart = m_outerIndex[j];
513  m_outerIndex[j] = k;
514  Index end = m_outerIndex[j+1];
515  for(Index i=previousStart; i<end; ++i)
516  {
517  if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
518  {
519  m_data.value(k) = m_data.value(i);
520  m_data.index(k) = m_data.index(i);
521  ++k;
522  }
523  }
524  }
525  m_outerIndex[m_outerSize] = k;
526  m_data.resize(k,0);
527  }
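  // [Illustrative usage sketch; not part of the original Eigen sources.]
  // prune() takes either a reference value plus tolerance, or any functor mapping
  // (row, col, value) to "keep this entry?". It leaves the matrix compressed.
  // A and the KeepUpper functor (defined at namespace scope) are assumed names:
  //
  //   A.prune(1.0, 1e-12);              // drop entries much smaller than 1.0
  //
  //   struct KeepUpper {
  //     bool operator()(int row, int col, const double&) const { return col >= row; }
  //   };
  //   A.prune(KeepUpper());             // keep only the upper triangular entries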
528 
532  void conservativeResize(Index rows, Index cols)
533  {
534  // No change
535  if (this->rows() == rows && this->cols() == cols) return;
536 
537  // If one dimension is null, then there is nothing to be preserved
538  if(rows==0 || cols==0) return resize(rows,cols);
539 
540  Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
541  Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
542  Index newInnerSize = IsRowMajor ? cols : rows;
543 
544  // Deals with inner non zeros
545  if (m_innerNonZeros)
546  {
547  // Resize m_innerNonZeros
548  Index *newInnerNonZeros = static_cast<Index*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(Index)));
549  if (!newInnerNonZeros) internal::throw_std_bad_alloc();
550  m_innerNonZeros = newInnerNonZeros;
551 
552  for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
553  m_innerNonZeros[i] = 0;
554  }
555  else if (innerChange < 0)
556  {
557  // Inner size decreased: allocate a new m_innerNonZeros
558  m_innerNonZeros = static_cast<Index*>(std::malloc((m_outerSize+outerChange+1) * sizeof(Index)));
559  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
560  for(Index i = 0; i < m_outerSize; i++)
561  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
562  }
563 
564  // Change the m_innerNonZeros in case of a decrease of inner size
565  if (m_innerNonZeros && innerChange < 0)
566  {
567  for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
568  {
569  Index &n = m_innerNonZeros[i];
570  Index start = m_outerIndex[i];
571  while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
572  }
573  }
574 
575  m_innerSize = newInnerSize;
576 
577  // Re-allocate outer index structure if necessary
578  if (outerChange == 0)
579  return;
580 
581  Index *newOuterIndex = static_cast<Index*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(Index)));
582  if (!newOuterIndex) internal::throw_std_bad_alloc();
583  m_outerIndex = newOuterIndex;
584  if (outerChange > 0)
585  {
586  Index last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
587  for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
588  m_outerIndex[i] = last;
589  }
590  m_outerSize += outerChange;
591  }
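  // [Illustrative usage sketch; not part of the original Eigen sources.]
  // Unlike resize(), which empties the matrix, conservativeResize() keeps the entries
  // that still fit into the new dimensions (A, newRows, newCols are assumed names):
  //
  //   A.conservativeResize(newRows, newCols);   // growing keeps everything,
  //                                             // shrinking drops out-of-range entries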
592 
596  void resize(Index rows, Index cols)
597  {
598  const Index outerSize = IsRowMajor ? rows : cols;
599  m_innerSize = IsRowMajor ? cols : rows;
600  m_data.clear();
601  if (m_outerSize != outerSize || m_outerSize==0)
602  {
603  std::free(m_outerIndex);
604  m_outerIndex = static_cast<Index*>(std::malloc((outerSize + 1) * sizeof(Index)));
605  if (!m_outerIndex) internal::throw_std_bad_alloc();
606 
607  m_outerSize = outerSize;
608  }
609  if(m_innerNonZeros)
610  {
611  std::free(m_innerNonZeros);
612  m_innerNonZeros = 0;
613  }
614  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(Index));
615  }
616 
619  void resizeNonZeros(Index size)
620  {
621  // TODO remove this function
622  m_data.resize(size);
623  }
624 
626  const Diagonal<const SparseMatrix> diagonal() const { return *this; }
627 
629  inline SparseMatrix()
630  : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
631  {
632  check_template_parameters();
633  resize(0, 0);
634  }
635 
637  inline SparseMatrix(Index rows, Index cols)
638  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
639  {
640  check_template_parameters();
641  resize(rows, cols);
642  }
643 
645  template<typename OtherDerived>
646  inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
647  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
648  {
649  EIGEN_STATIC_ASSERT((internal::is_same<typename OtherDerived::Scalar, Scalar>::value),
650  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
651  check_template_parameters();
652  *this = other.derived();
653  }
654 
656  template<typename OtherDerived, unsigned int UpLo>
657  inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
658  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
659  {
660  check_template_parameters();
661  *this = other;
662  }
663 
665  inline SparseMatrix(const SparseMatrix& other)
666  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
667  {
668  check_template_parameters();
669  *this = other.derived();
670  }
671 
673  template<typename OtherDerived>
674  SparseMatrix(const ReturnByValue<OtherDerived>& other)
675  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
676  {
677  check_template_parameters();
678  initAssignment(other);
679  other.evalTo(*this);
680  }
681 
684  inline void swap(SparseMatrix& other)
685  {
686  //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
687  std::swap(m_outerIndex, other.m_outerIndex);
688  std::swap(m_innerSize, other.m_innerSize);
689  std::swap(m_outerSize, other.m_outerSize);
690  std::swap(m_innerNonZeros, other.m_innerNonZeros);
691  m_data.swap(other.m_data);
692  }
693 
695  inline void setIdentity()
696  {
697  eigen_assert(rows() == cols() && "ONLY FOR SQUARE MATRICES");
698  this->m_data.resize(rows());
699  Eigen::Map<Matrix<Index, Dynamic, 1> >(&this->m_data.index(0), rows()).setLinSpaced(0, rows()-1);
700  Eigen::Map<Matrix<Scalar, Dynamic, 1> >(&this->m_data.value(0), rows()).setOnes();
701  Eigen::Map<Matrix<Index, Dynamic, 1> >(this->m_outerIndex, rows()+1).setLinSpaced(0, rows());
702  }
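  // [Illustrative usage sketch; not part of the original Eigen sources.]
  // setIdentity() requires a square matrix that has already been sized (n is assumed):
  //
  //   Eigen::SparseMatrix<double> I(n,n);
  //   I.setIdentity();                  // n nonzeros, one per column, all equal to 1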
703  inline SparseMatrix& operator=(const SparseMatrix& other)
704  {
705  if (other.isRValue())
706  {
707  swap(other.const_cast_derived());
708  }
709  else if(this!=&other)
710  {
711  initAssignment(other);
712  if(other.isCompressed())
713  {
714  memcpy(m_outerIndex, other.m_outerIndex, (m_outerSize+1)*sizeof(Index));
715  m_data = other.m_data;
716  }
717  else
718  {
719  Base::operator=(other);
720  }
721  }
722  return *this;
723  }
724 
725  #ifndef EIGEN_PARSED_BY_DOXYGEN
726  template<typename Lhs, typename Rhs>
727  inline SparseMatrix& operator=(const SparseSparseProduct<Lhs,Rhs>& product)
728  { return Base::operator=(product); }
729 
730  template<typename OtherDerived>
731  inline SparseMatrix& operator=(const ReturnByValue<OtherDerived>& other)
732  {
733  initAssignment(other);
734  return Base::operator=(other.derived());
735  }
736 
737  template<typename OtherDerived>
738  inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
739  { return Base::operator=(other.derived()); }
740  #endif
741 
742  template<typename OtherDerived>
743  EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
744 
745  friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
746  {
747  EIGEN_DBG_SPARSE(
748  s << "Nonzero entries:\n";
749  if(m.isCompressed())
750  for (Index i=0; i<m.nonZeros(); ++i)
751  s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
752  else
753  for (Index i=0; i<m.outerSize(); ++i)
754  {
755  int p = m.m_outerIndex[i];
756  int pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
757  Index k=p;
758  for (; k<pe; ++k)
759  s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
760  for (; k<m.m_outerIndex[i+1]; ++k)
761  s << "(_,_) ";
762  }
763  s << std::endl;
764  s << std::endl;
765  s << "Outer pointers:\n";
766  for (Index i=0; i<m.outerSize(); ++i)
767  s << m.m_outerIndex[i] << " ";
768  s << " $" << std::endl;
769  if(!m.isCompressed())
770  {
771  s << "Inner non zeros:\n";
772  for (Index i=0; i<m.outerSize(); ++i)
773  s << m.m_innerNonZeros[i] << " ";
774  s << " $" << std::endl;
775  }
776  s << std::endl;
777  );
778  s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
779  return s;
780  }
781 
783  inline ~SparseMatrix()
784  {
785  std::free(m_outerIndex);
786  std::free(m_innerNonZeros);
787  }
788 
789 #ifndef EIGEN_PARSED_BY_DOXYGEN
790 
791  Scalar sum() const;
792 #endif
793 
794 # ifdef EIGEN_SPARSEMATRIX_PLUGIN
795 # include EIGEN_SPARSEMATRIX_PLUGIN
796 # endif
797 
798 protected:
799 
800  template<typename Other>
801  void initAssignment(const Other& other)
802  {
803  resize(other.rows(), other.cols());
804  if(m_innerNonZeros)
805  {
806  std::free(m_innerNonZeros);
807  m_innerNonZeros = 0;
808  }
809  }
810 
813  EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
814 
817  class SingletonVector
818  {
819  Index m_index;
820  Index m_value;
821  public:
822  typedef Index value_type;
823  SingletonVector(Index i, Index v)
824  : m_index(i), m_value(v)
825  {}
826 
827  Index operator[](Index i) const { return i==m_index ? m_value : 0; }
828  };
829 
832  EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
833 
834 public:
837  EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
838  {
839  const Index outer = IsRowMajor ? row : col;
840  const Index inner = IsRowMajor ? col : row;
841 
842  eigen_assert(!isCompressed());
843  eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
844 
845  Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
846  m_data.index(p) = inner;
847  return (m_data.value(p) = 0);
848  }
849 
850 private:
851  static void check_template_parameters()
852  {
853  EIGEN_STATIC_ASSERT(NumTraits<Index>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
854  EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
855  }
856 
857  struct default_prunning_func {
858  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
859  inline bool operator() (const Index&, const Index&, const Scalar& value) const
860  {
861  return !internal::isMuchSmallerThan(value, reference, epsilon);
862  }
863  Scalar reference;
864  RealScalar epsilon;
865  };
866 };
867 
868 template<typename Scalar, int _Options, typename _Index>
869 class SparseMatrix<Scalar,_Options,_Index>::InnerIterator
870 {
871  public:
872  InnerIterator(const SparseMatrix& mat, Index outer)
873  : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_id(mat.m_outerIndex[outer])
874  {
875  if(mat.isCompressed())
876  m_end = mat.m_outerIndex[outer+1];
877  else
878  m_end = m_id + mat.m_innerNonZeros[outer];
879  }
880 
881  inline InnerIterator& operator++() { m_id++; return *this; }
882 
883  inline const Scalar& value() const { return m_values[m_id]; }
884  inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id]); }
885 
886  inline Index index() const { return m_indices[m_id]; }
887  inline Index outer() const { return m_outer; }
888  inline Index row() const { return IsRowMajor ? m_outer : index(); }
889  inline Index col() const { return IsRowMajor ? index() : m_outer; }
890 
891  inline operator bool() const { return (m_id < m_end); }
892 
893  protected:
894  const Scalar* m_values;
895  const Index* m_indices;
896  const Index m_outer;
897  Index m_id;
898  Index m_end;
899 };
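// [Illustrative usage sketch; not part of the original Eigen sources.]
// Nonzeros are visited one inner vector at a time; A is an assumed column-major
// SparseMatrix<double>, so the outer loop runs over its columns:
//
//   for(int j = 0; j < A.outerSize(); ++j)
//     for(Eigen::SparseMatrix<double>::InnerIterator it(A,j); it; ++it)
//       std::cout << "(" << it.row() << "," << it.col() << ") = " << it.value() << "\n";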
900 
901 template<typename Scalar, int _Options, typename _Index>
902 class SparseMatrix<Scalar,_Options,_Index>::ReverseInnerIterator
903 {
904  public:
905  ReverseInnerIterator(const SparseMatrix& mat, Index outer)
906  : m_values(mat.valuePtr()), m_indices(mat.innerIndexPtr()), m_outer(outer), m_start(mat.m_outerIndex[outer])
907  {
908  if(mat.isCompressed())
909  m_id = mat.m_outerIndex[outer+1];
910  else
911  m_id = m_start + mat.m_innerNonZeros[outer];
912  }
913 
914  inline ReverseInnerIterator& operator--() { --m_id; return *this; }
915 
916  inline const Scalar& value() const { return m_values[m_id-1]; }
917  inline Scalar& valueRef() { return const_cast<Scalar&>(m_values[m_id-1]); }
918 
919  inline Index index() const { return m_indices[m_id-1]; }
920  inline Index outer() const { return m_outer; }
921  inline Index row() const { return IsRowMajor ? m_outer : index(); }
922  inline Index col() const { return IsRowMajor ? index() : m_outer; }
923 
924  inline operator bool() const { return (m_id > m_start); }
925 
926  protected:
927  const Scalar* m_values;
928  const Index* m_indices;
929  const Index m_outer;
930  Index m_id;
931  const Index m_start;
932 };
933 
934 namespace internal {
935 
936 template<typename InputIterator, typename SparseMatrixType>
937 void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, int Options = 0)
938 {
939  EIGEN_UNUSED_VARIABLE(Options);
940  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
941  typedef typename SparseMatrixType::Scalar Scalar;
942  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor> trMat(mat.rows(),mat.cols());
943 
944  if(begin<end)
945  {
946  // pass 1: count the nnz per inner-vector
947  VectorXi wi(trMat.outerSize());
948  wi.setZero();
949  for(InputIterator it(begin); it!=end; ++it)
950  {
951  eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
952  wi(IsRowMajor ? it->col() : it->row())++;
953  }
954 
955  // pass 2: insert all the elements into trMat
956  trMat.reserve(wi);
957  for(InputIterator it(begin); it!=end; ++it)
958  trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
959 
960  // pass 3:
961  trMat.sumupDuplicates();
962  }
963 
964  // pass 4: transposed copy -> implicit sorting
965  mat = trMat;
966 }
967 
968 }
969 
970 
1008 template<typename Scalar, int _Options, typename _Index>
1009 template<typename InputIterators>
1010 void SparseMatrix<Scalar,_Options,_Index>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
1011 {
1012  internal::set_from_triplets(begin, end, *this);
1013 }
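// [Illustrative usage sketch; not part of the original Eigen sources.]
// Typical use of setFromTriplets(): the triplet list may be unsorted and may contain
// duplicate (i,j) entries, which are summed up. tripletList, estimated_nnz, m, n, i, j
// and value are assumed names:
//
//   std::vector<Eigen::Triplet<double> > tripletList;
//   tripletList.reserve(estimated_nnz);
//   tripletList.push_back(Eigen::Triplet<double>(i, j, value));   // repeat as needed
//   Eigen::SparseMatrix<double> A(m,n);
//   A.setFromTriplets(tripletList.begin(), tripletList.end());    // A ends up compressed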
1014 
1016 template<typename Scalar, int _Options, typename _Index>
1017 void SparseMatrix<Scalar,_Options,_Index>::sumupDuplicates()
1018 {
1019  eigen_assert(!isCompressed());
1020  // TODO, in practice we should be able to use m_innerNonZeros for that task
1021  VectorXi wi(innerSize());
1022  wi.fill(-1);
1023  Index count = 0;
1024  // for each inner-vector, wi[inner_index] will hold the position of the first element in the index/value buffers
1025  for(int j=0; j<outerSize(); ++j)
1026  {
1027  Index start = count;
1028  Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1029  for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1030  {
1031  Index i = m_data.index(k);
1032  if(wi(i)>=start)
1033  {
1034  // we have already met this entry => accumulate it
1035  m_data.value(wi(i)) += m_data.value(k);
1036  }
1037  else
1038  {
1039  m_data.value(count) = m_data.value(k);
1040  m_data.index(count) = m_data.index(k);
1041  wi(i) = count;
1042  ++count;
1043  }
1044  }
1045  m_outerIndex[j] = start;
1046  }
1047  m_outerIndex[m_outerSize] = count;
1048 
1049  // turn the matrix into compressed form
1050  std::free(m_innerNonZeros);
1051  m_innerNonZeros = 0;
1052  m_data.resize(m_outerIndex[m_outerSize]);
1053 }
1054 
1055 template<typename Scalar, int _Options, typename _Index>
1056 template<typename OtherDerived>
1057 EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_Index>& SparseMatrix<Scalar,_Options,_Index>::operator=(const SparseMatrixBase<OtherDerived>& other)
1058 {
1059  EIGEN_STATIC_ASSERT((internal::is_same<typename OtherDerived::Scalar, Scalar>::value),
1060  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1061 
1062  const bool needToTranspose = (Flags & RowMajorBit) != (OtherDerived::Flags & RowMajorBit);
1063  if (needToTranspose)
1064  {
1065  // two passes algorithm:
1066  // 1 - compute the number of coeffs per dest inner vector
1067  // 2 - do the actual copy/eval
1068  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1069  typedef typename internal::nested<OtherDerived,2>::type OtherCopy;
1070  typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1071  OtherCopy otherCopy(other.derived());
1072 
1073  SparseMatrix dest(other.rows(),other.cols());
1074  Eigen::Map<Matrix<Index, Dynamic, 1> > (dest.m_outerIndex,dest.outerSize()).setZero();
1075 
1076  // pass 1
1077  // FIXME the above copy could be merged with that pass
1078  for (Index j=0; j<otherCopy.outerSize(); ++j)
1079  for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
1080  ++dest.m_outerIndex[it.index()];
1081 
1082  // prefix sum
1083  Index count = 0;
1084  VectorXi positions(dest.outerSize());
1085  for (Index j=0; j<dest.outerSize(); ++j)
1086  {
1087  Index tmp = dest.m_outerIndex[j];
1088  dest.m_outerIndex[j] = count;
1089  positions[j] = count;
1090  count += tmp;
1091  }
1092  dest.m_outerIndex[dest.outerSize()] = count;
1093  // alloc
1094  dest.m_data.resize(count);
1095  // pass 2
1096  for (Index j=0; j<otherCopy.outerSize(); ++j)
1097  {
1098  for (typename _OtherCopy::InnerIterator it(otherCopy, j); it; ++it)
1099  {
1100  Index pos = positions[it.index()]++;
1101  dest.m_data.index(pos) = j;
1102  dest.m_data.value(pos) = it.value();
1103  }
1104  }
1105  this->swap(dest);
1106  return *this;
1107  }
1108  else
1109  {
1110  if(other.isRValue())
1111  initAssignment(other.derived());
1112  // there is no special optimization
1113  return Base::operator=(other.derived());
1114  }
1115 }
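// [Illustrative usage sketch; not part of the original Eigen sources.]
// The transposed-copy branch above is what runs when assigning between different storage
// orders (A is an assumed, already filled column-major SparseMatrix<double>):
//
//   Eigen::SparseMatrix<double, Eigen::RowMajor> B = A;   // ColMajor -> RowMajor copy;
//                                                          // entries end up sorted per row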
1116 
1117 template<typename _Scalar, int _Options, typename _Index>
1118 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertUncompressed(Index row, Index col)
1119 {
1120  eigen_assert(!isCompressed());
1121 
1122  const Index outer = IsRowMajor ? row : col;
1123  const Index inner = IsRowMajor ? col : row;
1124 
1125  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1126  Index innerNNZ = m_innerNonZeros[outer];
1127  if(innerNNZ>=room)
1128  {
1129  // this inner vector is full, we need to reallocate the whole buffer :(
1130  reserve(SingletonVector(outer,std::max<Index>(2,innerNNZ)));
1131  }
1132 
1133  Index startId = m_outerIndex[outer];
1134  Index p = startId + m_innerNonZeros[outer];
1135  while ( (p > startId) && (m_data.index(p-1) > inner) )
1136  {
1137  m_data.index(p) = m_data.index(p-1);
1138  m_data.value(p) = m_data.value(p-1);
1139  --p;
1140  }
1141  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1142 
1143  m_innerNonZeros[outer]++;
1144 
1145  m_data.index(p) = inner;
1146  return (m_data.value(p) = 0);
1147 }
1148 
1149 template<typename _Scalar, int _Options, typename _Index>
1150 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_Index>::Scalar& SparseMatrix<_Scalar,_Options,_Index>::insertCompressed(Index row, Index col)
1151 {
1152  eigen_assert(isCompressed());
1153 
1154  const Index outer = IsRowMajor ? row : col;
1155  const Index inner = IsRowMajor ? col : row;
1156 
1157  Index previousOuter = outer;
1158  if (m_outerIndex[outer+1]==0)
1159  {
1160  // we start a new inner vector
1161  while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1162  {
1163  m_outerIndex[previousOuter] = static_cast<Index>(m_data.size());
1164  --previousOuter;
1165  }
1166  m_outerIndex[outer+1] = m_outerIndex[outer];
1167  }
1168 
1169  // here we have to handle the tricky case where the outerIndex array
1170  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
1171  // the 2nd inner vector...
1172  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1173  && (size_t(m_outerIndex[outer+1]) == m_data.size());
1174 
1175  size_t startId = m_outerIndex[outer];
1176  // FIXME let's make sure sizeof(long int) == sizeof(size_t)
1177  size_t p = m_outerIndex[outer+1];
1178  ++m_outerIndex[outer+1];
1179 
1180  float reallocRatio = 1;
1181  if (m_data.allocatedSize()<=m_data.size())
1182  {
1183  // if there is no preallocated memory, let's reserve a minimum of 32 elements
1184  if (m_data.size()==0)
1185  {
1186  m_data.reserve(32);
1187  }
1188  else
1189  {
1190  // we need to reallocate the data, to reduce multiple reallocations
1191  // we use a smart resize algorithm based on the current filling ratio
1192  // in addition, we use float to avoid integer overflows
1193  float nnzEstimate = float(m_outerIndex[outer])*float(m_outerSize)/float(outer+1);
1194  reallocRatio = (nnzEstimate-float(m_data.size()))/float(m_data.size());
1195  // furthermore we bound the realloc ratio to:
1196  // 1) reduce multiple minor reallocs when the matrix is almost filled
1197  // 2) avoid allocating too much memory when the matrix is almost empty
1198  reallocRatio = (std::min)((std::max)(reallocRatio,1.5f),8.f);
1199  }
1200  }
1201  m_data.resize(m_data.size()+1,reallocRatio);
1202 
1203  if (!isLastVec)
1204  {
1205  if (previousOuter==-1)
1206  {
1207  // oops wrong guess.
1208  // let's correct the outer offsets
1209  for (Index k=0; k<=(outer+1); ++k)
1210  m_outerIndex[k] = 0;
1211  Index k=outer+1;
1212  while(m_outerIndex[k]==0)
1213  m_outerIndex[k++] = 1;
1214  while (k<=m_outerSize && m_outerIndex[k]!=0)
1215  m_outerIndex[k++]++;
1216  p = 0;
1217  --k;
1218  k = m_outerIndex[k]-1;
1219  while (k>0)
1220  {
1221  m_data.index(k) = m_data.index(k-1);
1222  m_data.value(k) = m_data.value(k-1);
1223  k--;
1224  }
1225  }
1226  else
1227  {
1228  // we are not inserting into the last inner vec
1229  // update outer indices:
1230  Index j = outer+2;
1231  while (j<=m_outerSize && m_outerIndex[j]!=0)
1232  m_outerIndex[j++]++;
1233  --j;
1234  // shift data of last vecs:
1235  Index k = m_outerIndex[j]-1;
1236  while (k>=Index(p))
1237  {
1238  m_data.index(k) = m_data.index(k-1);
1239  m_data.value(k) = m_data.value(k-1);
1240  k--;
1241  }
1242  }
1243  }
1244 
1245  while ( (p > startId) && (m_data.index(p-1) > inner) )
1246  {
1247  m_data.index(p) = m_data.index(p-1);
1248  m_data.value(p) = m_data.value(p-1);
1249  --p;
1250  }
1251 
1252  m_data.index(p) = inner;
1253  return (m_data.value(p) = 0);
1254 }
1255 
1256 } // end namespace Eigen
1257 
1258 #endif // EIGEN_SPARSEMATRIX_H