SparseMatrix.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSEMATRIX_H
11 #define EIGEN_SPARSEMATRIX_H
12 
13 namespace Eigen {
14 
45 namespace internal {
46 template<typename _Scalar, int _Options, typename _StorageIndex>
47 struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
48 {
49  typedef _Scalar Scalar;
50  typedef _StorageIndex StorageIndex;
51  typedef Sparse StorageKind;
52  typedef MatrixXpr XprKind;
53  enum {
54  RowsAtCompileTime = Dynamic,
55  ColsAtCompileTime = Dynamic,
56  MaxRowsAtCompileTime = Dynamic,
57  MaxColsAtCompileTime = Dynamic,
58  Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
59  SupportedAccessPatterns = InnerRandomAccessPattern
60  };
61 };
62 
63 template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
64 struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
65 {
69 
70  typedef _Scalar Scalar;
71  typedef Dense StorageKind;
72  typedef _StorageIndex StorageIndex;
73  typedef MatrixXpr XprKind;
74 
75  enum {
76  RowsAtCompileTime = Dynamic,
77  ColsAtCompileTime = 1,
78  MaxRowsAtCompileTime = Dynamic,
79  MaxColsAtCompileTime = 1,
80  Flags = LvalueBit
81  };
82 };
83 
84 template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
85 struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
86  : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
87 {
88  enum {
89  Flags = 0
90  };
91 };
92 
93 } // end namespace internal
94 
95 template<typename _Scalar, int _Options, typename _StorageIndex>
96 class SparseMatrix
97  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
98 {
99  typedef SparseCompressedBase<SparseMatrix> Base;
100  using Base::convert_index;
101  friend class SparseVector<_Scalar,0,_StorageIndex>;
102  public:
103  using Base::isCompressed;
104  using Base::nonZeros;
105  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
106  using Base::operator+=;
107  using Base::operator-=;
108 
109  typedef MappedSparseMatrix<Scalar,Flags> Map;
110  typedef Diagonal<SparseMatrix> DiagonalReturnType;
111  typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
112  typedef typename Base::InnerIterator InnerIterator;
113  typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
114 
115 
116  using Base::IsRowMajor;
117  typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
118  enum {
119  Options = _Options
120  };
121 
122  typedef typename Base::IndexVector IndexVector;
123  typedef typename Base::ScalarVector ScalarVector;
124  protected:
125  typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0)> TransposedSparseMatrix;
126 
127  Index m_outerSize;
128  Index m_innerSize;
129  StorageIndex* m_outerIndex;
130  StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
131  Storage m_data;
132 
133  public:
134 
136  inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
138  inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
139 
141  inline Index innerSize() const { return m_innerSize; }
143  inline Index outerSize() const { return m_outerSize; }
144 
148  inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
152  inline Scalar* valuePtr() { return m_data.valuePtr(); }
153 
157  inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
161  inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
162 
166  inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
170  inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
171 
175  inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
179  inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
180 
182  inline Storage& data() { return m_data; }
184  inline const Storage& data() const { return m_data; }
185 
188  inline Scalar coeff(Index row, Index col) const
189  {
190  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
191 
192  const Index outer = IsRowMajor ? row : col;
193  const Index inner = IsRowMajor ? col : row;
194  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
195  return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
196  }
197 
206  Scalar& coeffRef(Index row, Index col)
207  {
208  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
209 
210  const Index outer = IsRowMajor ? row : col;
211  const Index inner = IsRowMajor ? col : row;
212 
213  Index start = m_outerIndex[outer];
214  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
215  eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
216  if(end<=start)
217  return insert(row,col);
218  const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
219  if((p<end) && (m_data.index(p)==inner))
220  return m_data.value(p);
221  else
222  return insert(row,col);
223  }
224 
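  // A minimal usage sketch (assuming a hypothetical SparseMatrix<double> A): coeff() is a
  // pure lookup that returns Scalar(0) for absent entries, while coeffRef() falls back to
  // insert() and therefore creates the entry if it does not exist yet.
  //
  //   Eigen::SparseMatrix<double> A(1000,1000);
  //   A.coeffRef(3,7) += 1.0;      // creates the (3,7) entry on first use, then updates it
  //   double v = A.coeff(5,5);     // read-only access: returns 0.0 and leaves A unchanged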
241 
242  public:
243 
251  inline void setZero()
252  {
253  m_data.clear();
254  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
255  if(m_innerNonZeros)
256  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
257  }
258 
262  inline void reserve(Index reserveSize)
263  {
264  eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
265  m_data.reserve(reserveSize);
266  }
267 
268  #ifdef EIGEN_PARSED_BY_DOXYGEN
269 
281  template<class SizesType>
282  inline void reserve(const SizesType& reserveSizes);
283  #else
284  template<class SizesType>
285  inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
286  #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
287  typename
288  #endif
289  SizesType::value_type())
290  {
291  EIGEN_UNUSED_VARIABLE(enableif);
292  reserveInnerVectors(reserveSizes);
293  }
294  #endif // EIGEN_PARSED_BY_DOXYGEN
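  // A minimal usage sketch of the per-inner-vector reserve() overload above (nnzPerCol and
  // the insertion loop are placeholders supplied by the caller):
  //
  //   Eigen::SparseMatrix<double> A(rows,cols);                       // column-major by default
  //   Eigen::VectorXi nnzPerCol = Eigen::VectorXi::Constant(cols, 6); // expected entries per column
  //   A.reserve(nnzPerCol);
  //   // ... A.insert(i,j) = v_ij; for each nonzero, cheap while the estimates hold ...
  //   A.makeCompressed();                                             // optional: squeeze out free space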
295  protected:
296  template<class SizesType>
297  inline void reserveInnerVectors(const SizesType& reserveSizes)
298  {
299  if(isCompressed())
300  {
301  Index totalReserveSize = 0;
302  // turn the matrix into non-compressed mode
303  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
304  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
305 
306  // temporarily use m_innerNonZeros to hold the new starting points.
307  StorageIndex* newOuterIndex = m_innerNonZeros;
308 
309  StorageIndex count = 0;
310  for(Index j=0; j<m_outerSize; ++j)
311  {
312  newOuterIndex[j] = count;
313  count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
314  totalReserveSize += reserveSizes[j];
315  }
316  m_data.reserve(totalReserveSize);
317  StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
318  for(Index j=m_outerSize-1; j>=0; --j)
319  {
320  StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
321  for(Index i=innerNNZ-1; i>=0; --i)
322  {
323  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
324  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
325  }
326  previousOuterIndex = m_outerIndex[j];
327  m_outerIndex[j] = newOuterIndex[j];
328  m_innerNonZeros[j] = innerNNZ;
329  }
330  m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
331 
332  m_data.resize(m_outerIndex[m_outerSize]);
333  }
334  else
335  {
336  StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
337  if (!newOuterIndex) internal::throw_std_bad_alloc();
338 
339  StorageIndex count = 0;
340  for(Index j=0; j<m_outerSize; ++j)
341  {
342  newOuterIndex[j] = count;
343  StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
344  StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
345  count += toReserve + m_innerNonZeros[j];
346  }
347  newOuterIndex[m_outerSize] = count;
348 
349  m_data.resize(count);
350  for(Index j=m_outerSize-1; j>=0; --j)
351  {
352  Index offset = newOuterIndex[j] - m_outerIndex[j];
353  if(offset>0)
354  {
355  StorageIndex innerNNZ = m_innerNonZeros[j];
356  for(Index i=innerNNZ-1; i>=0; --i)
357  {
358  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
359  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
360  }
361  }
362  }
363 
364  std::swap(m_outerIndex, newOuterIndex);
365  std::free(newOuterIndex);
366  }
367 
368  }
369  public:
370 
371  //--- low level purely coherent filling ---
372 
383  inline Scalar& insertBack(Index row, Index col)
384  {
385  return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
386  }
387 
390  inline Scalar& insertBackByOuterInner(Index outer, Index inner)
391  {
392  eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
393  eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
394  Index p = m_outerIndex[outer+1];
395  ++m_outerIndex[outer+1];
396  m_data.append(Scalar(0), inner);
397  return m_data.value(p);
398  }
399 
402  inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
403  {
404  Index p = m_outerIndex[outer+1];
405  ++m_outerIndex[outer+1];
406  m_data.append(Scalar(0), inner);
407  return m_data.value(p);
408  }
409 
412  inline void startVec(Index outer)
413  {
414  eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
415  eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
416  m_outerIndex[outer+1] = m_outerIndex[outer];
417  }
418 
422  inline void finalize()
423  {
424  if(isCompressed())
425  {
426  StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
427  Index i = m_outerSize;
428  // find the last filled column
429  while (i>=0 && m_outerIndex[i]==0)
430  --i;
431  ++i;
432  while (i<=m_outerSize)
433  {
434  m_outerIndex[i] = size;
435  ++i;
436  }
437  }
438  }
439 
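  // A minimal usage sketch of the low-level ordered filling API above (startVec / insertBack /
  // finalize); n, estimatedNnz and the per-column entries are placeholders. Inner vectors must
  // be visited in order and entries appended with strictly increasing inner indices:
  //
  //   Eigen::SparseMatrix<double> A(n,n);
  //   A.reserve(estimatedNnz);
  //   for(int j=0; j<n; ++j)
  //   {
  //     A.startVec(j);
  //     // for each row i of column j, with i strictly increasing:
  //     //   A.insertBack(i,j) = value;
  //   }
  //   A.finalize();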
440  //---
441 
442  template<typename InputIterators>
443  void setFromTriplets(const InputIterators& begin, const InputIterators& end);
444 
445  template<typename InputIterators,typename DupFunctor>
446  void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
447 
448  void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
449 
450  template<typename DupFunctor>
451  void collapseDuplicates(DupFunctor dup_func = DupFunctor());
452 
453  //---
454 
457  Scalar& insertByOuterInner(Index j, Index i)
458  {
459  return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
460  }
461 
464  void makeCompressed()
465  {
466  if(isCompressed())
467  return;
468 
469  eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
470 
471  Index oldStart = m_outerIndex[1];
472  m_outerIndex[1] = m_innerNonZeros[0];
473  for(Index j=1; j<m_outerSize; ++j)
474  {
475  Index nextOldStart = m_outerIndex[j+1];
476  Index offset = oldStart - m_outerIndex[j];
477  if(offset>0)
478  {
479  for(Index k=0; k<m_innerNonZeros[j]; ++k)
480  {
481  m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
482  m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
483  }
484  }
485  m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
486  oldStart = nextOldStart;
487  }
488  std::free(m_innerNonZeros);
489  m_innerNonZeros = 0;
490  m_data.resize(m_outerIndex[m_outerSize]);
491  m_data.squeeze();
492  }
493 
495  void uncompress()
496  {
497  if(m_innerNonZeros != 0)
498  return;
499  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
500  for (Index i = 0; i < m_outerSize; i++)
501  {
502  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
503  }
504  }
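  // A minimal usage sketch of the two storage modes (hypothetical matrix A): random insertions
  // switch the matrix to uncompressed mode, makeCompressed() squeezes the free space out again,
  // and uncompress() only rebuilds the innerNonZeros array.
  //
  //   Eigen::SparseMatrix<double> A(100,100);
  //   A.insert(2,3) = 1.5;      // A is now in uncompressed mode
  //   A.makeCompressed();       // isCompressed() == true, standard CCS/CRS layout
  //   A.uncompress();           // back to uncompressed mode, values are left untouched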
505 
507  void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
508  {
509  prune(default_prunning_func(reference,epsilon));
510  }
511 
519  template<typename KeepFunc>
520  void prune(const KeepFunc& keep = KeepFunc())
521  {
522  // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
523  makeCompressed();
524 
525  StorageIndex k = 0;
526  for(Index j=0; j<m_outerSize; ++j)
527  {
528  Index previousStart = m_outerIndex[j];
529  m_outerIndex[j] = k;
530  Index end = m_outerIndex[j+1];
531  for(Index i=previousStart; i<end; ++i)
532  {
533  if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
534  {
535  m_data.value(k) = m_data.value(i);
536  m_data.index(k) = m_data.index(i);
537  ++k;
538  }
539  }
540  }
541  m_outerIndex[m_outerSize] = k;
542  m_data.resize(k,0);
543  }
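  // A minimal usage sketch of the two prune() overloads above (hypothetical matrix A of doubles):
  // the functor overload keeps an entry (row,col,value) when the functor returns true, the scalar
  // overload drops entries that are much smaller than the reference value.
  //
  //   A.prune(1.0, 1e-12);      // drop entries with |value| <= 1.0 * 1e-12
  //   A.prune([](Eigen::Index row, Eigen::Index col, double) { return row >= col; });  // keep lower part (C++11)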
544 
553  void conservativeResize(Index rows, Index cols)
554  {
555  // No change
556  if (this->rows() == rows && this->cols() == cols) return;
557 
558  // If one dimension is null, then there is nothing to be preserved
559  if(rows==0 || cols==0) return resize(rows,cols);
560 
561  Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
562  Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
563  StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
564 
565  // Deals with inner non zeros
566  if (m_innerNonZeros)
567  {
568  // Resize m_innerNonZeros
569  StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
570  if (!newInnerNonZeros) internal::throw_std_bad_alloc();
571  m_innerNonZeros = newInnerNonZeros;
572 
573  for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
574  m_innerNonZeros[i] = 0;
575  }
576  else if (innerChange < 0)
577  {
578  // Inner size decreased: allocate a new m_innerNonZeros
579  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize+outerChange+1) * sizeof(StorageIndex)));
580  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
581  for(Index i = 0; i < m_outerSize; i++)
582  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
583  }
584 
585  // Change the m_innerNonZeros in case of a decrease of inner size
586  if (m_innerNonZeros && innerChange < 0)
587  {
588  for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
589  {
590  StorageIndex &n = m_innerNonZeros[i];
591  StorageIndex start = m_outerIndex[i];
592  while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
593  }
594  }
595 
596  m_innerSize = newInnerSize;
597 
598  // Re-allocate outer index structure if necessary
599  if (outerChange == 0)
600  return;
601 
602  StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
603  if (!newOuterIndex) internal::throw_std_bad_alloc();
604  m_outerIndex = newOuterIndex;
605  if (outerChange > 0)
606  {
607  StorageIndex last = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
608  for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
609  m_outerIndex[i] = last;
610  }
611  m_outerSize += outerChange;
612  }
613 
621  void resize(Index rows, Index cols)
622  {
623  const Index outerSize = IsRowMajor ? rows : cols;
624  m_innerSize = IsRowMajor ? cols : rows;
625  m_data.clear();
626  if (m_outerSize != outerSize || m_outerSize==0)
627  {
628  std::free(m_outerIndex);
629  m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
630  if (!m_outerIndex) internal::throw_std_bad_alloc();
631 
632  m_outerSize = outerSize;
633  }
634  if(m_innerNonZeros)
635  {
636  std::free(m_innerNonZeros);
637  m_innerNonZeros = 0;
638  }
639  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
640  }
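  // A minimal usage sketch contrasting the two resize members here (hypothetical matrix A):
  //
  //   Eigen::SparseMatrix<double> A(100,100);
  //   A.conservativeResize(120, 80);   // grows the rows, truncates the columns, keeps overlapping entries
  //   A.resize(50, 50);                // discards everything: A is now an empty 50 x 50 matrix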
641 
644  void resizeNonZeros(Index size)
645  {
646  m_data.resize(size);
647  }
648 
650  const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
651 
656  DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
657 
659  inline SparseMatrix()
660  : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
661  {
662  check_template_parameters();
663  resize(0, 0);
664  }
665 
667  inline SparseMatrix(Index rows, Index cols)
668  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
669  {
670  check_template_parameters();
671  resize(rows, cols);
672  }
673 
675  template<typename OtherDerived>
676  inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
677  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
678  {
679  EIGEN_STATIC_ASSERT((internal::is_same<typename OtherDerived::Scalar, Scalar>::value),
680  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
681  check_template_parameters();
682  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
683  if (needToTranspose)
684  *this = other.derived();
685  else
686  {
687  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
688  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
689  #endif
690  internal::call_assignment_no_alias(*this, other.derived());
691  }
692  }
693 
695  template<typename OtherDerived, unsigned int UpLo>
696  inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
697  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
698  {
699  check_template_parameters();
700  Base::operator=(other);
701  }
702 
704  inline SparseMatrix(const SparseMatrix& other)
705  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
706  {
707  check_template_parameters();
708  *this = other.derived();
709  }
710 
712  template<typename OtherDerived>
713  SparseMatrix(const ReturnByValue<OtherDerived>& other)
714  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
715  {
716  check_template_parameters();
717  initAssignment(other);
718  other.evalTo(*this);
719  }
720 
722  template<typename OtherDerived>
723  SparseMatrix(const DiagonalBase<OtherDerived>& other)
724  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
725  {
726  check_template_parameters();
727  *this = other.derived();
728  }
729 
732  inline void swap(SparseMatrix& other)
733  {
734  //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
735  std::swap(m_outerIndex, other.m_outerIndex);
736  std::swap(m_innerSize, other.m_innerSize);
737  std::swap(m_outerSize, other.m_outerSize);
738  std::swap(m_innerNonZeros, other.m_innerNonZeros);
739  m_data.swap(other.m_data);
740  }
741 
744  inline void setIdentity()
745  {
746  eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
747  this->m_data.resize(rows());
748  Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
749  Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
750  Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
751  std::free(m_innerNonZeros);
752  m_innerNonZeros = 0;
753  }
754  inline SparseMatrix& operator=(const SparseMatrix& other)
755  {
756  if (other.isRValue())
757  {
758  swap(other.const_cast_derived());
759  }
760  else if(this!=&other)
761  {
762  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
763  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
764  #endif
765  initAssignment(other);
766  if(other.isCompressed())
767  {
768  internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
769  m_data = other.m_data;
770  }
771  else
772  {
773  Base::operator=(other);
774  }
775  }
776  return *this;
777  }
778 
779 #ifndef EIGEN_PARSED_BY_DOXYGEN
780  template<typename OtherDerived>
781  inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
782  { return Base::operator=(other.derived()); }
783 #endif // EIGEN_PARSED_BY_DOXYGEN
784 
785  template<typename OtherDerived>
786  EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
787 
788  friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
789  {
790  EIGEN_DBG_SPARSE(
791  s << "Nonzero entries:\n";
792  if(m.isCompressed())
793  {
794  for (Index i=0; i<m.nonZeros(); ++i)
795  s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
796  }
797  else
798  {
799  for (Index i=0; i<m.outerSize(); ++i)
800  {
801  Index p = m.m_outerIndex[i];
802  Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
803  Index k=p;
804  for (; k<pe; ++k) {
805  s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
806  }
807  for (; k<m.m_outerIndex[i+1]; ++k) {
808  s << "(_,_) ";
809  }
810  }
811  }
812  s << std::endl;
813  s << std::endl;
814  s << "Outer pointers:\n";
815  for (Index i=0; i<m.outerSize(); ++i) {
816  s << m.m_outerIndex[i] << " ";
817  }
818  s << " $" << std::endl;
819  if(!m.isCompressed())
820  {
821  s << "Inner non zeros:\n";
822  for (Index i=0; i<m.outerSize(); ++i) {
823  s << m.m_innerNonZeros[i] << " ";
824  }
825  s << " $" << std::endl;
826  }
827  s << std::endl;
828  );
829  s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
830  return s;
831  }
832 
834  inline ~SparseMatrix()
835  {
836  std::free(m_outerIndex);
837  std::free(m_innerNonZeros);
838  }
839 
841  Scalar sum() const;
842 
843 # ifdef EIGEN_SPARSEMATRIX_PLUGIN
844 # include EIGEN_SPARSEMATRIX_PLUGIN
845 # endif
846 
847 protected:
848 
849  template<typename Other>
850  void initAssignment(const Other& other)
851  {
852  resize(other.rows(), other.cols());
853  if(m_innerNonZeros)
854  {
855  std::free(m_innerNonZeros);
856  m_innerNonZeros = 0;
857  }
858  }
859 
862  EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
863 
866  class SingletonVector
867  {
868  StorageIndex m_index;
869  StorageIndex m_value;
870  public:
871  typedef StorageIndex value_type;
872  SingletonVector(Index i, Index v)
873  : m_index(convert_index(i)), m_value(convert_index(v))
874  {}
875 
876  StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
877  };
878 
881  EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
882 
883 public:
886  EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
887  {
888  const Index outer = IsRowMajor ? row : col;
889  const Index inner = IsRowMajor ? col : row;
890 
891  eigen_assert(!isCompressed());
892  eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
893 
894  Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
895  m_data.index(p) = convert_index(inner);
896  return (m_data.value(p) = Scalar(0));
897  }
898 
899 private:
900  static void check_template_parameters()
901  {
902  EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
903  EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
904  }
905 
906  struct default_prunning_func {
907  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
908  inline bool operator() (const Index&, const Index&, const Scalar& value) const
909  {
910  return !internal::isMuchSmallerThan(value, reference, epsilon);
911  }
912  Scalar reference;
913  RealScalar epsilon;
914  };
915 };
916 
917 namespace internal {
918 
919 template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
920 void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
921 {
922  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
923  typedef typename SparseMatrixType::Scalar Scalar;
924  typedef typename SparseMatrixType::StorageIndex StorageIndex;
925  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
926 
927  if(begin!=end)
928  {
929  // pass 1: count the nnz per inner-vector
930  typename SparseMatrixType::IndexVector wi(trMat.outerSize());
931  wi.setZero();
932  for(InputIterator it(begin); it!=end; ++it)
933  {
934  eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
935  wi(IsRowMajor ? it->col() : it->row())++;
936  }
937 
938  // pass 2: insert all the elements into trMat
939  trMat.reserve(wi);
940  for(InputIterator it(begin); it!=end; ++it)
941  trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
942 
943  // pass 3:
944  trMat.collapseDuplicates(dup_func);
945  }
946 
947  // pass 4: transposed copy -> implicit sorting
948  mat = trMat;
949 }
950 
951 }
952 
953 
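// A minimal usage sketch for setFromTriplets() below (rows, cols and estimated_nnz are
// placeholders); duplicated (i,j) pairs are combined, by default by summation:
//
//   typedef Eigen::Triplet<double> T;
//   std::vector<T> triplets;
//   triplets.reserve(estimated_nnz);
//   triplets.push_back(T(0, 0,  3.0));
//   triplets.push_back(T(1, 2, -1.5));
//   triplets.push_back(T(1, 2,  0.5));   // duplicate of (1,2): summed with the previous value
//   Eigen::SparseMatrix<double> A(rows, cols);
//   A.setFromTriplets(triplets.begin(), triplets.end());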
991 template<typename Scalar, int _Options, typename _StorageIndex>
992 template<typename InputIterators>
993 void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
994 {
995  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
996 }
997 
1007 template<typename Scalar, int _Options, typename _StorageIndex>
1008 template<typename InputIterators,typename DupFunctor>
1009 void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
1010 {
1011  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
1012 }
1013 
1015 template<typename Scalar, int _Options, typename _StorageIndex>
1016 template<typename DupFunctor>
1017 void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
1018 {
1019  eigen_assert(!isCompressed());
1020  // TODO, in practice we should be able to use m_innerNonZeros for that task
1021  IndexVector wi(innerSize());
1022  wi.fill(-1);
1023  StorageIndex count = 0;
1024  // for each inner-vector, wi[inner_index] will hold the position of the first element in the index/value buffers
1025  for(Index j=0; j<outerSize(); ++j)
1026  {
1027  StorageIndex start = count;
1028  Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1029  for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1030  {
1031  Index i = m_data.index(k);
1032  if(wi(i)>=start)
1033  {
1034  // we have already met this entry => accumulate it
1035  m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1036  }
1037  else
1038  {
1039  m_data.value(count) = m_data.value(k);
1040  m_data.index(count) = m_data.index(k);
1041  wi(i) = count;
1042  ++count;
1043  }
1044  }
1045  m_outerIndex[j] = start;
1046  }
1047  m_outerIndex[m_outerSize] = count;
1048 
1049  // turn the matrix into compressed form
1050  std::free(m_innerNonZeros);
1051  m_innerNonZeros = 0;
1052  m_data.resize(m_outerIndex[m_outerSize]);
1053 }
1054 
1055 template<typename Scalar, int _Options, typename _StorageIndex>
1056 template<typename OtherDerived>
1057 EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
1058 {
1059  EIGEN_STATIC_ASSERT((internal::is_same<typename OtherDerived::Scalar, Scalar>::value),
1060  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1061 
1062  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1063  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1064  #endif
1065 
1066  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1067  if (needToTranspose)
1068  {
1069  #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1070  EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1071  #endif
1072  // two passes algorithm:
1073  // 1 - compute the number of coeffs per dest inner vector
1074  // 2 - do the actual copy/eval
1075  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1076  typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
1077  typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1078  typedef internal::evaluator<_OtherCopy> OtherCopyEval;
1079  OtherCopy otherCopy(other.derived());
1080  OtherCopyEval otherCopyEval(otherCopy);
1081 
1082  SparseMatrix dest(other.rows(),other.cols());
1083  Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1084 
1085  // pass 1
1086  // FIXME the above copy could be merged with that pass
1087  for (Index j=0; j<otherCopy.outerSize(); ++j)
1088  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1089  ++dest.m_outerIndex[it.index()];
1090 
1091  // prefix sum
1092  StorageIndex count = 0;
1093  IndexVector positions(dest.outerSize());
1094  for (Index j=0; j<dest.outerSize(); ++j)
1095  {
1096  StorageIndex tmp = dest.m_outerIndex[j];
1097  dest.m_outerIndex[j] = count;
1098  positions[j] = count;
1099  count += tmp;
1100  }
1101  dest.m_outerIndex[dest.outerSize()] = count;
1102  // alloc
1103  dest.m_data.resize(count);
1104  // pass 2
1105  for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1106  {
1107  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1108  {
1109  Index pos = positions[it.index()]++;
1110  dest.m_data.index(pos) = j;
1111  dest.m_data.value(pos) = it.value();
1112  }
1113  }
1114  this->swap(dest);
1115  return *this;
1116  }
1117  else
1118  {
1119  if(other.isRValue())
1120  {
1121  initAssignment(other.derived());
1122  }
1123  // there is no special optimization
1124  return Base::operator=(other.derived());
1125  }
1126 }
1127 
1128 template<typename _Scalar, int _Options, typename _StorageIndex>
1129 typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
1130 {
1131  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
1132 
1133  const Index outer = IsRowMajor ? row : col;
1134  const Index inner = IsRowMajor ? col : row;
1135 
1136  if(isCompressed())
1137  {
1138  if(nonZeros()==0)
1139  {
1140  // reserve space if not already done
1141  if(m_data.allocatedSize()==0)
1142  m_data.reserve(2*m_innerSize);
1143 
1144  // turn the matrix into non-compressed mode
1145  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1146  if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1147 
1148  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
1149 
1150  // pack all inner-vectors to the end of the pre-allocated space
1151  // and allocate the entire free-space to the first inner-vector
1152  StorageIndex end = convert_index(m_data.allocatedSize());
1153  for(Index j=1; j<=m_outerSize; ++j)
1154  m_outerIndex[j] = end;
1155  }
1156  else
1157  {
1158  // turn the matrix into non-compressed mode
1159  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1160  if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1161  for(Index j=0; j<m_outerSize; ++j)
1162  m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
1163  }
1164  }
1165 
1166  // check whether we can do a fast "push back" insertion
1167  Index data_end = m_data.allocatedSize();
1168 
1169  // First case: we are filling a new inner vector which is packed at the end.
1170  // We assume that all remaining inner-vectors are also empty and packed to the end.
1171  if(m_outerIndex[outer]==data_end)
1172  {
1173  eigen_internal_assert(m_innerNonZeros[outer]==0);
1174 
1175  // pack previous empty inner-vectors to end of the used-space
1176  // and allocate the entire free-space to the current inner-vector.
1177  StorageIndex p = convert_index(m_data.size());
1178  Index j = outer;
1179  while(j>=0 && m_innerNonZeros[j]==0)
1180  m_outerIndex[j--] = p;
1181 
1182  // push back the new element
1183  ++m_innerNonZeros[outer];
1184  m_data.append(Scalar(0), inner);
1185 
1186  // check for reallocation
1187  if(data_end != m_data.allocatedSize())
1188  {
1189  // m_data has been reallocated
1190  // -> move remaining inner-vectors back to the end of the free-space
1191  // so that the entire free-space is allocated to the current inner-vector.
1192  eigen_internal_assert(data_end < m_data.allocatedSize());
1193  StorageIndex new_end = convert_index(m_data.allocatedSize());
1194  for(Index k=outer+1; k<=m_outerSize; ++k)
1195  if(m_outerIndex[k]==data_end)
1196  m_outerIndex[k] = new_end;
1197  }
1198  return m_data.value(p);
1199  }
1200 
1201  // Second case: the next inner-vector is packed to the end
1202  // and the end of the current inner-vector matches the used-space.
1203  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
1204  {
1205  eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
1206 
1207  // add space for the new element
1208  ++m_innerNonZeros[outer];
1209  m_data.resize(m_data.size()+1);
1210 
1211  // check for reallocation
1212  if(data_end != m_data.allocatedSize())
1213  {
1214  // m_data has been reallocated
1215  // -> move remaining inner-vectors back to the end of the free-space
1216  // so that the entire free-space is allocated to the current inner-vector.
1217  eigen_internal_assert(data_end < m_data.allocatedSize());
1218  StorageIndex new_end = convert_index(m_data.allocatedSize());
1219  for(Index k=outer+1; k<=m_outerSize; ++k)
1220  if(m_outerIndex[k]==data_end)
1221  m_outerIndex[k] = new_end;
1222  }
1223 
1224  // and insert it at the right position (sorted insertion)
1225  Index startId = m_outerIndex[outer];
1226  Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
1227  while ( (p > startId) && (m_data.index(p-1) > inner) )
1228  {
1229  m_data.index(p) = m_data.index(p-1);
1230  m_data.value(p) = m_data.value(p-1);
1231  --p;
1232  }
1233 
1234  m_data.index(p) = convert_index(inner);
1235  return (m_data.value(p) = 0);
1236  }
1237 
1238  if(m_data.size() != m_data.allocatedSize())
1239  {
1240  // make sure the matrix is compatible to random un-compressed insertion:
1241  m_data.resize(m_data.allocatedSize());
1242  this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
1243  }
1244 
1245  return insertUncompressed(row,col);
1246 }
1247 
1248 template<typename _Scalar, int _Options, typename _StorageIndex>
1249 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
1250 {
1251  eigen_assert(!isCompressed());
1252 
1253  const Index outer = IsRowMajor ? row : col;
1254  const StorageIndex inner = convert_index(IsRowMajor ? col : row);
1255 
1256  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1257  StorageIndex innerNNZ = m_innerNonZeros[outer];
1258  if(innerNNZ>=room)
1259  {
1260  // this inner vector is full, we need to reallocate the whole buffer :(
1261  reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
1262  }
1263 
1264  Index startId = m_outerIndex[outer];
1265  Index p = startId + m_innerNonZeros[outer];
1266  while ( (p > startId) && (m_data.index(p-1) > inner) )
1267  {
1268  m_data.index(p) = m_data.index(p-1);
1269  m_data.value(p) = m_data.value(p-1);
1270  --p;
1271  }
1272  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1273 
1274  m_innerNonZeros[outer]++;
1275 
1276  m_data.index(p) = inner;
1277  return (m_data.value(p) = Scalar(0));
1278 }
1279 
1280 template<typename _Scalar, int _Options, typename _StorageIndex>
1281 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
1282 {
1283  eigen_assert(isCompressed());
1284 
1285  const Index outer = IsRowMajor ? row : col;
1286  const Index inner = IsRowMajor ? col : row;
1287 
1288  Index previousOuter = outer;
1289  if (m_outerIndex[outer+1]==0)
1290  {
1291  // we start a new inner vector
1292  while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1293  {
1294  m_outerIndex[previousOuter] = convert_index(m_data.size());
1295  --previousOuter;
1296  }
1297  m_outerIndex[outer+1] = m_outerIndex[outer];
1298  }
1299 
1300  // here we have to handle the tricky case where the outerIndex array
1301  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
1302  // the 2nd inner vector...
1303  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1304  && (std::size_t(m_outerIndex[outer+1]) == m_data.size());
1305 
1306  std::size_t startId = m_outerIndex[outer];
1307  // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
1308  std::size_t p = m_outerIndex[outer+1];
1309  ++m_outerIndex[outer+1];
1310 
1311  double reallocRatio = 1;
1312  if (m_data.allocatedSize()<=m_data.size())
1313  {
1314  // if there is no preallocated memory, let's reserve a minimum of 32 elements
1315  if (m_data.size()==0)
1316  {
1317  m_data.reserve(32);
1318  }
1319  else
1320  {
1321  // we need to reallocate the data, to reduce multiple reallocations
1322  // we use a smart resize algorithm based on the current filling ratio
1323  // in addition, we use double to avoid integer overflows
1324  double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1325  reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1326  // furthermore we bound the realloc ratio to:
1327  // 1) reduce multiple minor reallocations when the matrix is almost filled
1328  // 2) avoid allocating too much memory when the matrix is almost empty
1329  reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1330  }
1331  }
1332  m_data.resize(m_data.size()+1,reallocRatio);
1333 
1334  if (!isLastVec)
1335  {
1336  if (previousOuter==-1)
1337  {
1338  // oops wrong guess.
1339  // let's correct the outer offsets
1340  for (Index k=0; k<=(outer+1); ++k)
1341  m_outerIndex[k] = 0;
1342  Index k=outer+1;
1343  while(m_outerIndex[k]==0)
1344  m_outerIndex[k++] = 1;
1345  while (k<=m_outerSize && m_outerIndex[k]!=0)
1346  m_outerIndex[k++]++;
1347  p = 0;
1348  --k;
1349  k = m_outerIndex[k]-1;
1350  while (k>0)
1351  {
1352  m_data.index(k) = m_data.index(k-1);
1353  m_data.value(k) = m_data.value(k-1);
1354  k--;
1355  }
1356  }
1357  else
1358  {
1359  // we are not inserting into the last inner vec
1360  // update outer indices:
1361  Index j = outer+2;
1362  while (j<=m_outerSize && m_outerIndex[j]!=0)
1363  m_outerIndex[j++]++;
1364  --j;
1365  // shift data of last vecs:
1366  Index k = m_outerIndex[j]-1;
1367  while (k>=Index(p))
1368  {
1369  m_data.index(k) = m_data.index(k-1);
1370  m_data.value(k) = m_data.value(k-1);
1371  k--;
1372  }
1373  }
1374  }
1375 
1376  while ( (p > startId) && (m_data.index(p-1) > inner) )
1377  {
1378  m_data.index(p) = m_data.index(p-1);
1379  m_data.value(p) = m_data.value(p-1);
1380  --p;
1381  }
1382 
1383  m_data.index(p) = inner;
1384  return (m_data.value(p) = Scalar(0));
1385 }
1386 
1387 namespace internal {
1388 
1389 template<typename _Scalar, int _Options, typename _StorageIndex>
1390 struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
1391  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
1392 {
1393  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
1394  typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
1395  evaluator() : Base() {}
1396  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
1397 };
1398 
1399 }
1400 
1401 } // end namespace Eigen
1402 
1403 #endif // EIGEN_SPARSEMATRIX_H