SparseMatrix.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSEMATRIX_H
11 #define EIGEN_SPARSEMATRIX_H
12 
13 namespace Eigen {
14 
45 namespace internal {
46 template<typename _Scalar, int _Options, typename _StorageIndex>
47 struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
48 {
49  typedef _Scalar Scalar;
50  typedef _StorageIndex StorageIndex;
51  typedef Sparse StorageKind;
52  typedef MatrixXpr XprKind;
53  enum {
54  RowsAtCompileTime = Dynamic,
55  ColsAtCompileTime = Dynamic,
56  MaxRowsAtCompileTime = Dynamic,
57  MaxColsAtCompileTime = Dynamic,
58  Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
59  SupportedAccessPatterns = InnerRandomAccessPattern
60  };
61 };
62 
63 template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
64 struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
65 {
66  typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
67  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
68  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
69 
70  typedef _Scalar Scalar;
71  typedef Dense StorageKind;
72  typedef _StorageIndex StorageIndex;
73  typedef MatrixXpr XprKind;
74 
75  enum {
76  RowsAtCompileTime = Dynamic,
77  ColsAtCompileTime = 1,
78  MaxRowsAtCompileTime = Dynamic,
79  MaxColsAtCompileTime = 1,
80  Flags = LvalueBit
81  };
82 };
83 
84 template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
85 struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
86  : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
87 {
88  enum {
89  Flags = 0
90  };
91 };
92 
93 } // end namespace internal
94 
95 template<typename _Scalar, int _Options, typename _StorageIndex>
96 class SparseMatrix
97  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
98 {
99  typedef SparseCompressedBase<SparseMatrix> Base;
100  using Base::convert_index;
101  friend class SparseVector<_Scalar,0,_StorageIndex>;
102  template<typename, typename, typename, typename, typename>
103  friend struct internal::Assignment;
104  public:
105  using Base::isCompressed;
106  using Base::nonZeros;
107  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
108  using Base::operator+=;
109  using Base::operator-=;
110 
111  typedef MappedSparseMatrix<Scalar,Flags> Map;
112  typedef Diagonal<SparseMatrix> DiagonalReturnType;
113  typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
114  typedef typename Base::InnerIterator InnerIterator;
115  typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
116 
117 
118  using Base::IsRowMajor;
119  typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
120  enum {
121  Options = _Options
122  };
123 
124  typedef typename Base::IndexVector IndexVector;
125  typedef typename Base::ScalarVector ScalarVector;
126  protected:
127  typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0),StorageIndex> TransposedSparseMatrix;
128 
129  Index m_outerSize;
130  Index m_innerSize;
131  StorageIndex* m_outerIndex;
132  StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
133  Storage m_data;
134 
135  public:
136 
138  inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
140  inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
141 
143  inline Index innerSize() const { return m_innerSize; }
145  inline Index outerSize() const { return m_outerSize; }
146 
150  inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
154  inline Scalar* valuePtr() { return m_data.valuePtr(); }
155 
159  inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
163  inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
164 
168  inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
172  inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
173 
177  inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
181  inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
182 
184  inline Storage& data() { return m_data; }
186  inline const Storage& data() const { return m_data; }
187 
190  inline Scalar coeff(Index row, Index col) const
191  {
192  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
193 
194  const Index outer = IsRowMajor ? row : col;
195  const Index inner = IsRowMajor ? col : row;
196  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
197  return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
198  }
199 
208  inline Scalar& coeffRef(Index row, Index col)
209  {
210  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
211 
212  const Index outer = IsRowMajor ? row : col;
213  const Index inner = IsRowMajor ? col : row;
214 
215  Index start = m_outerIndex[outer];
216  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
217  eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
218  if(end<=start)
219  return insert(row,col);
220  const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
221  if((p<end) && (m_data.index(p)==inner))
222  return m_data.value(p);
223  else
224  return insert(row,col);
225  }
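  // Usage sketch (editor's addition, not part of the original file): the accessors above behave
  // differently on missing entries; assuming <Eigen/Sparse> is included:
  //   Eigen::SparseMatrix<double> A(4,4);   // empty, compressed
  //   double v = A.coeff(1,2);              // read-only lookup: returns 0.0 here, never allocates
  //   A.coeffRef(1,2) = 3.0;                // creates the (1,2) entry if it does not exist yet
  //   A.insert(2,3)   = 1.5;                // like coeffRef, but asserts the entry is not already present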
226 
242  Scalar& insert(Index row, Index col);
243 
244  public:
245 
253  inline void setZero()
254  {
255  m_data.clear();
256  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
257  if(m_innerNonZeros)
258  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
259  }
260 
264  inline void reserve(Index reserveSize)
265  {
266  eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
267  m_data.reserve(reserveSize);
268  }
269 
270  #ifdef EIGEN_PARSED_BY_DOXYGEN
271 
283  template<class SizesType>
284  inline void reserve(const SizesType& reserveSizes);
285  #else
286  template<class SizesType>
287  inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
288  #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
289  typename
290  #endif
291  SizesType::value_type())
292  {
293  EIGEN_UNUSED_VARIABLE(enableif);
294  reserveInnerVectors(reserveSizes);
295  }
296  #endif // EIGEN_PARSED_BY_DOXYGEN
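  // Usage sketch (editor's addition): the per-inner-vector overload above is the one to use before a
  // loop of insert() calls; the sizes vector is only an estimate. Assuming <Eigen/Sparse> is included:
  //   Eigen::SparseMatrix<double> A(1000,1000);                          // column-major by default
  //   Eigen::VectorXi nnzPerColumn = Eigen::VectorXi::Constant(1000, 6); // rough per-column estimate
  //   A.reserve(nnzPerColumn);                                           // forwards to reserveInnerVectors()
  //   for(int j=0; j<A.outerSize(); ++j)
  //     A.insert(j,j) = 1.0;                                             // cheap: room was reserved per column
  //   A.makeCompressed();                                                // squeeze out the unused slack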
297  protected:
298  template<class SizesType>
299  inline void reserveInnerVectors(const SizesType& reserveSizes)
300  {
301  if(isCompressed())
302  {
303  Index totalReserveSize = 0;
304  // turn the matrix into non-compressed mode
305  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
306  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
307 
308  // temporarily use m_innerNonZeros to hold the new starting points.
309  StorageIndex* newOuterIndex = m_innerNonZeros;
310 
311  StorageIndex count = 0;
312  for(Index j=0; j<m_outerSize; ++j)
313  {
314  newOuterIndex[j] = count;
315  count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
316  totalReserveSize += reserveSizes[j];
317  }
318  m_data.reserve(totalReserveSize);
319  StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
320  for(Index j=m_outerSize-1; j>=0; --j)
321  {
322  StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
323  for(Index i=innerNNZ-1; i>=0; --i)
324  {
325  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
326  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
327  }
328  previousOuterIndex = m_outerIndex[j];
329  m_outerIndex[j] = newOuterIndex[j];
330  m_innerNonZeros[j] = innerNNZ;
331  }
332  if(m_outerSize>0)
333  m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
334 
335  m_data.resize(m_outerIndex[m_outerSize]);
336  }
337  else
338  {
339  StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
340  if (!newOuterIndex) internal::throw_std_bad_alloc();
341 
342  StorageIndex count = 0;
343  for(Index j=0; j<m_outerSize; ++j)
344  {
345  newOuterIndex[j] = count;
346  StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
347  StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
348  count += toReserve + m_innerNonZeros[j];
349  }
350  newOuterIndex[m_outerSize] = count;
351 
352  m_data.resize(count);
353  for(Index j=m_outerSize-1; j>=0; --j)
354  {
355  Index offset = newOuterIndex[j] - m_outerIndex[j];
356  if(offset>0)
357  {
358  StorageIndex innerNNZ = m_innerNonZeros[j];
359  for(Index i=innerNNZ-1; i>=0; --i)
360  {
361  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
362  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
363  }
364  }
365  }
366 
367  std::swap(m_outerIndex, newOuterIndex);
368  std::free(newOuterIndex);
369  }
370 
371  }
372  public:
373 
374  //--- low level purely coherent filling ---
375 
386  inline Scalar& insertBack(Index row, Index col)
387  {
388  return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
389  }
390 
393  inline Scalar& insertBackByOuterInner(Index outer, Index inner)
394  {
395  eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
396  eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
397  Index p = m_outerIndex[outer+1];
398  ++m_outerIndex[outer+1];
399  m_data.append(Scalar(0), inner);
400  return m_data.value(p);
401  }
402 
405  inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
406  {
407  Index p = m_outerIndex[outer+1];
408  ++m_outerIndex[outer+1];
409  m_data.append(Scalar(0), inner);
410  return m_data.value(p);
411  }
412 
415  inline void startVec(Index outer)
416  {
417  eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
418  eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
419  m_outerIndex[outer+1] = m_outerIndex[outer];
420  }
421 
425  inline void finalize()
426  {
427  if(isCompressed())
428  {
429  StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
430  Index i = m_outerSize;
431  // find the last filled inner vector (column for the default column-major storage)
432  while (i>=0 && m_outerIndex[i]==0)
433  --i;
434  ++i;
435  while (i<=m_outerSize)
436  {
437  m_outerIndex[i] = size;
438  ++i;
439  }
440  }
441  }
442 
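  // Usage sketch (editor's addition): the low-level startVec()/insertBack()/finalize() protocol above
  // requires entries to be appended in strictly increasing outer order, and in increasing inner order
  // within each inner vector:
  //   Eigen::SparseMatrix<double> A(3,3);   // column-major: outer index = column, inner index = row
  //   A.reserve(3);
  //   for(int j=0; j<3; ++j)
  //   {
  //     A.startVec(j);
  //     A.insertBack(j,j) = 2.0;
  //   }
  //   A.finalize();                         // fills the trailing outer-index entries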
443  //---
444 
445  template<typename InputIterators>
446  void setFromTriplets(const InputIterators& begin, const InputIterators& end);
447 
448  template<typename InputIterators,typename DupFunctor>
449  void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
450 
451  void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
452 
453  template<typename DupFunctor>
454  void collapseDuplicates(DupFunctor dup_func = DupFunctor());
455 
456  //---
457 
460  Scalar& insertByOuterInner(Index j, Index i)
461  {
462  return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
463  }
464 
467  void makeCompressed()
468  {
469  if(isCompressed())
470  return;
471 
472  eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
473 
474  Index oldStart = m_outerIndex[1];
475  m_outerIndex[1] = m_innerNonZeros[0];
476  for(Index j=1; j<m_outerSize; ++j)
477  {
478  Index nextOldStart = m_outerIndex[j+1];
479  Index offset = oldStart - m_outerIndex[j];
480  if(offset>0)
481  {
482  for(Index k=0; k<m_innerNonZeros[j]; ++k)
483  {
484  m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
485  m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
486  }
487  }
488  m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
489  oldStart = nextOldStart;
490  }
491  std::free(m_innerNonZeros);
492  m_innerNonZeros = 0;
493  m_data.resize(m_outerIndex[m_outerSize]);
494  m_data.squeeze();
495  }
496 
498  void uncompress()
499  {
500  if(m_innerNonZeros != 0)
501  return;
502  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
503  for (Index i = 0; i < m_outerSize; i++)
504  {
505  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
506  }
507  }
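  // Usage sketch (editor's addition) of the two storage modes toggled by makeCompressed()/uncompress():
  //   Eigen::SparseMatrix<double> A(10,10);
  //   A.coeffRef(3,4) = 1.0;        // random insertion switches A to uncompressed mode
  //   bool c1 = A.isCompressed();   // false: innerNonZeroPtr() is now non-null
  //   A.makeCompressed();           // squeezes out the reserved holes
  //   bool c2 = A.isCompressed();   // true again: innerNonZeroPtr() == 0
  //   A.uncompress();               // re-creates the inner-non-zero bookkeeping without moving data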
508 
510  void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
511  {
512  prune(default_prunning_func(reference,epsilon));
513  }
514 
522  template<typename KeepFunc>
523  void prune(const KeepFunc& keep = KeepFunc())
524  {
525  // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
526  makeCompressed();
527 
528  StorageIndex k = 0;
529  for(Index j=0; j<m_outerSize; ++j)
530  {
531  Index previousStart = m_outerIndex[j];
532  m_outerIndex[j] = k;
533  Index end = m_outerIndex[j+1];
534  for(Index i=previousStart; i<end; ++i)
535  {
536  if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
537  {
538  m_data.value(k) = m_data.value(i);
539  m_data.index(k) = m_data.index(i);
540  ++k;
541  }
542  }
543  }
544  m_outerIndex[m_outerSize] = k;
545  m_data.resize(k,0);
546  }
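  // Usage sketch (editor's addition) of the two prune() overloads; the lambda form needs C++11 and must
  // be callable as keep(row, col, value):
  //   Eigen::SparseMatrix<double> A(100,100);
  //   // ... fill A ...
  //   A.prune(1.0, 1e-12);          // drops entries e with |e| <= 1.0 * 1e-12 (see default_prunning_func)
  //   A.prune([](Eigen::Index, Eigen::Index, double v) { return v > 0.0; });  // keep strictly positive values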
547 
556  void conservativeResize(Index rows, Index cols)
557  {
558  // No change
559  if (this->rows() == rows && this->cols() == cols) return;
560 
561  // If one dimension is null, then there is nothing to be preserved
562  if(rows==0 || cols==0) return resize(rows,cols);
563 
564  Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
565  Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
566  StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
567 
568  // Deals with inner non zeros
569  if (m_innerNonZeros)
570  {
571  // Resize m_innerNonZeros
572  StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
573  if (!newInnerNonZeros) internal::throw_std_bad_alloc();
574  m_innerNonZeros = newInnerNonZeros;
575 
576  for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
577  m_innerNonZeros[i] = 0;
578  }
579  else if (innerChange < 0)
580  {
581  // Inner size decreased: allocate a new m_innerNonZeros
582  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
583  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
584  for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
585  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
586  for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
587  m_innerNonZeros[i] = 0;
588  }
589 
590  // Change the m_innerNonZeros in case of a decrease of inner size
591  if (m_innerNonZeros && innerChange < 0)
592  {
593  for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
594  {
595  StorageIndex &n = m_innerNonZeros[i];
596  StorageIndex start = m_outerIndex[i];
597  while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
598  }
599  }
600 
601  m_innerSize = newInnerSize;
602 
603  // Re-allocate outer index structure if necessary
604  if (outerChange == 0)
605  return;
606 
607  StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
608  if (!newOuterIndex) internal::throw_std_bad_alloc();
609  m_outerIndex = newOuterIndex;
610  if (outerChange > 0)
611  {
612  StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
613  for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
614  m_outerIndex[i] = lastIdx;
615  }
616  m_outerSize += outerChange;
617  }
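  // Usage sketch (editor's addition) contrasting conservativeResize() with resize() (defined just below):
  //   Eigen::SparseMatrix<double> A(5,5);
  //   A.insert(4,4) = 1.0;
  //   A.conservativeResize(6,6);    // entry (4,4) is kept, the new row and column start empty
  //   A.conservativeResize(4,4);    // entry (4,4) no longer fits and is dropped
  //   A.resize(8,8);                // resize() discards all entries and leaves an empty 8x8 matrix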
618 
626  void resize(Index rows, Index cols)
627  {
628  const Index outerSize = IsRowMajor ? rows : cols;
629  m_innerSize = IsRowMajor ? cols : rows;
630  m_data.clear();
631  if (m_outerSize != outerSize || m_outerSize==0)
632  {
633  std::free(m_outerIndex);
634  m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
635  if (!m_outerIndex) internal::throw_std_bad_alloc();
636 
637  m_outerSize = outerSize;
638  }
639  if(m_innerNonZeros)
640  {
641  std::free(m_innerNonZeros);
642  m_innerNonZeros = 0;
643  }
644  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
645  }
646 
649  void resizeNonZeros(Index size)
650  {
651  m_data.resize(size);
652  }
653 
655  const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
656 
661  DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
662 
664  inline SparseMatrix()
665  : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
666  {
667  check_template_parameters();
668  resize(0, 0);
669  }
670 
672  inline SparseMatrix(Index rows, Index cols)
673  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
674  {
675  check_template_parameters();
676  resize(rows, cols);
677  }
678 
680  template<typename OtherDerived>
681  inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
682  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
683  {
684  EIGEN_STATIC_ASSERT((internal::is_same<typename OtherDerived::Scalar, Scalar>::value),
685  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
686  check_template_parameters();
687  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
688  if (needToTranspose)
689  *this = other.derived();
690  else
691  {
692  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
693  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
694  #endif
695  internal::call_assignment_no_alias(*this, other.derived());
696  }
697  }
698 
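  // Usage sketch (editor's addition): when the storage orders differ, the constructor above falls back to
  // the transposed-copy path of operator=(const SparseMatrixBase&) defined near the end of this file:
  //   typedef Eigen::SparseMatrix<double>                  ColMajorSpMat;   // the default
  //   typedef Eigen::SparseMatrix<double, Eigen::RowMajor> RowMajorSpMat;
  //   ColMajorSpMat A(10,10);
  //   // ... fill A ...
  //   RowMajorSpMat B(A);           // needToTranspose == true: entries are re-bucketed per row
  //   ColMajorSpMat C(B);           // and back; both copies end up in compressed form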
700  template<typename OtherDerived, unsigned int UpLo>
701  inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
702  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
703  {
704  check_template_parameters();
705  Base::operator=(other);
706  }
707 
709  inline SparseMatrix(const SparseMatrix& other)
710  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
711  {
712  check_template_parameters();
713  *this = other.derived();
714  }
715 
717  template<typename OtherDerived>
718  SparseMatrix(const ReturnByValue<OtherDerived>& other)
719  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
720  {
721  check_template_parameters();
722  initAssignment(other);
723  other.evalTo(*this);
724  }
725 
727  template<typename OtherDerived>
728  explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
729  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
730  {
731  check_template_parameters();
732  *this = other.derived();
733  }
734 
737  inline void swap(SparseMatrix& other)
738  {
739  //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
740  std::swap(m_outerIndex, other.m_outerIndex);
741  std::swap(m_innerSize, other.m_innerSize);
742  std::swap(m_outerSize, other.m_outerSize);
743  std::swap(m_innerNonZeros, other.m_innerNonZeros);
744  m_data.swap(other.m_data);
745  }
746 
749  inline void setIdentity()
750  {
751  eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
752  this->m_data.resize(rows());
753  Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
754  Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
755  Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
756  std::free(m_innerNonZeros);
757  m_innerNonZeros = 0;
758  }
759  inline SparseMatrix& operator=(const SparseMatrix& other)
760  {
761  if (other.isRValue())
762  {
763  swap(other.const_cast_derived());
764  }
765  else if(this!=&other)
766  {
767  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
768  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
769  #endif
770  initAssignment(other);
771  if(other.isCompressed())
772  {
773  internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
774  m_data = other.m_data;
775  }
776  else
777  {
778  Base::operator=(other);
779  }
780  }
781  return *this;
782  }
783 
784 #ifndef EIGEN_PARSED_BY_DOXYGEN
785  template<typename OtherDerived>
786  inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
787  { return Base::operator=(other.derived()); }
788 
789  template<typename Lhs, typename Rhs>
790  inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
791 #endif // EIGEN_PARSED_BY_DOXYGEN
792 
793  template<typename OtherDerived>
794  EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
795 
796  friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
797  {
798  EIGEN_DBG_SPARSE(
799  s << "Nonzero entries:\n";
800  if(m.isCompressed())
801  {
802  for (Index i=0; i<m.nonZeros(); ++i)
803  s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
804  }
805  else
806  {
807  for (Index i=0; i<m.outerSize(); ++i)
808  {
809  Index p = m.m_outerIndex[i];
810  Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
811  Index k=p;
812  for (; k<pe; ++k) {
813  s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
814  }
815  for (; k<m.m_outerIndex[i+1]; ++k) {
816  s << "(_,_) ";
817  }
818  }
819  }
820  s << std::endl;
821  s << std::endl;
822  s << "Outer pointers:\n";
823  for (Index i=0; i<m.outerSize(); ++i) {
824  s << m.m_outerIndex[i] << " ";
825  }
826  s << " $" << std::endl;
827  if(!m.isCompressed())
828  {
829  s << "Inner non zeros:\n";
830  for (Index i=0; i<m.outerSize(); ++i) {
831  s << m.m_innerNonZeros[i] << " ";
832  }
833  s << " $" << std::endl;
834  }
835  s << std::endl;
836  );
837  s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
838  return s;
839  }
840 
842  inline ~SparseMatrix()
843  {
844  std::free(m_outerIndex);
845  std::free(m_innerNonZeros);
846  }
847 
849  Scalar sum() const;
850 
851 # ifdef EIGEN_SPARSEMATRIX_PLUGIN
852 # include EIGEN_SPARSEMATRIX_PLUGIN
853 # endif
854 
855 protected:
856 
857  template<typename Other>
858  void initAssignment(const Other& other)
859  {
860  resize(other.rows(), other.cols());
861  if(m_innerNonZeros)
862  {
863  std::free(m_innerNonZeros);
864  m_innerNonZeros = 0;
865  }
866  }
867 
870  EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
871 
874  class SingletonVector
875  {
876  StorageIndex m_index;
877  StorageIndex m_value;
878  public:
879  typedef StorageIndex value_type;
880  SingletonVector(Index i, Index v)
881  : m_index(convert_index(i)), m_value(convert_index(v))
882  {}
883 
884  StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
885  };
886 
889  EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
890 
891 public:
894  EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
895  {
896  const Index outer = IsRowMajor ? row : col;
897  const Index inner = IsRowMajor ? col : row;
898 
899  eigen_assert(!isCompressed());
900  eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
901 
902  Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
903  m_data.index(p) = convert_index(inner);
904  return (m_data.value(p) = Scalar(0));
905  }
906 protected:
907  struct IndexPosPair {
908  IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
909  Index i;
910  Index p;
911  };
912 
926  template<typename DiagXpr, typename Func>
927  void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
928  {
929  Index n = diagXpr.size();
930 
931  const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
932  if(overwrite)
933  {
934  if((this->rows()!=n) || (this->cols()!=n))
935  this->resize(n, n);
936  }
937 
938  if(m_data.size()==0 || overwrite)
939  {
940  typedef Array<StorageIndex,Dynamic,1> ArrayXI;
941  this->makeCompressed();
942  this->resizeNonZeros(n);
943  Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
944  Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
945  Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
946  values.setZero();
947  internal::call_assignment_no_alias(values, diagXpr, assignFunc);
948  }
949  else
950  {
951  bool isComp = isCompressed();
952  internal::evaluator<DiagXpr> diaEval(diagXpr);
953  std::vector<IndexPosPair> newEntries;
954 
955  // 1 - try in-place update and record insertion failures
956  for(Index i = 0; i<n; ++i)
957  {
958  internal::LowerBoundIndex lb = this->lower_bound(i,i);
959  Index p = lb.value;
960  if(lb.found)
961  {
962  // the coeff already exists
963  assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
964  }
965  else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
966  {
967  // non compressed mode with local room for inserting one element
968  m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
969  m_innerNonZeros[i]++;
970  m_data.value(p) = Scalar(0);
971  m_data.index(p) = StorageIndex(i);
972  assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
973  }
974  else
975  {
976  // defer insertion
977  newEntries.push_back(IndexPosPair(i,p));
978  }
979  }
980  // 2 - insert deferred entries
981  Index n_entries = Index(newEntries.size());
982  if(n_entries>0)
983  {
984  Storage newData(m_data.size()+n_entries);
985  Index prev_p = 0;
986  Index prev_i = 0;
987  for(Index k=0; k<n_entries;++k)
988  {
989  Index i = newEntries[k].i;
990  Index p = newEntries[k].p;
991  internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
992  internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
993  for(Index j=prev_i;j<i;++j)
994  m_outerIndex[j+1] += k;
995  if(!isComp)
996  m_innerNonZeros[i]++;
997  prev_p = p;
998  prev_i = i;
999  newData.value(p+k) = Scalar(0);
1000  newData.index(p+k) = StorageIndex(i);
1001  assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
1002  }
1003  {
1004  internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
1005  internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
1006  for(Index j=prev_i+1;j<=m_outerSize;++j)
1007  m_outerIndex[j] += n_entries;
1008  }
1009  m_data.swap(newData);
1010  }
1011  }
1012  }
1013 
1014 private:
1015  static void check_template_parameters()
1016  {
1017  EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
1018  EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
1019  }
1020 
1021  struct default_prunning_func {
1022  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
1023  inline bool operator() (const Index&, const Index&, const Scalar& value) const
1024  {
1025  return !internal::isMuchSmallerThan(value, reference, epsilon);
1026  }
1027  Scalar reference;
1028  RealScalar epsilon;
1029  };
1030 };
1031 
1032 namespace internal {
1033 
1034 template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1035 void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
1036 {
1037  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
1038  typedef typename SparseMatrixType::Scalar Scalar;
1039  typedef typename SparseMatrixType::StorageIndex StorageIndex;
1040  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
1041 
1042  if(begin!=end)
1043  {
1044  // pass 1: count the nnz per inner-vector
1045  typename SparseMatrixType::IndexVector wi(trMat.outerSize());
1046  wi.setZero();
1047  for(InputIterator it(begin); it!=end; ++it)
1048  {
1049  eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
1050  wi(IsRowMajor ? it->col() : it->row())++;
1051  }
1052 
1053  // pass 2: insert all the elements into trMat
1054  trMat.reserve(wi);
1055  for(InputIterator it(begin); it!=end; ++it)
1056  trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
1057 
1058  // pass 3: collapse duplicated entries
1059  trMat.collapseDuplicates(dup_func);
1060  }
1061 
1062  // pass 4: transposed copy -> implicit sorting
1063  mat = trMat;
1064 }
1065 
1066 }
1067 
1068 
1106 template<typename Scalar, int _Options, typename _StorageIndex>
1107 template<typename InputIterators>
1108 void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
1109 {
1110  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
1111 }
1112 
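// Usage sketch (editor's addition) of the triplet-based assembly implemented above; duplicates are summed
// by default. rows, cols and estimated_nnz are placeholders:
//   #include <Eigen/Sparse>
//   #include <vector>
//   typedef Eigen::Triplet<double> T;
//   std::vector<T> triplets;
//   triplets.reserve(estimated_nnz);
//   triplets.push_back(T(0, 0, 1.0));
//   triplets.push_back(T(0, 0, 2.0));                       // duplicate of (0,0): will be summed
//   triplets.push_back(T(1, 2, 3.0));
//   Eigen::SparseMatrix<double> A(rows, cols);
//   A.setFromTriplets(triplets.begin(), triplets.end());
//   // now A.coeff(0,0) == 3.0, A.coeff(1,2) == 3.0, and A.isCompressed() == true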
1122 template<typename Scalar, int _Options, typename _StorageIndex>
1123 template<typename InputIterators,typename DupFunctor>
1124 void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
1125 {
1126  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
1127 }
1128 
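// Usage sketch (editor's addition), reusing A and triplets from the sketch above: any binary functor
// Scalar(Scalar,Scalar) can resolve duplicates; here the last value wins instead of being summed (C++11):
//   A.setFromTriplets(triplets.begin(), triplets.end(),
//                     [](const double&, const double& b) { return b; });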
1130 template<typename Scalar, int _Options, typename _StorageIndex>
1131 template<typename DupFunctor>
1132 void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
1133 {
1134  eigen_assert(!isCompressed());
1135  // TODO, in practice we should be able to use m_innerNonZeros for that task
1136  IndexVector wi(innerSize());
1137  wi.fill(-1);
1138  StorageIndex count = 0;
1139  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1140  for(Index j=0; j<outerSize(); ++j)
1141  {
1142  StorageIndex start = count;
1143  Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1144  for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1145  {
1146  Index i = m_data.index(k);
1147  if(wi(i)>=start)
1148  {
1149  // we have already met this entry => accumulate it
1150  m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1151  }
1152  else
1153  {
1154  m_data.value(count) = m_data.value(k);
1155  m_data.index(count) = m_data.index(k);
1156  wi(i) = count;
1157  ++count;
1158  }
1159  }
1160  m_outerIndex[j] = start;
1161  }
1162  m_outerIndex[m_outerSize] = count;
1163 
1164  // turn the matrix into compressed form
1165  std::free(m_innerNonZeros);
1166  m_innerNonZeros = 0;
1167  m_data.resize(m_outerIndex[m_outerSize]);
1168 }
1169 
1170 template<typename Scalar, int _Options, typename _StorageIndex>
1171 template<typename OtherDerived>
1172 EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
1173 {
1174  EIGEN_STATIC_ASSERT((internal::is_same<typename OtherDerived::Scalar, Scalar>::value),
1175  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1176 
1177  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1178  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1179  #endif
1180 
1181  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1182  if (needToTranspose)
1183  {
1184  #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1185  EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1186  #endif
1187  // two passes algorithm:
1188  // 1 - compute the number of coeffs per dest inner vector
1189  // 2 - do the actual copy/eval
1190  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1191  typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
1192  typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1193  typedef internal::evaluator<_OtherCopy> OtherCopyEval;
1194  OtherCopy otherCopy(other.derived());
1195  OtherCopyEval otherCopyEval(otherCopy);
1196 
1197  SparseMatrix dest(other.rows(),other.cols());
1198  Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1199 
1200  // pass 1
1201  // FIXME the above copy could be merged with that pass
1202  for (Index j=0; j<otherCopy.outerSize(); ++j)
1203  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1204  ++dest.m_outerIndex[it.index()];
1205 
1206  // prefix sum
1207  StorageIndex count = 0;
1208  IndexVector positions(dest.outerSize());
1209  for (Index j=0; j<dest.outerSize(); ++j)
1210  {
1211  StorageIndex tmp = dest.m_outerIndex[j];
1212  dest.m_outerIndex[j] = count;
1213  positions[j] = count;
1214  count += tmp;
1215  }
1216  dest.m_outerIndex[dest.outerSize()] = count;
1217  // alloc
1218  dest.m_data.resize(count);
1219  // pass 2
1220  for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1221  {
1222  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1223  {
1224  Index pos = positions[it.index()]++;
1225  dest.m_data.index(pos) = j;
1226  dest.m_data.value(pos) = it.value();
1227  }
1228  }
1229  this->swap(dest);
1230  return *this;
1231  }
1232  else
1233  {
1234  if(other.isRValue())
1235  {
1236  initAssignment(other.derived());
1237  }
1238  // there is no special optimization
1239  return Base::operator=(other.derived());
1240  }
1241 }
1242 
1243 template<typename _Scalar, int _Options, typename _StorageIndex>
1244 typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
1245 {
1246  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
1247 
1248  const Index outer = IsRowMajor ? row : col;
1249  const Index inner = IsRowMajor ? col : row;
1250 
1251  if(isCompressed())
1252  {
1253  if(nonZeros()==0)
1254  {
1255  // reserve space if not already done
1256  if(m_data.allocatedSize()==0)
1257  m_data.reserve(2*m_innerSize);
1258 
1259  // turn the matrix into non-compressed mode
1260  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1261  if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1262 
1263  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
1264 
1265  // pack all inner-vectors to the end of the pre-allocated space
1266  // and allocate the entire free-space to the first inner-vector
1267  StorageIndex end = convert_index(m_data.allocatedSize());
1268  for(Index j=1; j<=m_outerSize; ++j)
1269  m_outerIndex[j] = end;
1270  }
1271  else
1272  {
1273  // turn the matrix into non-compressed mode
1274  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1275  if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1276  for(Index j=0; j<m_outerSize; ++j)
1277  m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
1278  }
1279  }
1280 
1281  // check whether we can do a fast "push back" insertion
1282  Index data_end = m_data.allocatedSize();
1283 
1284  // First case: we are filling a new inner vector which is packed at the end.
1285  // We assume that all remaining inner-vectors are also empty and packed to the end.
1286  if(m_outerIndex[outer]==data_end)
1287  {
1288  eigen_internal_assert(m_innerNonZeros[outer]==0);
1289 
1290  // pack previous empty inner-vectors to end of the used-space
1291  // and allocate the entire free-space to the current inner-vector.
1292  StorageIndex p = convert_index(m_data.size());
1293  Index j = outer;
1294  while(j>=0 && m_innerNonZeros[j]==0)
1295  m_outerIndex[j--] = p;
1296 
1297  // push back the new element
1298  ++m_innerNonZeros[outer];
1299  m_data.append(Scalar(0), inner);
1300 
1301  // check for reallocation
1302  if(data_end != m_data.allocatedSize())
1303  {
1304  // m_data has been reallocated
1305  // -> move remaining inner-vectors back to the end of the free-space
1306  // so that the entire free-space is allocated to the current inner-vector.
1307  eigen_internal_assert(data_end < m_data.allocatedSize());
1308  StorageIndex new_end = convert_index(m_data.allocatedSize());
1309  for(Index k=outer+1; k<=m_outerSize; ++k)
1310  if(m_outerIndex[k]==data_end)
1311  m_outerIndex[k] = new_end;
1312  }
1313  return m_data.value(p);
1314  }
1315 
1316  // Second case: the next inner-vector is packed to the end
1317  // and the end of the current inner-vector matches the used-space.
1318  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
1319  {
1320  eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
1321 
1322  // add space for the new element
1323  ++m_innerNonZeros[outer];
1324  m_data.resize(m_data.size()+1);
1325 
1326  // check for reallocation
1327  if(data_end != m_data.allocatedSize())
1328  {
1329  // m_data has been reallocated
1330  // -> move remaining inner-vectors back to the end of the free-space
1331  // so that the entire free-space is allocated to the current inner-vector.
1332  eigen_internal_assert(data_end < m_data.allocatedSize());
1333  StorageIndex new_end = convert_index(m_data.allocatedSize());
1334  for(Index k=outer+1; k<=m_outerSize; ++k)
1335  if(m_outerIndex[k]==data_end)
1336  m_outerIndex[k] = new_end;
1337  }
1338 
1339  // and insert it at the right position (sorted insertion)
1340  Index startId = m_outerIndex[outer];
1341  Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
1342  while ( (p > startId) && (m_data.index(p-1) > inner) )
1343  {
1344  m_data.index(p) = m_data.index(p-1);
1345  m_data.value(p) = m_data.value(p-1);
1346  --p;
1347  }
1348 
1349  m_data.index(p) = convert_index(inner);
1350  return (m_data.value(p) = Scalar(0));
1351  }
1352 
1353  if(m_data.size() != m_data.allocatedSize())
1354  {
1355  // make sure the matrix is compatible with random un-compressed insertion:
1356  m_data.resize(m_data.allocatedSize());
1357  this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
1358  }
1359 
1360  return insertUncompressed(row,col);
1361 }
1362 
1363 template<typename _Scalar, int _Options, typename _StorageIndex>
1364 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
1365 {
1366  eigen_assert(!isCompressed());
1367 
1368  const Index outer = IsRowMajor ? row : col;
1369  const StorageIndex inner = convert_index(IsRowMajor ? col : row);
1370 
1371  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1372  StorageIndex innerNNZ = m_innerNonZeros[outer];
1373  if(innerNNZ>=room)
1374  {
1375  // this inner vector is full, we need to reallocate the whole buffer :(
1376  reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
1377  }
1378 
1379  Index startId = m_outerIndex[outer];
1380  Index p = startId + m_innerNonZeros[outer];
1381  while ( (p > startId) && (m_data.index(p-1) > inner) )
1382  {
1383  m_data.index(p) = m_data.index(p-1);
1384  m_data.value(p) = m_data.value(p-1);
1385  --p;
1386  }
1387  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1388 
1389  m_innerNonZeros[outer]++;
1390 
1391  m_data.index(p) = inner;
1392  return (m_data.value(p) = Scalar(0));
1393 }
1394 
1395 template<typename _Scalar, int _Options, typename _StorageIndex>
1396 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
1397 {
1398  eigen_assert(isCompressed());
1399 
1400  const Index outer = IsRowMajor ? row : col;
1401  const Index inner = IsRowMajor ? col : row;
1402 
1403  Index previousOuter = outer;
1404  if (m_outerIndex[outer+1]==0)
1405  {
1406  // we start a new inner vector
1407  while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1408  {
1409  m_outerIndex[previousOuter] = convert_index(m_data.size());
1410  --previousOuter;
1411  }
1412  m_outerIndex[outer+1] = m_outerIndex[outer];
1413  }
1414 
1415  // here we have to handle the tricky case where the outerIndex array
1416  // starts with: [ 0 0 0 0 0 1 ...] and we are inserting in, e.g.,
1417  // the 2nd inner vector...
1418  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1419  && (std::size_t(m_outerIndex[outer+1]) == m_data.size());
1420 
1421  std::size_t startId = m_outerIndex[outer];
1422  // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
1423  std::size_t p = m_outerIndex[outer+1];
1424  ++m_outerIndex[outer+1];
1425 
1426  double reallocRatio = 1;
1427  if (m_data.allocatedSize()<=m_data.size())
1428  {
1429  // if there is no preallocated memory, let's reserve a minimum of 32 elements
1430  if (m_data.size()==0)
1431  {
1432  m_data.reserve(32);
1433  }
1434  else
1435  {
1436  // we need to reallocate the data, to reduce multiple reallocations
1437  // we use a smart resize algorithm based on the current filling ratio
1438  // in addition, we use double to avoid integer overflows
1439  double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1440  reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1441  // furthermore we bound the realloc ratio to:
1442  // 1) reduce multiple minor reallocs when the matrix is almost filled
1443  // 2) avoid allocating too much memory when the matrix is almost empty
1444  reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1445  }
1446  }
1447  m_data.resize(m_data.size()+1,reallocRatio);
1448 
1449  if (!isLastVec)
1450  {
1451  if (previousOuter==-1)
1452  {
1453  // oops wrong guess.
1454  // let's correct the outer offsets
1455  for (Index k=0; k<=(outer+1); ++k)
1456  m_outerIndex[k] = 0;
1457  Index k=outer+1;
1458  while(m_outerIndex[k]==0)
1459  m_outerIndex[k++] = 1;
1460  while (k<=m_outerSize && m_outerIndex[k]!=0)
1461  m_outerIndex[k++]++;
1462  p = 0;
1463  --k;
1464  k = m_outerIndex[k]-1;
1465  while (k>0)
1466  {
1467  m_data.index(k) = m_data.index(k-1);
1468  m_data.value(k) = m_data.value(k-1);
1469  k--;
1470  }
1471  }
1472  else
1473  {
1474  // we are not inserting into the last inner vec
1475  // update outer indices:
1476  Index j = outer+2;
1477  while (j<=m_outerSize && m_outerIndex[j]!=0)
1478  m_outerIndex[j++]++;
1479  --j;
1480  // shift data of last vecs:
1481  Index k = m_outerIndex[j]-1;
1482  while (k>=Index(p))
1483  {
1484  m_data.index(k) = m_data.index(k-1);
1485  m_data.value(k) = m_data.value(k-1);
1486  k--;
1487  }
1488  }
1489  }
1490 
1491  while ( (p > startId) && (m_data.index(p-1) > inner) )
1492  {
1493  m_data.index(p) = m_data.index(p-1);
1494  m_data.value(p) = m_data.value(p-1);
1495  --p;
1496  }
1497 
1498  m_data.index(p) = inner;
1499  return (m_data.value(p) = Scalar(0));
1500 }
1501 
1502 namespace internal {
1503 
1504 template<typename _Scalar, int _Options, typename _StorageIndex>
1505 struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
1506  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
1507 {
1508  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
1509  typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
1510  evaluator() : Base() {}
1511  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
1512 };
1513 
1514 }
1515 
1516 } // end namespace Eigen
1517 
1518 #endif // EIGEN_SPARSEMATRIX_H