SparseMatrix.h
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSEMATRIX_H
11 #define EIGEN_SPARSEMATRIX_H
12 
13 namespace Eigen {
14 
45 namespace internal {
46 template<typename _Scalar, int _Options, typename _StorageIndex>
47 struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
48 {
49  typedef _Scalar Scalar;
50  typedef _StorageIndex StorageIndex;
51  typedef Sparse StorageKind;
52  typedef MatrixXpr XprKind;
53  enum {
54  RowsAtCompileTime = Dynamic,
55  ColsAtCompileTime = Dynamic,
56  MaxRowsAtCompileTime = Dynamic,
57  MaxColsAtCompileTime = Dynamic,
58  Flags = _Options | NestByRefBit | LvalueBit | CompressedAccessBit,
59  SupportedAccessPatterns = InnerRandomAccessPattern
60  };
61 };
62 
63 template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
64 struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
65 {
66  typedef SparseMatrix<_Scalar, _Options, _StorageIndex> MatrixType;
67  typedef typename ref_selector<MatrixType>::type MatrixTypeNested;
68  typedef typename remove_reference<MatrixTypeNested>::type _MatrixTypeNested;
69 
70  typedef _Scalar Scalar;
71  typedef Dense StorageKind;
72  typedef _StorageIndex StorageIndex;
73  typedef MatrixXpr XprKind;
74 
75  enum {
76  RowsAtCompileTime = Dynamic,
77  ColsAtCompileTime = 1,
78  MaxRowsAtCompileTime = Dynamic,
79  MaxColsAtCompileTime = 1,
80  Flags = LvalueBit
81  };
82 };
83 
84 template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
85 struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
86  : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
87 {
88  enum {
89  Flags = 0
90  };
91 };
92 
93 } // end namespace internal
94 
95 template<typename _Scalar, int _Options, typename _StorageIndex>
96 class SparseMatrix
97  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
98 {
99  typedef SparseCompressedBase<SparseMatrix> Base;
100  using Base::convert_index;
101  friend class SparseVector<_Scalar,0,_StorageIndex>;
102  template<typename, typename, typename, typename, typename>
103  friend struct internal::Assignment;
104  public:
105  using Base::isCompressed;
106  using Base::nonZeros;
107  EIGEN_SPARSE_PUBLIC_INTERFACE(SparseMatrix)
108  using Base::operator+=;
109  using Base::operator-=;
110 
111  typedef MappedSparseMatrix<Scalar,Flags> Map;
112  typedef Diagonal<SparseMatrix> DiagonalReturnType;
113  typedef Diagonal<const SparseMatrix> ConstDiagonalReturnType;
114  typedef typename Base::InnerIterator InnerIterator;
115  typedef typename Base::ReverseInnerIterator ReverseInnerIterator;
116 
117 
118  using Base::IsRowMajor;
119  typedef internal::CompressedStorage<Scalar,StorageIndex> Storage;
120  enum {
121  Options = _Options
122  };
123 
124  typedef typename Base::IndexVector IndexVector;
125  typedef typename Base::ScalarVector ScalarVector;
126  protected:
127  typedef SparseMatrix<Scalar,(Flags&~RowMajorBit)|(IsRowMajor?RowMajorBit:0),StorageIndex> TransposedSparseMatrix;
128 
129  Index m_outerSize;
130  Index m_innerSize;
131  StorageIndex* m_outerIndex;
132  StorageIndex* m_innerNonZeros; // optional, if null then the data is compressed
133  Storage m_data;
134 
135  public:
136 
138  inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
140  inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
141 
143  inline Index innerSize() const { return m_innerSize; }
145  inline Index outerSize() const { return m_outerSize; }
146 
150  inline const Scalar* valuePtr() const { return m_data.valuePtr(); }
154  inline Scalar* valuePtr() { return m_data.valuePtr(); }
155 
159  inline const StorageIndex* innerIndexPtr() const { return m_data.indexPtr(); }
163  inline StorageIndex* innerIndexPtr() { return m_data.indexPtr(); }
164 
168  inline const StorageIndex* outerIndexPtr() const { return m_outerIndex; }
172  inline StorageIndex* outerIndexPtr() { return m_outerIndex; }
173 
177  inline const StorageIndex* innerNonZeroPtr() const { return m_innerNonZeros; }
181  inline StorageIndex* innerNonZeroPtr() { return m_innerNonZeros; }
182 
184  inline Storage& data() { return m_data; }
186  inline const Storage& data() const { return m_data; }
187 
190  inline Scalar coeff(Index row, Index col) const
191  {
192  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
193 
194  const Index outer = IsRowMajor ? row : col;
195  const Index inner = IsRowMajor ? col : row;
196  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
197  return m_data.atInRange(m_outerIndex[outer], end, StorageIndex(inner));
198  }
199 
208  inline Scalar& coeffRef(Index row, Index col)
209  {
210  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
211 
212  const Index outer = IsRowMajor ? row : col;
213  const Index inner = IsRowMajor ? col : row;
214 
215  Index start = m_outerIndex[outer];
216  Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
217  eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
218  if(end<=start)
219  return insert(row,col);
220  const Index p = m_data.searchLowerIndex(start,end-1,StorageIndex(inner));
221  if((p<end) && (m_data.index(p)==inner))
222  return m_data.value(p);
223  else
224  return insert(row,col);
225  }
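 // --- editor's usage sketch (not part of the original file) ---
 // coeff() is a pure read, while coeffRef() inserts a zero entry when
 // (row,col) is not stored yet, so prefer coeff() for queries:
 //   Eigen::SparseMatrix<double> A(10,10);
 //   A.coeffRef(3,4) = 2.5;        // creates the entry if absent
 //   double v = A.coeff(3,4);      // reads 2.5, never inserts
 //   double z = A.coeff(0,0);      // 0.0, and A is left unchanged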
226 
243 
244  public:
245 
253  inline void setZero()
254  {
255  m_data.clear();
256  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
257  if(m_innerNonZeros)
258  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
259  }
260 
264  inline void reserve(Index reserveSize)
265  {
266  eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
267  m_data.reserve(reserveSize);
268  }
269 
270  #ifdef EIGEN_PARSED_BY_DOXYGEN
271 
283  template<class SizesType>
284  inline void reserve(const SizesType& reserveSizes);
285  #else
286  template<class SizesType>
287  inline void reserve(const SizesType& reserveSizes, const typename SizesType::value_type& enableif =
288  #if (!EIGEN_COMP_MSVC) || (EIGEN_COMP_MSVC>=1500) // MSVC 2005 fails to compile with this typename
289  typename
290  #endif
291  SizesType::value_type())
292  {
293  EIGEN_UNUSED_VARIABLE(enableif);
294  reserveInnerVectors(reserveSizes);
295  }
296  #endif // EIGEN_PARSED_BY_DOXYGEN
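 // --- editor's usage sketch (not part of the original file) ---
 // The per-inner-vector overload is the one to call before random insertion;
 // here a VectorXi supplies one capacity per column of a column-major matrix:
 //   Eigen::SparseMatrix<double> A(1000,1000);
 //   A.reserve(Eigen::VectorXi::Constant(1000, 6));   // ~6 nonzeros per column
 //   // ... A.insert(i,j) = v_ij; for each new coefficient ...
 //   A.makeCompressed();                              // squeeze out the slack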
297  protected:
298  template<class SizesType>
299  inline void reserveInnerVectors(const SizesType& reserveSizes)
300  {
301  if(isCompressed())
302  {
303  Index totalReserveSize = 0;
304  // turn the matrix into non-compressed mode
305  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
306  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
307 
308  // temporarily use m_innerNonZeros to hold the new starting points.
309  StorageIndex* newOuterIndex = m_innerNonZeros;
310 
311  StorageIndex count = 0;
312  for(Index j=0; j<m_outerSize; ++j)
313  {
314  newOuterIndex[j] = count;
315  count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
316  totalReserveSize += reserveSizes[j];
317  }
318  m_data.reserve(totalReserveSize);
319  StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
320  for(Index j=m_outerSize-1; j>=0; --j)
321  {
322  StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
323  for(Index i=innerNNZ-1; i>=0; --i)
324  {
325  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
326  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
327  }
328  previousOuterIndex = m_outerIndex[j];
329  m_outerIndex[j] = newOuterIndex[j];
330  m_innerNonZeros[j] = innerNNZ;
331  }
332  if(m_outerSize>0)
333  m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];
334 
335  m_data.resize(m_outerIndex[m_outerSize]);
336  }
337  else
338  {
339  StorageIndex* newOuterIndex = static_cast<StorageIndex*>(std::malloc((m_outerSize+1)*sizeof(StorageIndex)));
340  if (!newOuterIndex) internal::throw_std_bad_alloc();
341 
342  StorageIndex count = 0;
343  for(Index j=0; j<m_outerSize; ++j)
344  {
345  newOuterIndex[j] = count;
346  StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
347  StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
348  count += toReserve + m_innerNonZeros[j];
349  }
350  newOuterIndex[m_outerSize] = count;
351 
352  m_data.resize(count);
353  for(Index j=m_outerSize-1; j>=0; --j)
354  {
355  Index offset = newOuterIndex[j] - m_outerIndex[j];
356  if(offset>0)
357  {
358  StorageIndex innerNNZ = m_innerNonZeros[j];
359  for(Index i=innerNNZ-1; i>=0; --i)
360  {
361  m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
362  m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
363  }
364  }
365  }
366 
367  std::swap(m_outerIndex, newOuterIndex);
368  std::free(newOuterIndex);
369  }
370 
371  }
372  public:
373 
374  //--- low level purely coherent filling ---
375 
386  inline Scalar& insertBack(Index row, Index col)
387  {
388  return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
389  }
390 
393  inline Scalar& insertBackByOuterInner(Index outer, Index inner)
394  {
395  eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
396  eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
397  Index p = m_outerIndex[outer+1];
398  ++m_outerIndex[outer+1];
399  m_data.append(Scalar(0), inner);
400  return m_data.value(p);
401  }
402 
405  inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
406  {
407  Index p = m_outerIndex[outer+1];
408  ++m_outerIndex[outer+1];
409  m_data.append(Scalar(0), inner);
410  return m_data.value(p);
411  }
412 
415  inline void startVec(Index outer)
416  {
417  eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
418  eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
419  m_outerIndex[outer+1] = m_outerIndex[outer];
420  }
421 
425  inline void finalize()
426  {
427  if(isCompressed())
428  {
429  StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
430  Index i = m_outerSize;
431  // find the last filled column
432  while (i>=0 && m_outerIndex[i]==0)
433  --i;
434  ++i;
435  while (i<=m_outerSize)
436  {
437  m_outerIndex[i] = size;
438  ++i;
439  }
440  }
441  }
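 // --- editor's usage sketch (not part of the original file) ---
 // The low-level coherent-filling API requires strictly increasing indices:
 // call startVec() for every inner vector in order (even empty ones), append
 // with insertBack(), and terminate with finalize():
 //   Eigen::SparseMatrix<double> A(4,4);   // column-major: outer == column
 //   A.startVec(0);
 //   A.insertBack(1,0) = 7.0;
 //   A.startVec(1);
 //   A.startVec(2);
 //   A.insertBack(0,2) = 3.0;
 //   A.startVec(3);
 //   A.finalize();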
442 
443  //---
444 
445  template<typename InputIterators>
446  void setFromTriplets(const InputIterators& begin, const InputIterators& end);
447 
448  template<typename InputIterators,typename DupFunctor>
449  void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);
450 
451  void sumupDuplicates() { collapseDuplicates(internal::scalar_sum_op<Scalar,Scalar>()); }
452 
453  template<typename DupFunctor>
454  void collapseDuplicates(DupFunctor dup_func = DupFunctor());
455 
456  //---
457 
460  Scalar& insertByOuterInner(Index j, Index i)
461  {
462  return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
463  }
464 
467  void makeCompressed()
468  {
469  if(isCompressed())
470  return;
471 
472  eigen_internal_assert(m_outerIndex!=0 && m_outerSize>0);
473 
474  Index oldStart = m_outerIndex[1];
475  m_outerIndex[1] = m_innerNonZeros[0];
476  for(Index j=1; j<m_outerSize; ++j)
477  {
478  Index nextOldStart = m_outerIndex[j+1];
479  Index offset = oldStart - m_outerIndex[j];
480  if(offset>0)
481  {
482  for(Index k=0; k<m_innerNonZeros[j]; ++k)
483  {
484  m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
485  m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
486  }
487  }
488  m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
489  oldStart = nextOldStart;
490  }
491  std::free(m_innerNonZeros);
492  m_innerNonZeros = 0;
493  m_data.resize(m_outerIndex[m_outerSize]);
494  m_data.squeeze();
495  }
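 // --- editor's usage sketch (not part of the original file) ---
 // Random insertion switches the matrix to the uncompressed layout;
 // makeCompressed() removes the per-vector slack and restores plain CCS/CRS:
 //   Eigen::SparseMatrix<double> A(3,3);
 //   A.insert(2,1) = 4.0;          // A is now in uncompressed mode
 //   A.makeCompressed();           // back to compressed: isCompressed()==true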
496 
498  void uncompress()
499  {
500  if(m_innerNonZeros != 0)
501  return;
502  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
503  for (Index i = 0; i < m_outerSize; i++)
504  {
505  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
506  }
507  }
508 
510  void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
511  {
512  prune(default_prunning_func(reference,epsilon));
513  }
514 
522  template<typename KeepFunc>
523  void prune(const KeepFunc& keep = KeepFunc())
524  {
525  // TODO optimize the uncompressed mode to avoid moving and allocating the data twice
526  makeCompressed();
527 
528  StorageIndex k = 0;
529  for(Index j=0; j<m_outerSize; ++j)
530  {
531  Index previousStart = m_outerIndex[j];
532  m_outerIndex[j] = k;
533  Index end = m_outerIndex[j+1];
534  for(Index i=previousStart; i<end; ++i)
535  {
536  if(keep(IsRowMajor?j:m_data.index(i), IsRowMajor?m_data.index(i):j, m_data.value(i)))
537  {
538  m_data.value(k) = m_data.value(i);
539  m_data.index(k) = m_data.index(i);
540  ++k;
541  }
542  }
543  }
544  m_outerIndex[m_outerSize] = k;
545  m_data.resize(k,0);
546  }
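 // --- editor's usage sketch (not part of the original file) ---
 // The functor overload receives (row, col, value) and returns true to keep
 // the entry; the scalar overload keeps v when |v| > epsilon*|reference|:
 //   A.prune(1.0, 1e-9);           // drop entries tiny relative to 1.0
 //   A.prune([](const Eigen::Index&, const Eigen::Index&, const double& v)
 //           { return std::abs(v) > 1e-12; });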
547 
556  void conservativeResize(Index rows, Index cols)
557  {
558  // No change
559  if (this->rows() == rows && this->cols() == cols) return;
560 
561  // If one dimension is null, then there is nothing to be preserved
562  if(rows==0 || cols==0) return resize(rows,cols);
563 
564  Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
565  Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
566  StorageIndex newInnerSize = convert_index(IsRowMajor ? cols : rows);
567 
568  // Deals with inner non zeros
569  if (m_innerNonZeros)
570  {
571  // Resize m_innerNonZeros
572  StorageIndex *newInnerNonZeros = static_cast<StorageIndex*>(std::realloc(m_innerNonZeros, (m_outerSize + outerChange) * sizeof(StorageIndex)));
573  if (!newInnerNonZeros) internal::throw_std_bad_alloc();
574  m_innerNonZeros = newInnerNonZeros;
575 
576  for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
577  m_innerNonZeros[i] = 0;
578  }
579  else if (innerChange < 0)
580  {
581  // Inner size decreased: allocate a new m_innerNonZeros
582  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc((m_outerSize + outerChange) * sizeof(StorageIndex)));
583  if (!m_innerNonZeros) internal::throw_std_bad_alloc();
584  for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
585  m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
586  for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
587  m_innerNonZeros[i] = 0;
588  }
589 
590  // Change the m_innerNonZeros in case of a decrease of inner size
591  if (m_innerNonZeros && innerChange < 0)
592  {
593  for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
594  {
595  StorageIndex &n = m_innerNonZeros[i];
596  StorageIndex start = m_outerIndex[i];
597  while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
598  }
599  }
600 
601  m_innerSize = newInnerSize;
602 
603  // Re-allocate outer index structure if necessary
604  if (outerChange == 0)
605  return;
606 
607  StorageIndex *newOuterIndex = static_cast<StorageIndex*>(std::realloc(m_outerIndex, (m_outerSize + outerChange + 1) * sizeof(StorageIndex)));
608  if (!newOuterIndex) internal::throw_std_bad_alloc();
609  m_outerIndex = newOuterIndex;
610  if (outerChange > 0)
611  {
612  StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
613  for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
614  m_outerIndex[i] = lastIdx;
615  }
616  m_outerSize += outerChange;
617  }
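 // --- editor's usage sketch (not part of the original file) ---
 // conservativeResize() keeps the entries that still fit in the new bounds,
 // whereas resize() (below) discards everything:
 //   Eigen::SparseMatrix<double> A(4,4);
 //   A.insert(1,1) = 5.0;
 //   A.conservativeResize(3,3);    // entry (1,1) is preserved
 //   A.resize(3,3);                // A is now empty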
618 
626  void resize(Index rows, Index cols)
627  {
628  const Index outerSize = IsRowMajor ? rows : cols;
629  m_innerSize = IsRowMajor ? cols : rows;
630  m_data.clear();
631  if (m_outerSize != outerSize || m_outerSize==0)
632  {
633  std::free(m_outerIndex);
634  m_outerIndex = static_cast<StorageIndex*>(std::malloc((outerSize + 1) * sizeof(StorageIndex)));
635  if (!m_outerIndex) internal::throw_std_bad_alloc();
636 
637  m_outerSize = outerSize;
638  }
639  if(m_innerNonZeros)
640  {
641  std::free(m_innerNonZeros);
642  m_innerNonZeros = 0;
643  }
644  memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
645  }
646 
649  inline void resizeNonZeros(Index size)
650  {
651  m_data.resize(size);
652  }
653 
655  const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
656 
661  DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
662 
664  inline SparseMatrix()
665  : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
666  {
667  check_template_parameters();
668  resize(0, 0);
669  }
670 
672  inline SparseMatrix(Index rows, Index cols)
673  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
674  {
675  check_template_parameters();
676  resize(rows, cols);
677  }
678 
680  template<typename OtherDerived>
681  inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
682  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
683  {
684  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
685  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
686  check_template_parameters();
687  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
688  if (needToTranspose)
689  *this = other.derived();
690  else
691  {
692  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
693  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
694  #endif
695  internal::call_assignment_no_alias(*this, other.derived());
696  }
697  }
698 
700  template<typename OtherDerived, unsigned int UpLo>
701  inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
702  : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
703  {
704  check_template_parameters();
705  Base::operator=(other);
706  }
707 
709  inline SparseMatrix(const SparseMatrix& other)
710  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
711  {
712  check_template_parameters();
713  *this = other.derived();
714  }
715 
717  template<typename OtherDerived>
718  SparseMatrix(const ReturnByValue<OtherDerived>& other)
719  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
720  {
721  check_template_parameters();
722  initAssignment(other);
723  other.evalTo(*this);
724  }
725 
727  template<typename OtherDerived>
728  explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
729  : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
730  {
731  check_template_parameters();
732  *this = other.derived();
733  }
734 
737  inline void swap(SparseMatrix& other)
738  {
739  //EIGEN_DBG_SPARSE(std::cout << "SparseMatrix:: swap\n");
740  std::swap(m_outerIndex, other.m_outerIndex);
741  std::swap(m_innerSize, other.m_innerSize);
742  std::swap(m_outerSize, other.m_outerSize);
743  std::swap(m_innerNonZeros, other.m_innerNonZeros);
744  m_data.swap(other.m_data);
745  }
746 
749  inline void setIdentity()
750  {
751  eigen_assert(rows() == cols() && "ONLY FOR SQUARED MATRICES");
752  this->m_data.resize(rows());
753  Eigen::Map<IndexVector>(this->m_data.indexPtr(), rows()).setLinSpaced(0, StorageIndex(rows()-1));
754  Eigen::Map<ScalarVector>(this->m_data.valuePtr(), rows()).setOnes();
755  Eigen::Map<IndexVector>(this->m_outerIndex, rows()+1).setLinSpaced(0, StorageIndex(rows()));
756  std::free(m_innerNonZeros);
757  m_innerNonZeros = 0;
758  }
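 // --- editor's usage sketch (not part of the original file) ---
 // setIdentity() builds the compressed identity directly, one entry per
 // column, with no temporary triplet list:
 //   Eigen::SparseMatrix<double> I(5,5);  // must be square
 //   I.setIdentity();                     // I.nonZeros() == 5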
759  inline SparseMatrix& operator=(const SparseMatrix& other)
760  {
761  if (other.isRValue())
762  {
763  swap(other.const_cast_derived());
764  }
765  else if(this!=&other)
766  {
767  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
768  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
769  #endif
770  initAssignment(other);
771  if(other.isCompressed())
772  {
773  internal::smart_copy(other.m_outerIndex, other.m_outerIndex + m_outerSize + 1, m_outerIndex);
774  m_data = other.m_data;
775  }
776  else
777  {
778  Base::operator=(other);
779  }
780  }
781  return *this;
782  }
783 
784 #ifndef EIGEN_PARSED_BY_DOXYGEN
785  template<typename OtherDerived>
786  inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
787  { return Base::operator=(other.derived()); }
788 
789  template<typename Lhs, typename Rhs>
790  inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
791 #endif // EIGEN_PARSED_BY_DOXYGEN
792 
793  template<typename OtherDerived>
794  EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
795 
796  friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
797  {
798  EIGEN_DBG_SPARSE(
799  s << "Nonzero entries:\n";
800  if(m.isCompressed())
801  {
802  for (Index i=0; i<m.nonZeros(); ++i)
803  s << "(" << m.m_data.value(i) << "," << m.m_data.index(i) << ") ";
804  }
805  else
806  {
807  for (Index i=0; i<m.outerSize(); ++i)
808  {
809  Index p = m.m_outerIndex[i];
810  Index pe = m.m_outerIndex[i]+m.m_innerNonZeros[i];
811  Index k=p;
812  for (; k<pe; ++k) {
813  s << "(" << m.m_data.value(k) << "," << m.m_data.index(k) << ") ";
814  }
815  for (; k<m.m_outerIndex[i+1]; ++k) {
816  s << "(_,_) ";
817  }
818  }
819  }
820  s << std::endl;
821  s << std::endl;
822  s << "Outer pointers:\n";
823  for (Index i=0; i<m.outerSize(); ++i) {
824  s << m.m_outerIndex[i] << " ";
825  }
826  s << " $" << std::endl;
827  if(!m.isCompressed())
828  {
829  s << "Inner non zeros:\n";
830  for (Index i=0; i<m.outerSize(); ++i) {
831  s << m.m_innerNonZeros[i] << " ";
832  }
833  s << " $" << std::endl;
834  }
835  s << std::endl;
836  );
837  s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
838  return s;
839  }
840 
842  inline ~SparseMatrix()
843  {
844  std::free(m_outerIndex);
845  std::free(m_innerNonZeros);
846  }
847 
849  Scalar sum() const;
850 
851 # ifdef EIGEN_SPARSEMATRIX_PLUGIN
852 # include EIGEN_SPARSEMATRIX_PLUGIN
853 # endif
854 
855 protected:
856 
857  template<typename Other>
858  void initAssignment(const Other& other)
859  {
860  resize(other.rows(), other.cols());
861  if(m_innerNonZeros)
862  {
863  std::free(m_innerNonZeros);
864  m_innerNonZeros = 0;
865  }
866  }
867 
870  EIGEN_DONT_INLINE Scalar& insertCompressed(Index row, Index col);
871 
874  class SingletonVector
875  {
876  StorageIndex m_index;
877  StorageIndex m_value;
878  public:
879  typedef StorageIndex value_type;
880  SingletonVector(Index i, Index v)
881  : m_index(convert_index(i)), m_value(convert_index(v))
882  {}
883 
884  StorageIndex operator[](Index i) const { return i==m_index ? m_value : 0; }
885  };
886 
889  EIGEN_DONT_INLINE Scalar& insertUncompressed(Index row, Index col);
890 
891 public:
894  EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
895  {
896  const Index outer = IsRowMajor ? row : col;
897  const Index inner = IsRowMajor ? col : row;
898 
899  eigen_assert(!isCompressed());
900  eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));
901 
902  Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
903  m_data.index(p) = convert_index(inner);
904  return (m_data.value(p) = Scalar(0));
905  }
906 protected:
907  struct IndexPosPair {
908  IndexPosPair(Index a_i, Index a_p) : i(a_i), p(a_p) {}
909  Index i;
910  Index p;
911  };
912 
926  template<typename DiagXpr, typename Func>
927  void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
928  {
929  Index n = diagXpr.size();
930 
931  const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
932  if(overwrite)
933  {
934  if((this->rows()!=n) || (this->cols()!=n))
935  this->resize(n, n);
936  }
937 
938  if(m_data.size()==0 || overwrite)
939  {
940  typedef Array<StorageIndex,Dynamic,1> ArrayXI;
941  this->makeCompressed();
942  this->resizeNonZeros(n);
943  Eigen::Map<ArrayXI>(this->innerIndexPtr(), n).setLinSpaced(0,StorageIndex(n)-1);
944  Eigen::Map<ArrayXI>(this->outerIndexPtr(), n+1).setLinSpaced(0,StorageIndex(n));
945  Eigen::Map<Array<Scalar,Dynamic,1> > values = this->coeffs();
946  values.setZero();
947  internal::call_assignment_no_alias(values, diagXpr, assignFunc);
948  }
949  else
950  {
951  bool isComp = isCompressed();
952  internal::evaluator<DiagXpr> diaEval(diagXpr);
953  std::vector<IndexPosPair> newEntries;
954 
955  // 1 - try in-place update and record insertion failures
956  for(Index i = 0; i<n; ++i)
957  {
958  internal::LowerBoundIndex lb = this->lower_bound(i,i);
959  Index p = lb.value;
960  if(lb.found)
961  {
962  // the coeff already exists
963  assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
964  }
965  else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
966  {
967  // non compressed mode with local room for inserting one element
968  m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
969  m_innerNonZeros[i]++;
970  m_data.value(p) = Scalar(0);
971  m_data.index(p) = StorageIndex(i);
972  assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
973  }
974  else
975  {
976  // defer insertion
977  newEntries.push_back(IndexPosPair(i,p));
978  }
979  }
980  // 2 - insert deferred entries
981  Index n_entries = Index(newEntries.size());
982  if(n_entries>0)
983  {
984  Storage newData(m_data.size()+n_entries);
985  Index prev_p = 0;
986  Index prev_i = 0;
987  for(Index k=0; k<n_entries;++k)
988  {
989  Index i = newEntries[k].i;
990  Index p = newEntries[k].p;
991  internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+p, newData.valuePtr()+prev_p+k);
992  internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+p, newData.indexPtr()+prev_p+k);
993  for(Index j=prev_i;j<i;++j)
994  m_outerIndex[j+1] += k;
995  if(!isComp)
996  m_innerNonZeros[i]++;
997  prev_p = p;
998  prev_i = i;
999  newData.value(p+k) = Scalar(0);
1000  newData.index(p+k) = StorageIndex(i);
1001  assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
1002  }
1003  {
1004  internal::smart_copy(m_data.valuePtr()+prev_p, m_data.valuePtr()+m_data.size(), newData.valuePtr()+prev_p+n_entries);
1005  internal::smart_copy(m_data.indexPtr()+prev_p, m_data.indexPtr()+m_data.size(), newData.indexPtr()+prev_p+n_entries);
1006  for(Index j=prev_i+1;j<=m_outerSize;++j)
1007  m_outerIndex[j] += n_entries;
1008  }
1009  m_data.swap(newData);
1010  }
1011  }
1012  }
1013 
1014 private:
1015  static void check_template_parameters()
1016  {
1017  EIGEN_STATIC_ASSERT(NumTraits<StorageIndex>::IsSigned,THE_INDEX_TYPE_MUST_BE_A_SIGNED_TYPE);
1018  EIGEN_STATIC_ASSERT((Options&(ColMajor|RowMajor))==Options,INVALID_MATRIX_TEMPLATE_PARAMETERS);
1019  }
1020 
1021  struct default_prunning_func {
1022  default_prunning_func(const Scalar& ref, const RealScalar& eps) : reference(ref), epsilon(eps) {}
1023  inline bool operator() (const Index&, const Index&, const Scalar& value) const
1024  {
1025  return !internal::isMuchSmallerThan(value, reference, epsilon);
1026  }
1027  Scalar reference;
1028  RealScalar epsilon;
1029  };
1030 };
1031 
1032 namespace internal {
1033 
1034 template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
1035 void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
1036 {
1037  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
1038  typedef typename SparseMatrixType::Scalar Scalar;
1039  typedef typename SparseMatrixType::StorageIndex StorageIndex;
1040  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());
1041 
1042  if(begin!=end)
1043  {
1044  // pass 1: count the nnz per inner-vector
1045  typename SparseMatrixType::IndexVector wi(trMat.outerSize());
1046  wi.setZero();
1047  for(InputIterator it(begin); it!=end; ++it)
1048  {
1049  eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
1050  wi(IsRowMajor ? it->col() : it->row())++;
1051  }
1052 
1053  // pass 2: insert all the elements into trMat
1054  trMat.reserve(wi);
1055  for(InputIterator it(begin); it!=end; ++it)
1056  trMat.insertBackUncompressed(it->row(),it->col()) = it->value();
1057 
1058  // pass 3: collapse duplicate entries
1059  trMat.collapseDuplicates(dup_func);
1060  }
1061 
1062  // pass 4: transposed copy -> implicit sorting
1063  mat = trMat;
1064 }
1065 
1066 }
1067 
1068 
1106 template<typename Scalar, int _Options, typename _StorageIndex>
1107 template<typename InputIterators>
1108 void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
1109 {
1110  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
1111 }
1112 
1122 template<typename Scalar, int _Options, typename _StorageIndex>
1123 template<typename InputIterators,typename DupFunctor>
1124 void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
1125 {
1126  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
1127 }
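// --- editor's usage sketch (not part of the original file) ---
// Triplets may come in any order; duplicates are summed by default, or
// combined by a user functor with the second overload:
//   std::vector<Eigen::Triplet<double> > t;
//   t.push_back(Eigen::Triplet<double>(0,0,1.0));
//   t.push_back(Eigen::Triplet<double>(0,0,2.0));   // duplicate of (0,0)
//   A.setFromTriplets(t.begin(), t.end());          // A.coeff(0,0) == 3.0
//   A.setFromTriplets(t.begin(), t.end(),
//                     [](double a, double b) { return b; });  // keep the last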
1128 
1130 template<typename Scalar, int _Options, typename _StorageIndex>
1131 template<typename DupFunctor>
1132 void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
1133 {
1134  eigen_assert(!isCompressed());
1135  // TODO, in practice we should be able to use m_innerNonZeros for that task
1136  IndexVector wi(innerSize());
1137  wi.fill(-1);
1138  StorageIndex count = 0;
1139  // for each inner-vector, wi[inner_index] will hold the position of first element into the index/value buffers
1140  for(Index j=0; j<outerSize(); ++j)
1141  {
1142  StorageIndex start = count;
1143  Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
1144  for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
1145  {
1146  Index i = m_data.index(k);
1147  if(wi(i)>=start)
1148  {
1149  // we have already met this entry => accumulate it
1150  m_data.value(wi(i)) = dup_func(m_data.value(wi(i)), m_data.value(k));
1151  }
1152  else
1153  {
1154  m_data.value(count) = m_data.value(k);
1155  m_data.index(count) = m_data.index(k);
1156  wi(i) = count;
1157  ++count;
1158  }
1159  }
1160  m_outerIndex[j] = start;
1161  }
1162  m_outerIndex[m_outerSize] = count;
1163 
1164  // turn the matrix into compressed form
1165  std::free(m_innerNonZeros);
1166  m_innerNonZeros = 0;
1167  m_data.resize(m_outerIndex[m_outerSize]);
1168 }
1169 
1170 template<typename Scalar, int _Options, typename _StorageIndex>
1171 template<typename OtherDerived>
1172 EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
1173 {
1174  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
1175  YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
1176 
1177  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1178  EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
1179  #endif
1180 
1181  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
1182  if (needToTranspose)
1183  {
1184  #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1185  EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
1186  #endif
1187  // two passes algorithm:
1188  // 1 - compute the number of coeffs per dest inner vector
1189  // 2 - do the actual copy/eval
1190  // Since each coeff of the rhs has to be evaluated twice, let's evaluate it if needed
1191  typedef typename internal::nested_eval<OtherDerived,2,typename internal::plain_matrix_type<OtherDerived>::type >::type OtherCopy;
1192  typedef typename internal::remove_all<OtherCopy>::type _OtherCopy;
1193  typedef internal::evaluator<_OtherCopy> OtherCopyEval;
1194  OtherCopy otherCopy(other.derived());
1195  OtherCopyEval otherCopyEval(otherCopy);
1196 
1197  SparseMatrix dest(other.rows(),other.cols());
1198  Eigen::Map<IndexVector> (dest.m_outerIndex,dest.outerSize()).setZero();
1199 
1200  // pass 1
1201  // FIXME the above copy could be merged with that pass
1202  for (Index j=0; j<otherCopy.outerSize(); ++j)
1203  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1204  ++dest.m_outerIndex[it.index()];
1205 
1206  // prefix sum
1207  StorageIndex count = 0;
1208  IndexVector positions(dest.outerSize());
1209  for (Index j=0; j<dest.outerSize(); ++j)
1210  {
1211  StorageIndex tmp = dest.m_outerIndex[j];
1212  dest.m_outerIndex[j] = count;
1213  positions[j] = count;
1214  count += tmp;
1215  }
1216  dest.m_outerIndex[dest.outerSize()] = count;
1217  // alloc
1218  dest.m_data.resize(count);
1219  // pass 2
1220  for (StorageIndex j=0; j<otherCopy.outerSize(); ++j)
1221  {
1222  for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
1223  {
1224  Index pos = positions[it.index()]++;
1225  dest.m_data.index(pos) = j;
1226  dest.m_data.value(pos) = it.value();
1227  }
1228  }
1229  this->swap(dest);
1230  return *this;
1231  }
1232  else
1233  {
1234  if(other.isRValue())
1235  {
1236  initAssignment(other.derived());
1237  }
1238  // there is no special optimization
1239  return Base::operator=(other.derived());
1240  }
1241 }
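// --- editor's note with a small example (not part of the original file) ---
// When source and destination storage orders differ, the two-pass branch
// above performs a transposed copy, which also leaves the inner indices
// sorted:
//   Eigen::SparseMatrix<double, Eigen::RowMajor> R(3,3);
//   R.insert(0,2) = 1.0;
//   Eigen::SparseMatrix<double> C(3,3);   // column-major
//   C = R;                                // needToTranspose == true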
1242 
1243 template<typename _Scalar, int _Options, typename _StorageIndex>
1244 typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
1245 {
1246  eigen_assert(row>=0 && row<rows() && col>=0 && col<cols());
1247 
1248  const Index outer = IsRowMajor ? row : col;
1249  const Index inner = IsRowMajor ? col : row;
1250 
1251  if(isCompressed())
1252  {
1253  if(nonZeros()==0)
1254  {
1255  // reserve space if not already done
1256  if(m_data.allocatedSize()==0)
1257  m_data.reserve(2*m_innerSize);
1258 
1259  // turn the matrix into non-compressed mode
1260  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1261  if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1262 
1263  memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));
1264 
1265  // pack all inner-vectors to the end of the pre-allocated space
1266  // and allocate the entire free-space to the first inner-vector
1267  StorageIndex end = convert_index(m_data.allocatedSize());
1268  for(Index j=1; j<=m_outerSize; ++j)
1269  m_outerIndex[j] = end;
1270  }
1271  else
1272  {
1273  // turn the matrix into non-compressed mode
1274  m_innerNonZeros = static_cast<StorageIndex*>(std::malloc(m_outerSize * sizeof(StorageIndex)));
1275  if(!m_innerNonZeros) internal::throw_std_bad_alloc();
1276  for(Index j=0; j<m_outerSize; ++j)
1277  m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
1278  }
1279  }
1280 
1281  // check whether we can do a fast "push back" insertion
1282  Index data_end = m_data.allocatedSize();
1283 
1284  // First case: we are filling a new inner vector which is packed at the end.
1285  // We assume that all remaining inner-vectors are also empty and packed to the end.
1286  if(m_outerIndex[outer]==data_end)
1287  {
1288  eigen_internal_assert(m_innerNonZeros[outer]==0);
1289 
1290  // pack previous empty inner-vectors to end of the used-space
1291  // and allocate the entire free-space to the current inner-vector.
1292  StorageIndex p = convert_index(m_data.size());
1293  Index j = outer;
1294  while(j>=0 && m_innerNonZeros[j]==0)
1295  m_outerIndex[j--] = p;
1296 
1297  // push back the new element
1298  ++m_innerNonZeros[outer];
1299  m_data.append(Scalar(0), inner);
1300 
1301  // check for reallocation
1302  if(data_end != m_data.allocatedSize())
1303  {
1304  // m_data has been reallocated
1305  // -> move remaining inner-vectors back to the end of the free-space
1306  // so that the entire free-space is allocated to the current inner-vector.
1307  eigen_internal_assert(data_end < m_data.allocatedSize());
1308  StorageIndex new_end = convert_index(m_data.allocatedSize());
1309  for(Index k=outer+1; k<=m_outerSize; ++k)
1310  if(m_outerIndex[k]==data_end)
1311  m_outerIndex[k] = new_end;
1312  }
1313  return m_data.value(p);
1314  }
1315 
1316  // Second case: the next inner-vector is packed to the end
1317  // and the current inner-vector end match the used-space.
1318  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
1319  {
1320  eigen_internal_assert(outer+1==m_outerSize || m_innerNonZeros[outer+1]==0);
1321 
1322  // add space for the new element
1323  ++m_innerNonZeros[outer];
1324  m_data.resize(m_data.size()+1);
1325 
1326  // check for reallocation
1327  if(data_end != m_data.allocatedSize())
1328  {
1329  // m_data has been reallocated
1330  // -> move remaining inner-vectors back to the end of the free-space
1331  // so that the entire free-space is allocated to the current inner-vector.
1332  eigen_internal_assert(data_end < m_data.allocatedSize());
1333  StorageIndex new_end = convert_index(m_data.allocatedSize());
1334  for(Index k=outer+1; k<=m_outerSize; ++k)
1335  if(m_outerIndex[k]==data_end)
1336  m_outerIndex[k] = new_end;
1337  }
1338 
1339  // and insert it at the right position (sorted insertion)
1340  Index startId = m_outerIndex[outer];
1341  Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
1342  while ( (p > startId) && (m_data.index(p-1) > inner) )
1343  {
1344  m_data.index(p) = m_data.index(p-1);
1345  m_data.value(p) = m_data.value(p-1);
1346  --p;
1347  }
1348 
1349  m_data.index(p) = convert_index(inner);
1350  return (m_data.value(p) = Scalar(0));
1351  }
1352 
1353  if(m_data.size() != m_data.allocatedSize())
1354  {
1355  // make sure the matrix is compatible to random un-compressed insertion:
1356  m_data.resize(m_data.allocatedSize());
1357  this->reserveInnerVectors(Array<StorageIndex,Dynamic,1>::Constant(m_outerSize, 2));
1358  }
1359 
1360  return insertUncompressed(row,col);
1361 }
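// --- editor's note with a small example (not part of the original file) ---
// insert() assumes the coefficient does not exist yet (the uncompressed path
// asserts on duplicates); when unsure, go through coeffRef() instead:
//   A.insert(0,1) = 3.0;     // valid only if (0,1) was absent
//   A.coeffRef(0,1) += 1.0;  // safe whether or not (0,1) exists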
1362 
1363 template<typename _Scalar, int _Options, typename _StorageIndex>
1364 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
1365 {
1366  eigen_assert(!isCompressed());
1367 
1368  const Index outer = IsRowMajor ? row : col;
1369  const StorageIndex inner = convert_index(IsRowMajor ? col : row);
1370 
1371  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
1372  StorageIndex innerNNZ = m_innerNonZeros[outer];
1373  if(innerNNZ>=room)
1374  {
1375  // this inner vector is full, we need to reallocate the whole buffer :(
1376  reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
1377  }
1378 
1379  Index startId = m_outerIndex[outer];
1380  Index p = startId + m_innerNonZeros[outer];
1381  while ( (p > startId) && (m_data.index(p-1) > inner) )
1382  {
1383  m_data.index(p) = m_data.index(p-1);
1384  m_data.value(p) = m_data.value(p-1);
1385  --p;
1386  }
1387  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");
1388 
1389  m_innerNonZeros[outer]++;
1390 
1391  m_data.index(p) = inner;
1392  return (m_data.value(p) = Scalar(0));
1393 }
1394 
1395 template<typename _Scalar, int _Options, typename _StorageIndex>
1396 EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
1397 {
1398  eigen_assert(isCompressed());
1399 
1400  const Index outer = IsRowMajor ? row : col;
1401  const Index inner = IsRowMajor ? col : row;
1402 
1403  Index previousOuter = outer;
1404  if (m_outerIndex[outer+1]==0)
1405  {
1406  // we start a new inner vector
1407  while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
1408  {
1409  m_outerIndex[previousOuter] = convert_index(m_data.size());
1410  --previousOuter;
1411  }
1412  m_outerIndex[outer+1] = m_outerIndex[outer];
1413  }
1414 
1415  // here we have to handle the tricky case where the outerIndex array
1416  // starts with: [ 0 0 0 0 0 1 ...] and we are inserted in, e.g.,
1417  // the 2nd inner vector...
1418  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
1419  && (std::size_t(m_outerIndex[outer+1]) == m_data.size());
1420 
1421  std::size_t startId = m_outerIndex[outer];
1422  // FIXME let's make sure sizeof(long int) == sizeof(std::size_t)
1423  std::size_t p = m_outerIndex[outer+1];
1424  ++m_outerIndex[outer+1];
1425 
1426  double reallocRatio = 1;
1427  if (m_data.allocatedSize()<=m_data.size())
1428  {
1429  // if there is no preallocated memory, let's reserve a minimum of 32 elements
1430  if (m_data.size()==0)
1431  {
1432  m_data.reserve(32);
1433  }
1434  else
1435  {
1436  // we need to reallocate the data, to reduce multiple reallocations
1437  // we use a smart resize algorithm based on the current filling ratio
1438  // in addition, we use double to avoid integers overflows
1439  double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
1440  reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
1441  // furthermore we bound the realloc ratio to:
1442  // 1) reduce multiple minor realloc when the matrix is almost filled
1443  // 2) avoid to allocate too much memory when the matrix is almost empty
1444  reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
1445  }
1446  }
1447  m_data.resize(m_data.size()+1,reallocRatio);
1448 
1449  if (!isLastVec)
1450  {
1451  if (previousOuter==-1)
1452  {
1453  // oops wrong guess.
1454  // let's correct the outer offsets
1455  for (Index k=0; k<=(outer+1); ++k)
1456  m_outerIndex[k] = 0;
1457  Index k=outer+1;
1458  while(m_outerIndex[k]==0)
1459  m_outerIndex[k++] = 1;
1460  while (k<=m_outerSize && m_outerIndex[k]!=0)
1461  m_outerIndex[k++]++;
1462  p = 0;
1463  --k;
1464  k = m_outerIndex[k]-1;
1465  while (k>0)
1466  {
1467  m_data.index(k) = m_data.index(k-1);
1468  m_data.value(k) = m_data.value(k-1);
1469  k--;
1470  }
1471  }
1472  else
1473  {
1474  // we are not inserting into the last inner vec
1475  // update outer indices:
1476  Index j = outer+2;
1477  while (j<=m_outerSize && m_outerIndex[j]!=0)
1478  m_outerIndex[j++]++;
1479  --j;
1480  // shift data of last vecs:
1481  Index k = m_outerIndex[j]-1;
1482  while (k>=Index(p))
1483  {
1484  m_data.index(k) = m_data.index(k-1);
1485  m_data.value(k) = m_data.value(k-1);
1486  k--;
1487  }
1488  }
1489  }
1490 
1491  while ( (p > startId) && (m_data.index(p-1) > inner) )
1492  {
1493  m_data.index(p) = m_data.index(p-1);
1494  m_data.value(p) = m_data.value(p-1);
1495  --p;
1496  }
1497 
1498  m_data.index(p) = inner;
1499  return (m_data.value(p) = Scalar(0));
1500 }
1501 
1502 namespace internal {
1503 
1504 template<typename _Scalar, int _Options, typename _StorageIndex>
1505 struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
1506  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
1507 {
1508  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
1509  typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
1510  evaluator() : Base() {}
1511  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
1512 };
1513 
1514 }
1515 
1516 } // end namespace Eigen
1517 
1518 #endif // EIGEN_SPARSEMATRIX_H