#ifndef EIGEN_SPARSEMATRIX_H
#define EIGEN_SPARSEMATRIX_H

namespace Eigen {

// ...

namespace internal {

template<typename _Scalar, int _Options, typename _StorageIndex>
struct traits<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
  // ...
};
template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
  // ...
  enum {
    // ...
    ColsAtCompileTime = 1,
    // ...
    MaxColsAtCompileTime = 1
    // ...
  };
};
template<typename _Scalar, int _Options, typename _StorageIndex, int DiagIndex>
struct traits<Diagonal<const SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
  : public traits<Diagonal<SparseMatrix<_Scalar, _Options, _StorageIndex>, DiagIndex> >
{
  // ...
};

} // end namespace internal
template<typename _Scalar, int _Options, typename _StorageIndex>
class SparseMatrix
  : public SparseCompressedBase<SparseMatrix<_Scalar, _Options, _StorageIndex> >
{
    typedef SparseCompressedBase<SparseMatrix> Base;
    // ...
    template<typename, typename, typename, typename, typename>
    friend struct internal::Assignment;
  public:
    using Base::isCompressed;
    using Base::nonZeros;
    // ...
    using Base::operator+=;
    using Base::operator-=;
    // ...
    using Base::IsRowMajor;
    // ...
    inline Index rows() const { return IsRowMajor ? m_outerSize : m_innerSize; }
    inline Index cols() const { return IsRowMajor ? m_innerSize : m_outerSize; }
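    // Note: the "outer" dimension is the one along which coefficients are stored
    // contiguously (columns for the default column-major layout, rows when the
    // RowMajor option is set); "inner" is the orthogonal dimension.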
    inline Storage& data() { return m_data; }
    inline const Storage& data() const { return m_data; }
    // in coeff(Index row, Index col):
    // ...
    Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
    // ... (binary search of inner within [m_outerIndex[outer], end))
    // in coeffRef(Index row, Index col):
    Index start = m_outerIndex[outer];
    Index end = m_innerNonZeros ? m_outerIndex[outer] + m_innerNonZeros[outer] : m_outerIndex[outer+1];
    eigen_assert(end>=start && "you probably called coeffRef on a non finalized matrix");
    // ... (binary search for inner within [start,end), yielding position p)
    if((p<end) && (m_data.index(p)==inner))
      return m_data.value(p);
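    // (when the coefficient is not stored yet, coeffRef falls through to the
    // insertion path rather than returning a reference to an implicit zero)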
    // in setZero():
    memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
    if(m_innerNonZeros)
      memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));

    // in reserve(Index reserveSize):
    eigen_assert(isCompressed() && "This function does not make sense in non compressed mode.");
#ifdef EIGEN_PARSED_BY_DOXYGEN
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes);
#else
    template<class SizesType>
    inline void reserve(const SizesType& reserveSizes,
                        const typename SizesType::value_type& enableif = typename SizesType::value_type())
    {
      EIGEN_UNUSED_VARIABLE(enableif);
      reserveInnerVectors(reserveSizes);
    }
#endif // EIGEN_PARSED_BY_DOXYGEN

  protected:
    template<class SizesType>
    inline void reserveInnerVectors(const SizesType& reserveSizes)
    {
      if(isCompressed())
      {
        Index totalReserveSize = 0;
        // ... (allocate m_innerNonZeros and temporarily reuse it as newOuterIndex)
        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          count += reserveSizes[j] + (m_outerIndex[j+1]-m_outerIndex[j]);
          totalReserveSize += reserveSizes[j];
        }
        m_data.reserve(totalReserveSize);
        StorageIndex previousOuterIndex = m_outerIndex[m_outerSize];
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          StorageIndex innerNNZ = previousOuterIndex - m_outerIndex[j];
          for(Index i=innerNNZ-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
          previousOuterIndex = m_outerIndex[j];
          m_outerIndex[j] = newOuterIndex[j];
          m_innerNonZeros[j] = innerNNZ;
        }
        // ...
        m_outerIndex[m_outerSize] = m_outerIndex[m_outerSize-1] + m_innerNonZeros[m_outerSize-1] + reserveSizes[m_outerSize-1];

        m_data.resize(m_outerIndex[m_outerSize]);
      }
      else
      {
        // ... (allocate a temporary newOuterIndex array)
        StorageIndex count = 0;
        for(Index j=0; j<m_outerSize; ++j)
        {
          newOuterIndex[j] = count;
          StorageIndex alreadyReserved = (m_outerIndex[j+1]-m_outerIndex[j]) - m_innerNonZeros[j];
          StorageIndex toReserve = std::max<StorageIndex>(reserveSizes[j], alreadyReserved);
          count += toReserve + m_innerNonZeros[j];
        }
        newOuterIndex[m_outerSize] = count;
        // ...
        for(Index j=m_outerSize-1; j>=0; --j)
        {
          // ... (move inner vector j to its new position if it changed)
          for(Index i=m_innerNonZeros[j]-1; i>=0; --i)
          {
            m_data.index(newOuterIndex[j]+i) = m_data.index(m_outerIndex[j]+i);
            m_data.value(newOuterIndex[j]+i) = m_data.value(m_outerIndex[j]+i);
          }
        }
        // ... (swap m_outerIndex and newOuterIndex)
        std::free(newOuterIndex);
      }
    }
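    // Note: reserving on a compressed matrix switches it to the uncompressed
    // (non-finalized) mode so that per-vector free room can be tracked;
    // makeCompressed() packs it back.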
  public:
    inline Scalar& insertBack(Index row, Index col)
    {
      return insertBackByOuterInner(IsRowMajor?row:col, IsRowMajor?col:row);
    }
    inline Scalar& insertBackByOuterInner(Index outer, Index inner)
    {
      eigen_assert(Index(m_outerIndex[outer+1]) == m_data.size() && "Invalid ordered insertion (invalid outer index)");
      eigen_assert( (m_outerIndex[outer+1]-m_outerIndex[outer]==0 || m_data.index(m_data.size()-1)<inner) && "Invalid ordered insertion (invalid inner index)");
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }

    inline Scalar& insertBackByOuterInnerUnordered(Index outer, Index inner)
    {
      Index p = m_outerIndex[outer+1];
      ++m_outerIndex[outer+1];
      m_data.append(Scalar(0), inner);
      return m_data.value(p);
    }
    inline void startVec(Index outer)
    {
      eigen_assert(m_outerIndex[outer]==Index(m_data.size()) && "You must call startVec for each inner vector sequentially");
      eigen_assert(m_outerIndex[outer+1]==0 && "You must call startVec for each inner vector sequentially");
      m_outerIndex[outer+1] = m_outerIndex[outer];
    }
    // in finalize(): fill the unused trailing outer indices
    StorageIndex size = internal::convert_index<StorageIndex>(m_data.size());
    Index i = m_outerSize;
    // find the last filled inner vector
    while (i>=0 && m_outerIndex[i]==0)
      --i;
    ++i;
    while (i<=m_outerSize)
    {
      m_outerIndex[i] = size;
      ++i;
    }
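    // Low-level filling pattern (a sketch, not part of this header; assumes a
    // column-major matrix and strictly increasing row indices per column):
    //
    //   Eigen::SparseMatrix<double> A(rows, cols);
    //   A.reserve(nnzEstimate);
    //   for (int j = 0; j < cols; ++j) {
    //     A.startVec(j);
    //     A.insertBack(i, j) = v;   // for each nonzero (i, v) of column j
    //   }
    //   A.finalize();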
    template<typename InputIterators>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end);

    template<typename InputIterators, typename DupFunctor>
    void setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func);

    template<typename DupFunctor>
    void collapseDuplicates(DupFunctor dup_func = DupFunctor());
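    // Typical use (a sketch, not part of this header): build the matrix from an
    // unsorted triplet list; duplicate (i,j) entries are summed by default:
    //
    //   std::vector<Eigen::Triplet<double> > triplets;
    //   triplets.push_back(Eigen::Triplet<double>(0, 1, 3.0));
    //   triplets.push_back(Eigen::Triplet<double>(2, 0, -1.0));
    //   triplets.push_back(Eigen::Triplet<double>(0, 1, 2.0));  // merged into (0,1) -> 5.0
    //   Eigen::SparseMatrix<double> A(3, 3);
    //   A.setFromTriplets(triplets.begin(), triplets.end());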
    Scalar& insertByOuterInner(Index j, Index i)
    {
      return insert(IsRowMajor ? j : i, IsRowMajor ? i : j);
    }
    // in makeCompressed(): pack the inner vectors contiguously
    Index oldStart = m_outerIndex[1];
    m_outerIndex[1] = m_innerNonZeros[0];
    for(Index j=1; j<m_outerSize; ++j)
    {
      Index nextOldStart = m_outerIndex[j+1];
      // ... (if the vector actually moved:)
      for(Index k=0; k<m_innerNonZeros[j]; ++k)
      {
        m_data.index(m_outerIndex[j]+k) = m_data.index(oldStart+k);
        m_data.value(m_outerIndex[j]+k) = m_data.value(oldStart+k);
      }
      m_outerIndex[j+1] = m_outerIndex[j] + m_innerNonZeros[j];
      oldStart = nextOldStart;
    }
    std::free(m_innerNonZeros);
    m_innerNonZeros = 0;
    m_data.resize(m_outerIndex[m_outerSize]);
    // ...
    void uncompress()
    {
      if(m_innerNonZeros != 0)
        return;
      // ... (allocate m_innerNonZeros)
      for (Index i = 0; i < m_outerSize; i++)
      {
        m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
      }
    }
    void prune(const Scalar& reference, const RealScalar& epsilon = NumTraits<RealScalar>::dummy_precision())
    {
      prune(default_prunning_func(reference,epsilon));
    }
    template<typename KeepFunc>
    void prune(const KeepFunc& keep = KeepFunc())
    {
      makeCompressed();
      StorageIndex k = 0;
      for(Index j=0; j<m_outerSize; ++j)
      {
        Index previousStart = m_outerIndex[j];
        m_outerIndex[j] = k;
        Index end = m_outerIndex[j+1];
        // ... (copy to position k the coefficients of [previousStart,end)
        //      for which keep(row, col, value) returns true)
      }
      m_outerIndex[m_outerSize] = k;
      // ...
    }
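    // Example (a sketch, assuming C++11 and Scalar=double): drop every stored
    // coefficient below a threshold:
    //
    //   A.prune([](Eigen::Index, Eigen::Index, const double& v) {
    //     return std::abs(v) > 1e-12;   // keep only significant entries
    //   });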
    void conservativeResize(Index rows, Index cols)
    {
      // no change
      if (this->rows() == rows && this->cols() == cols) return;

      // if one dimension is null, nothing has to be preserved
      if(rows==0 || cols==0) return resize(rows,cols);

      Index innerChange = IsRowMajor ? cols - this->cols() : rows - this->rows();
      Index outerChange = IsRowMajor ? rows - this->rows() : cols - this->cols();
      Index newInnerSize = IsRowMajor ? cols : rows;

      // deal with the inner non-zero counts
      if (m_innerNonZeros)
      {
        // ... (grow m_innerNonZeros via realloc into newInnerNonZeros)
        m_innerNonZeros = newInnerNonZeros;
        for(Index i=m_outerSize; i<m_outerSize+outerChange; i++)
          m_innerNonZeros[i] = 0;
      }
      else if (innerChange < 0)
      {
        // the inner size decreased: allocate a new m_innerNonZeros
        // ...
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
          m_innerNonZeros[i] = m_outerIndex[i+1] - m_outerIndex[i];
        for(Index i = m_outerSize; i < m_outerSize + outerChange; i++)
          m_innerNonZeros[i] = 0;
      }

      // drop the entries that fall outside the new inner size
      if (m_innerNonZeros && innerChange < 0)
      {
        for(Index i = 0; i < m_outerSize + (std::min)(outerChange, Index(0)); i++)
        {
          StorageIndex &n = m_innerNonZeros[i];
          StorageIndex start = m_outerIndex[i];
          while (n > 0 && m_data.index(start+n-1) >= newInnerSize) --n;
        }
      }

      m_innerSize = newInnerSize;

      // grow or shrink the outer index array if needed
      if (outerChange == 0)
        return;
      // ... (realloc m_outerIndex into newOuterIndex)
      m_outerIndex = newOuterIndex;
      if (outerChange > 0)
      {
        StorageIndex lastIdx = m_outerSize == 0 ? 0 : m_outerIndex[m_outerSize];
        for(Index i=m_outerSize; i<m_outerSize+outerChange+1; i++)
          m_outerIndex[i] = lastIdx;
      }
      m_outerSize += outerChange;
    }
    void resize(Index rows, Index cols)
    {
      const Index outerSize = IsRowMajor ? rows : cols;
      m_innerSize = IsRowMajor ? cols : rows;
      m_data.clear();
      if (m_outerSize != outerSize || m_outerSize==0)
      {
        std::free(m_outerIndex);
        // ... (allocate outerSize+1 outer indices)
        m_outerSize = outerSize;
      }
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
      memset(m_outerIndex, 0, (m_outerSize+1)*sizeof(StorageIndex));
    }
    const ConstDiagonalReturnType diagonal() const { return ConstDiagonalReturnType(*this); }
    DiagonalReturnType diagonal() { return DiagonalReturnType(*this); }
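    // Note: diagonal() returns a vector expression of the main diagonal; the
    // non-const overload is writable (assignments go through assignDiagonal()
    // below, which can insert missing diagonal coefficients).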
    inline SparseMatrix()
      : m_outerSize(-1), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    { check_template_parameters(); resize(0, 0); }

    inline SparseMatrix(Index rows, Index cols)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    { check_template_parameters(); resize(rows, cols); }

    template<typename OtherDerived>
    inline SparseMatrix(const SparseMatrixBase<OtherDerived>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
        YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)
      check_template_parameters();
      // ...
      #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
      #endif
      // ...
    }

    template<typename OtherDerived, unsigned int UpLo>
    inline SparseMatrix(const SparseSelfAdjointView<OtherDerived, UpLo>& other)
      : m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      Base::operator=(other);
    }

    inline SparseMatrix(const SparseMatrix& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    { check_template_parameters(); *this = other.derived(); }

    template<typename OtherDerived>
    SparseMatrix(const ReturnByValue<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    {
      check_template_parameters();
      initAssignment(other);
      other.evalTo(*this);
    }

    template<typename OtherDerived>
    explicit SparseMatrix(const DiagonalBase<OtherDerived>& other)
      : Base(), m_outerSize(0), m_innerSize(0), m_outerIndex(0), m_innerNonZeros(0)
    { check_template_parameters(); *this = other.derived(); }
    // in setIdentity():
    // ...
    std::free(m_innerNonZeros);
    m_innerNonZeros = 0;

    inline SparseMatrix& operator=(const SparseMatrix& other)
    {
      if (other.isRValue())
        swap(other.const_cast_derived());
      else if(this!=&other)
      {
        #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
          EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
        #endif
        initAssignment(other);
        // ... (compressed rhs: raw copy of the indices and values; otherwise:)
        Base::operator=(other);
      }
      return *this;
    }
#ifndef EIGEN_PARSED_BY_DOXYGEN
    template<typename OtherDerived>
    inline SparseMatrix& operator=(const EigenBase<OtherDerived>& other)
    { return Base::operator=(other.derived()); }

    template<typename Lhs, typename Rhs>
    inline SparseMatrix& operator=(const Product<Lhs,Rhs,AliasFreeProduct>& other);
#endif // EIGEN_PARSED_BY_DOXYGEN

    template<typename OtherDerived>
    EIGEN_DONT_INLINE SparseMatrix& operator=(const SparseMatrixBase<OtherDerived>& other);
    friend std::ostream & operator << (std::ostream & s, const SparseMatrix& m)
    {
      EIGEN_DBG_SPARSE(
        s << "Nonzero entries:\n";
        // ... (print the (value,index) pairs)
        s << "Outer pointers:\n";
        // ... (print m_outerIndex)
        s << " $" << std::endl;
        if(!m.isCompressed())
        {
          s << "Inner non zeros:\n";
          // ... (print m_innerNonZeros)
          s << " $" << std::endl;
        }
        s << std::endl;
      );
      s << static_cast<const SparseMatrixBase<SparseMatrix>&>(m);
      return s;
    }
    inline ~SparseMatrix()
    {
      std::free(m_outerIndex);
      std::free(m_innerNonZeros);
    }
#   ifdef EIGEN_SPARSEMATRIX_PLUGIN
#     include EIGEN_SPARSEMATRIX_PLUGIN
#   endif

  protected:
    template<typename Other>
    void initAssignment(const Other& other)
    {
      resize(other.rows(), other.cols());
      if(m_innerNonZeros)
      {
        std::free(m_innerNonZeros);
        m_innerNonZeros = 0;
      }
    }
  public:
    EIGEN_STRONG_INLINE Scalar& insertBackUncompressed(Index row, Index col)
    {
      const Index outer = IsRowMajor ? row : col;
      const Index inner = IsRowMajor ? col : row;

      eigen_assert(!isCompressed());
      eigen_assert(m_innerNonZeros[outer]<=(m_outerIndex[outer+1] - m_outerIndex[outer]));

      Index p = m_outerIndex[outer] + m_innerNonZeros[outer]++;
      m_data.index(p) = convert_index(inner);
      return (m_data.value(p) = Scalar(0));
    }
    template<typename DiagXpr, typename Func>
    void assignDiagonal(const DiagXpr diagXpr, const Func& assignFunc)
    {
      Index n = diagXpr.size();
      const bool overwrite = internal::is_same<Func, internal::assign_op<Scalar,Scalar> >::value;
      if(overwrite)
      {
        if((this->rows()!=n) || (this->cols()!=n))
          this->resize(n, n);
      }

      if(m_data.size()==0 || overwrite)
      {
        // start from scratch: allocate a dense diagonal and assign to it
        // ...
        this->resizeNonZeros(n);
        // ...
      }
      else
      {
        bool isComp = isCompressed();
        internal::evaluator<DiagXpr> diaEval(diagXpr);
        std::vector<IndexPosPair> newEntries;

        // 1 - try in-place update, recording the entries that must be inserted
        for(Index i = 0; i<n; ++i)
        {
          // ... (lower-bound search for coefficient (i,i), yielding position p)
          if(lb.found)
          {
            // the coefficient already exists
            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
          }
          else if((!isComp) && m_innerNonZeros[i] < (m_outerIndex[i+1]-m_outerIndex[i]))
          {
            // uncompressed mode with local room to insert one element
            m_data.moveChunk(p, p+1, m_outerIndex[i]+m_innerNonZeros[i]-p);
            m_innerNonZeros[i]++;
            // ...
            assignFunc.assignCoeff(m_data.value(p), diaEval.coeff(i));
          }
          else
          {
            // no room: defer the insertion
            newEntries.push_back(IndexPosPair(i,p));
          }
        }

        // 2 - copy into a fresh buffer, inserting the deferred entries
        Index n_entries = Index(newEntries.size());
        if(n_entries>0)
        {
          Storage newData(m_data.size()+n_entries);
          Index prev_p = 0;
          Index prev_i = 0;
          for(Index k=0; k<n_entries;++k)
          {
            Index i = newEntries[k].i;
            Index p = newEntries[k].p;
            // ... (copy the chunk [prev_p,p) shifted by k entries)
            for(Index j=prev_i; j<i; ++j)
              m_outerIndex[j+1] += k;
            if(!isComp)
              m_innerNonZeros[i]++;
            prev_p = p; prev_i = i;
            newData.value(p+k) = Scalar(0);
            newData.index(p+k) = StorageIndex(i);
            assignFunc.assignCoeff(newData.value(p+k), diaEval.coeff(i));
          }
          // ... (copy the tail and shift the remaining outer indices)
          for(Index j=prev_i+1; j<=m_outerSize; ++j)
            m_outerIndex[j] += n_entries;
          m_data.swap(newData);
        }
      }
    }

    // ...
};
namespace internal {

template<typename InputIterator, typename SparseMatrixType, typename DupFunctor>
void set_from_triplets(const InputIterator& begin, const InputIterator& end, SparseMatrixType& mat, DupFunctor dup_func)
{
  enum { IsRowMajor = SparseMatrixType::IsRowMajor };
  typedef typename SparseMatrixType::Scalar Scalar;
  typedef typename SparseMatrixType::StorageIndex StorageIndex;
  SparseMatrix<Scalar,IsRowMajor?ColMajor:RowMajor,StorageIndex> trMat(mat.rows(),mat.cols());

  if(begin!=end)
  {
    // pass 1: count the number of coefficients per inner vector
    typename SparseMatrixType::IndexVector wi(trMat.outerSize());
    wi.setZero();
    for(InputIterator it(begin); it!=end; ++it)
    {
      eigen_assert(it->row()>=0 && it->row()<mat.rows() && it->col()>=0 && it->col()<mat.cols());
      wi(IsRowMajor ? it->col() : it->row())++;
    }

    // pass 2: insert all the elements into trMat
    trMat.reserve(wi);
    for(InputIterator it(begin); it!=end; ++it)
      trMat.insertBackUncompressed(it->row(),it->col()) = it->value();

    // pass 3: merge duplicates
    trMat.collapseDuplicates(dup_func);
  }

  // pass 4: transposed copy -> implicit sorting
  mat = trMat;
}

} // namespace internal
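// Note: the final "mat = trMat;" assignment performs a transposed copy, which
// both sorts the inner indices and compresses the result; the whole routine is
// essentially linear in the number of triplets.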
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex> >(begin, end, *this, internal::scalar_sum_op<Scalar,Scalar>());
}

template<typename Scalar, int _Options, typename _StorageIndex>
template<typename InputIterators, typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::setFromTriplets(const InputIterators& begin, const InputIterators& end, DupFunctor dup_func)
{
  internal::set_from_triplets<InputIterators, SparseMatrix<Scalar,_Options,_StorageIndex>, DupFunctor>(begin, end, *this, dup_func);
}
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename DupFunctor>
void SparseMatrix<Scalar,_Options,_StorageIndex>::collapseDuplicates(DupFunctor dup_func)
{
  eigen_assert(!isCompressed());
  // wi[inner_index] caches the position of the first entry with that inner index
  IndexVector wi(innerSize());
  wi.fill(-1);
  StorageIndex count = 0;
  for(Index j=0; j<outerSize(); ++j)
  {
    StorageIndex start = count;
    Index oldEnd = m_outerIndex[j]+m_innerNonZeros[j];
    for(Index k=m_outerIndex[j]; k<oldEnd; ++k)
    {
      Index i = m_data.index(k);
      if(wi(i)>=start)
      {
        // entry already seen in this inner vector => accumulate it
        dup_func(m_data.value(wi(i)), m_data.value(k));
      }
      else
      {
        m_data.value(count) = m_data.value(k);
        m_data.index(count) = m_data.index(k);
        wi(i) = count;
        ++count;
      }
    }
    m_outerIndex[j] = start;
  }
  m_outerIndex[m_outerSize] = count;

  // turn the matrix into compressed form
  std::free(m_innerNonZeros);
  m_innerNonZeros = 0;
  m_data.resize(m_outerIndex[m_outerSize]);
}
template<typename Scalar, int _Options, typename _StorageIndex>
template<typename OtherDerived>
EIGEN_DONT_INLINE SparseMatrix<Scalar,_Options,_StorageIndex>& SparseMatrix<Scalar,_Options,_StorageIndex>::operator=(const SparseMatrixBase<OtherDerived>& other)
{
  EIGEN_STATIC_ASSERT((internal::is_same<Scalar, typename OtherDerived::Scalar>::value),
    YOU_MIXED_DIFFERENT_NUMERIC_TYPES__YOU_NEED_TO_USE_THE_CAST_METHOD_OF_MATRIXBASE_TO_CAST_NUMERIC_TYPES_EXPLICITLY)

  #ifdef EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
    EIGEN_SPARSE_CREATE_TEMPORARY_PLUGIN
  #endif

  const bool needToTranspose = (Flags & RowMajorBit) != (internal::evaluator<OtherDerived>::Flags & RowMajorBit);
  if (needToTranspose)
  {
    #ifdef EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
      EIGEN_SPARSE_TRANSPOSED_COPY_PLUGIN
    #endif
    // two-pass algorithm:
    //  1 - compute the number of coefficients per destination inner vector
    //  2 - do the actual copy/eval
    // Since each coefficient of the rhs has to be evaluated twice, let's evaluate it if needed
    // ... (nested evaluation typedefs)
    OtherCopy otherCopy(other.derived());
    OtherCopyEval otherCopyEval(otherCopy);

    SparseMatrix dest(other.rows(),other.cols());
    // ... (zero-initialize dest.m_outerIndex)

    // pass 1
    for (Index j=0; j<otherCopy.outerSize(); ++j)
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
        ++dest.m_outerIndex[it.index()];

    // prefix sum
    StorageIndex count = 0;
    IndexVector positions(dest.outerSize());
    for (Index j=0; j<dest.outerSize(); ++j)
    {
      StorageIndex tmp = dest.m_outerIndex[j];
      dest.m_outerIndex[j] = count;
      positions[j] = count;
      count += tmp;
    }
    dest.m_outerIndex[dest.outerSize()] = count;
    // alloc
    dest.m_data.resize(count);

    // pass 2
    for (Index j=0; j<otherCopy.outerSize(); ++j)
    {
      for (typename OtherCopyEval::InnerIterator it(otherCopyEval, j); it; ++it)
      {
        Index pos = positions[it.index()]++;
        dest.m_data.index(pos) = j;
        dest.m_data.value(pos) = it.value();
      }
    }
    this->swap(dest);
    return *this;
  }
  else
  {
    if(other.isRValue())
      initAssignment(other.derived());
    // there is no special optimization
    return Base::operator=(other.derived());
  }
}
template<typename _Scalar, int _Options, typename _StorageIndex>
typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insert(Index row, Index col)
{
  // ...
  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  if(isCompressed())
  {
    if(nonZeros()==0)
    {
      // reserve space if not already done
      if(m_data.allocatedSize()==0)
        m_data.reserve(2*m_innerSize);

      // turn the matrix into non-compressed mode
      // ... (allocate m_innerNonZeros)
      memset(m_innerNonZeros, 0, (m_outerSize)*sizeof(StorageIndex));

      // pack all inner vectors to the end of the pre-allocated space
      // and give the entire free space to the first inner vector
      StorageIndex end = convert_index(m_data.allocatedSize());
      for(Index j=1; j<=m_outerSize; ++j)
        m_outerIndex[j] = end;
    }
    else
    {
      // turn the matrix into non-compressed mode
      // ... (allocate m_innerNonZeros)
      for(Index j=0; j<m_outerSize; ++j)
        m_innerNonZeros[j] = m_outerIndex[j+1]-m_outerIndex[j];
    }
  }

  // check whether we can do a fast "push back" insertion
  Index data_end = m_data.allocatedSize();

  // first case: we are filling a new inner vector which is packed at the end
  if(m_outerIndex[outer]==data_end)
  {
    // pack the previous empty inner vectors and give the whole free space
    // to the current one
    StorageIndex p = convert_index(m_data.size());
    Index j = outer;
    while(j>=0 && m_innerNonZeros[j]==0)
      m_outerIndex[j--] = p;

    // push back the new element
    ++m_innerNonZeros[outer];
    m_data.append(Scalar(0), inner);

    // if m_data has been reallocated, move the remaining inner vectors
    // back to the end of the free space
    if(data_end != m_data.allocatedSize())
    {
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }
    return m_data.value(p);
  }

  // second case: the next inner vector is packed to the end
  // and the current one ends at the used space
  if(m_outerIndex[outer+1]==data_end && m_outerIndex[outer]+m_innerNonZeros[outer]==m_data.size())
  {
    // add the new element
    ++m_innerNonZeros[outer];
    m_data.resize(m_data.size()+1);

    // handle a possible reallocation as in the first case
    if(data_end != m_data.allocatedSize())
    {
      StorageIndex new_end = convert_index(m_data.allocatedSize());
      for(Index k=outer+1; k<=m_outerSize; ++k)
        if(m_outerIndex[k]==data_end)
          m_outerIndex[k] = new_end;
    }

    // and insert it at the right position (sorted insertion)
    Index startId = m_outerIndex[outer];
    Index p = m_outerIndex[outer]+m_innerNonZeros[outer]-1;
    while ( (p > startId) && (m_data.index(p-1) > inner) )
    {
      m_data.index(p) = m_data.index(p-1);
      m_data.value(p) = m_data.value(p-1);
      --p;
    }
    m_data.index(p) = convert_index(inner);
    return (m_data.value(p) = Scalar(0));
  }

  // ... (otherwise make room and fall back to the general routine)
  return insertUncompressed(row,col);
}
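// Note: insert() assumes the (row,col) coefficient is not already stored;
// use coeffRef(row,col) to update a possibly existing coefficient.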
template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertUncompressed(Index row, Index col)
{
  eigen_assert(!isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const StorageIndex inner = convert_index(IsRowMajor ? col : row);

  Index room = m_outerIndex[outer+1] - m_outerIndex[outer];
  StorageIndex innerNNZ = m_innerNonZeros[outer];
  if(innerNNZ>=room)
  {
    // this inner vector is full, we need to reallocate the whole buffer :(
    reserve(SingletonVector(outer,std::max<StorageIndex>(2,innerNNZ)));
  }

  Index startId = m_outerIndex[outer];
  Index p = startId + m_innerNonZeros[outer];
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    m_data.index(p) = m_data.index(p-1);
    m_data.value(p) = m_data.value(p-1);
    --p;
  }
  eigen_assert((p<=startId || m_data.index(p-1)!=inner) && "you cannot insert an element that already exists, you must call coeffRef to this end");

  m_innerNonZeros[outer]++;

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
}
template<typename _Scalar, int _Options, typename _StorageIndex>
EIGEN_DONT_INLINE typename SparseMatrix<_Scalar,_Options,_StorageIndex>::Scalar& SparseMatrix<_Scalar,_Options,_StorageIndex>::insertCompressed(Index row, Index col)
{
  eigen_assert(isCompressed());

  const Index outer = IsRowMajor ? row : col;
  const Index inner = IsRowMajor ? col : row;

  Index previousOuter = outer;
  if (m_outerIndex[outer+1]==0)
  {
    // we start a new inner vector
    while (previousOuter>=0 && m_outerIndex[previousOuter]==0)
    {
      m_outerIndex[previousOuter] = convert_index(m_data.size());
      --previousOuter;
    }
    m_outerIndex[outer+1] = m_outerIndex[outer];
  }

  // handle the tricky case where the outerIndex array starts with
  // [ 0 0 0 0 0 1 ...] and we are inserting in, e.g., the 2nd inner vector
  bool isLastVec = (!(previousOuter==-1 && m_data.size()!=0))
                && (std::size_t(m_outerIndex[outer+1]) == m_data.size());

  std::size_t startId = m_outerIndex[outer];
  std::size_t p = m_outerIndex[outer+1];
  ++m_outerIndex[outer+1];

  double reallocRatio = 1;
  if (m_data.allocatedSize()<=m_data.size())
  {
    // if there is no preallocated memory, let's reserve a minimum of 32 elements
    if (m_data.size()==0)
    {
      m_data.reserve(32);
    }
    else
    {
      // estimate the new size from the current filling ratio to limit the
      // number of reallocations (doubles avoid integer overflow)
      double nnzEstimate = double(m_outerIndex[outer])*double(m_outerSize)/double(outer+1);
      reallocRatio = (nnzEstimate-double(m_data.size()))/double(m_data.size());
      // bound the ratio to avoid both repeated small reallocs and over-allocation
      reallocRatio = (std::min)((std::max)(reallocRatio,1.5),8.);
    }
  }
  m_data.resize(m_data.size()+1,reallocRatio);

  if (!isLastVec)
  {
    if (previousOuter==-1)
    {
      // oops, wrong guess: correct the outer offsets
      for (Index k=0; k<=(outer+1); ++k)
        m_outerIndex[k] = 0;
      Index k=outer+1;
      while(m_outerIndex[k]==0)
        m_outerIndex[k++] = 1;
      while (k<=m_outerSize && m_outerIndex[k]!=0)
        m_outerIndex[k++]++;
      p = 0;
      --k;
      k = m_outerIndex[k]-1;
      // ... (shift the stored data by one position)
    }
    else
    {
      // we are not inserting into the last inner vector: update the outer offsets
      Index j = outer+2;
      while (j<=m_outerSize && m_outerIndex[j]!=0)
        m_outerIndex[j++]++;
      --j;
      // shift the data of the trailing inner vectors
      Index k = m_outerIndex[j]-1;
      // ... (shift entries in [p,k] by one position)
    }
  }

  // sorted insertion within the inner vector
  while ( (p > startId) && (m_data.index(p-1) > inner) )
  {
    // ... (shift entries one position to the right)
    --p;
  }

  m_data.index(p) = inner;
  return (m_data.value(p) = Scalar(0));
}
namespace internal {

template<typename _Scalar, int _Options, typename _StorageIndex>
struct evaluator<SparseMatrix<_Scalar,_Options,_StorageIndex> >
  : evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > >
{
  typedef evaluator<SparseCompressedBase<SparseMatrix<_Scalar,_Options,_StorageIndex> > > Base;
  typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
  evaluator() : Base() {}
  explicit evaluator(const SparseMatrixType &mat) : Base(mat) {}
};

} // namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSEMATRIX_H