#ifndef EIGEN_SPARSE_BLOCK_H
#define EIGEN_SPARSE_BLOCK_H

namespace Eigen {

// Inner-panel sparse blocks: a whole set of contiguous inner vectors
// (columns of a column-major expression, rows of a row-major one).
template<typename XprType, int BlockRows, int BlockCols>
class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
{
    typedef Block<XprType, BlockRows, BlockCols, true> BlockType;
    typedef SparseMatrixBase<BlockType> Base;
    using Base::convert_index;
  public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
  protected:
    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
  public:
    inline BlockImpl(XprType& xpr, Index i)
      : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
    {}

    inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)),
        m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
    {}

    Index nonZeros() const
    {
      typedef internal::evaluator<XprType> EvaluatorType;
      EvaluatorType matEval(m_matrix);
      Index nnz = 0;
      Index end = m_outerStart + m_outerSize.value();
      for(Index j=m_outerStart; j<end; ++j)
        for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it)
          ++nnz;
      return nnz;
    }

    inline const Scalar coeff(Index row, Index col) const
    { return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); }

    inline const Scalar coeff(Index index) const
    { return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); }

    Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

  protected:
    typename internal::ref_selector<XprType>::non_const_type m_matrix;
    Index m_outerStart;
    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
};
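// Usage sketch (added for illustration, not part of the original header):
// inner-panel blocks of a general sparse expression go through the class
// above; nonZeros() walks the covered outer vectors with an evaluator, and
// coeff() forwards to the nested expression with the outer offset. Blocks of
// a plain SparseMatrix get the more specialized implementation further below.
// This and the following sketches assume #include <Eigen/SparseCore>.
//
//   Eigen::SparseMatrix<double> A(5,5);            // column-major by default
//   A.insert(0,1) = 1.0;
//   A.insert(3,1) = 2.0;
//   Eigen::Index n = (2.0 * A).col(1).nonZeros();  // 2, counted by iteration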
namespace internal {

template<typename SparseMatrixType, int BlockRows, int BlockCols>
class sparse_matrix_block_impl
  : public SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> >
{
    typedef Block<SparseMatrixType, BlockRows, BlockCols, true> BlockType;
    typedef SparseCompressedBase<BlockType> Base;
    using Base::convert_index;
  public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)
  protected:
    enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
  public:
    inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index i)
      : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
    {}

    inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)),
        m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
    {}
    template<typename OtherDerived>
    inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
    {
      typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
      _NestedMatrixType& matrix = m_matrix;
      // This assignment is slow if this vector set is not empty
      // and/or it is not at the end of the nonzeros of the underlying matrix.

      // 1 - eval to a temporary to avoid transposition and/or aliasing issues
      Ref<const SparseMatrix<Scalar, IsRowMajor ? RowMajor : ColMajor, StorageIndex> > tmp(other.derived());
      eigen_internal_assert(tmp.outerSize()==m_outerSize.value());

      // 2 - let's check whether there is enough allocated memory
      Index nnz           = tmp.nonZeros();
      Index start         = m_outerStart==0 ? 0 : m_matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
      Index end           = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()];   // ending position of the current block
      Index block_size    = end - start;                                                  // available room in the current block
      Index tail_size     = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;

      Index free_size     = m_matrix.isCompressed()
                          ? Index(matrix.data().allocatedSize()) + block_size
                          : block_size;

      Index tmp_start = tmp.outerIndexPtr()[0];

      bool update_trailing_pointers = false;
      if(nnz>free_size)
      {
        // realloc manually to reduce copies
        typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);

        internal::smart_copy(m_matrix.valuePtr(),      m_matrix.valuePtr() + start,      newdata.valuePtr());
        internal::smart_copy(m_matrix.innerIndexPtr(), m_matrix.innerIndexPtr() + start, newdata.indexPtr());

        internal::smart_copy(tmp.valuePtr() + tmp_start,      tmp.valuePtr() + tmp_start + nnz,      newdata.valuePtr() + start);
        internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, newdata.indexPtr() + start);

        internal::smart_copy(matrix.valuePtr()+end,      matrix.valuePtr()+end + tail_size,      newdata.valuePtr()+start+nnz);
        internal::smart_copy(matrix.innerIndexPtr()+end, matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);

        newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);

        matrix.data().swap(newdata);

        update_trailing_pointers = true;
      }
      else
      {
        if(m_matrix.isCompressed())
        {
          // no need to realloc, simply copy the tail at its respective position and insert tmp
          matrix.data().resize(start + nnz + tail_size);

          internal::smart_memmove(matrix.valuePtr()+end,      matrix.valuePtr() + end+tail_size,      matrix.valuePtr() + start+nnz);
          internal::smart_memmove(matrix.innerIndexPtr()+end, matrix.innerIndexPtr() + end+tail_size, matrix.innerIndexPtr() + start+nnz);

          update_trailing_pointers = true;
        }

        internal::smart_copy(tmp.valuePtr() + tmp_start,      tmp.valuePtr() + tmp_start + nnz,      matrix.valuePtr() + start);
        internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, matrix.innerIndexPtr() + start);
      }

      // update outer index pointers and innerNonZeros
      if(IsVectorAtCompileTime)
      {
        if(!m_matrix.isCompressed())
          matrix.innerNonZeroPtr()[m_outerStart] = StorageIndex(nnz);
        matrix.outerIndexPtr()[m_outerStart] = StorageIndex(start);
      }
      else
      {
        StorageIndex p = StorageIndex(start);
        for(Index k=0; k<m_outerSize.value(); ++k)
        {
          StorageIndex nnz_k = internal::convert_index<StorageIndex>(tmp.innerVector(k).nonZeros());
          if(!m_matrix.isCompressed())
            matrix.innerNonZeroPtr()[m_outerStart+k] = nnz_k;
          matrix.outerIndexPtr()[m_outerStart+k] = p;
          p += nnz_k;
        }
      }

      if(update_trailing_pointers)
      {
        StorageIndex offset = internal::convert_index<StorageIndex>(nnz - block_size);
        for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
          matrix.outerIndexPtr()[k] += offset;
      }

      return derived();
    }

    inline BlockType& operator=(const BlockType& other)
    {
      return operator=<BlockType>(other);
    }
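// Usage sketch (added for illustration): this operator= is what runs when
// assigning into an inner-panel block of a SparseMatrix. The right-hand side
// is evaluated into a temporary, its values are spliced into the compressed
// arrays, and the tail plus the trailing outer pointers are shifted as needed.
//
//   Eigen::SparseMatrix<double> A(4,4), B(4,2);
//   A.setIdentity();
//   B.insert(0,0) = 5.0;
//   B.insert(3,1) = 6.0;
//   A.middleCols(1,2) = B;   // columns 1 and 2 of A now hold B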
    inline const Scalar* valuePtr() const
    { return m_matrix.valuePtr(); }
    inline Scalar* valuePtr()
    { return m_matrix.valuePtr(); }

    inline const StorageIndex* innerIndexPtr() const
    { return m_matrix.innerIndexPtr(); }
    inline StorageIndex* innerIndexPtr()
    { return m_matrix.innerIndexPtr(); }

    inline const StorageIndex* outerIndexPtr() const
    { return m_matrix.outerIndexPtr() + m_outerStart; }
    inline StorageIndex* outerIndexPtr()
    { return m_matrix.outerIndexPtr() + m_outerStart; }

    inline const StorageIndex* innerNonZeroPtr() const
    { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
    inline StorageIndex* innerNonZeroPtr()
    { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }

    bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }
    inline Scalar& coeffRef(Index row, Index col)
    { return m_matrix.coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); }

    inline const Scalar coeff(Index row, Index col) const
    { return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart)); }

    inline const Scalar coeff(Index index) const
    { return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart); }

    const Scalar& lastCoeff() const
    {
      EIGEN_STATIC_ASSERT_VECTOR_ONLY(BlockType);
      if(m_matrix.isCompressed())
        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
      else
        return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
    }
    Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
    Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }

  protected:
    typename internal::ref_selector<SparseMatrixType>::non_const_type m_matrix;
    Index m_outerStart;
    const internal::variable_if_dynamic<Index, OuterSize> m_outerSize;
};

} // end namespace internal
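// Usage sketch (added for illustration): a block of a SparseMatrix aliases the
// parent's arrays. Only outerIndexPtr() is shifted by the block's first outer
// index, which is why valuePtr() and innerIndexPtr() are returned unshifted
// above. Default StorageIndex is int.
//
//   Eigen::SparseMatrix<double> A(4,4);
//   A.insert(1,2) = 3.0;
//   A.makeCompressed();
//   auto blk = A.middleCols(2,2);              // a lightweight view into A
//   const int* outer   = blk.outerIndexPtr();  // == A.outerIndexPtr() + 2
//   const double* vals = blk.valuePtr();       // == A.valuePtr()
//   // nonzeros of the block's first column: vals[outer[0]] .. vals[outer[1]-1]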
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
class BlockImpl<SparseMatrix<_Scalar,_Options,_StorageIndex>,BlockRows,BlockCols,true,Sparse>
  : public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar,_Options,_StorageIndex>,BlockRows,BlockCols>
{
  public:
    typedef _StorageIndex StorageIndex;
    typedef SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
    typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
    inline BlockImpl(SparseMatrixType& xpr, Index i) : Base(xpr, i) {}
    inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : Base(xpr, startRow, startCol, blockRows, blockCols)
    {}
    using Base::operator=;
};

// Same as above for blocks of a const SparseMatrix (read-only storage view).
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
class BlockImpl<const SparseMatrix<_Scalar,_Options,_StorageIndex>,BlockRows,BlockCols,true,Sparse>
  : public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar,_Options,_StorageIndex>,BlockRows,BlockCols>
{
  public:
    typedef _StorageIndex StorageIndex;
    typedef const SparseMatrix<_Scalar,_Options,_StorageIndex> SparseMatrixType;
    typedef internal::sparse_matrix_block_impl<SparseMatrixType,BlockRows,BlockCols> Base;
    inline BlockImpl(SparseMatrixType& xpr, Index i) : Base(xpr, i) {}
    inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : Base(xpr, startRow, startCol, blockRows, blockCols)
    {}
    using Base::operator=;
};
/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
  * is col-major (resp. row-major). */
template<typename Derived>
typename SparseMatrixBase<Derived>::InnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer)
{ return InnerVectorReturnType(derived(), outer); }

/** \returns the \a outer -th column (resp. row) of the matrix \c *this if \c *this
  * is col-major (resp. row-major). Read-only. */
template<typename Derived>
const typename SparseMatrixBase<Derived>::ConstInnerVectorReturnType SparseMatrixBase<Derived>::innerVector(Index outer) const
{ return ConstInnerVectorReturnType(derived(), outer); }

/** \returns the \a outerSize columns (resp. rows) starting at \a outerStart. */
template<typename Derived>
typename SparseMatrixBase<Derived>::InnerVectorsReturnType
SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize)
{
  return Block<Derived,Dynamic,Dynamic,true>(derived(),
                                             IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
                                             IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
}

/** \returns the \a outerSize columns (resp. rows) starting at \a outerStart. Read-only. */
template<typename Derived>
const typename SparseMatrixBase<Derived>::ConstInnerVectorsReturnType
SparseMatrixBase<Derived>::innerVectors(Index outerStart, Index outerSize) const
{
  return Block<const Derived,Dynamic,Dynamic,true>(derived(),
                                                   IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
                                                   IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
}
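// Usage sketch (added for illustration): innerVectors() selects whole columns
// of a column-major expression and whole rows of a row-major one, so the same
// call is storage-order agnostic.
//
//   Eigen::SparseMatrix<double> A(6,6);                   // column-major
//   Eigen::SparseMatrix<double> P = A.innerVectors(1,3);  // columns 1..3
//   // For Eigen::SparseMatrix<double,Eigen::RowMajor> the same call
//   // would select rows 1..3.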
// Generic implementation of sparse Block expressions. Read-only.
template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
{
    typedef Block<XprType, BlockRows, BlockCols, InnerPanel> BlockType;
    typedef SparseMatrixBase<BlockType> Base;
    using Base::convert_index;
  public:
    enum { IsRowMajor = internal::traits<BlockType>::IsRowMajor };
    EIGEN_SPARSE_PUBLIC_INTERFACE(BlockType)

    /** Column or Row constructor */
    inline BlockImpl(XprType& xpr, Index i)
      : m_matrix(xpr),
        m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0),
        m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0),
        m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
        m_blockCols(BlockCols==1 ? 1 : xpr.cols())
    {}

    /** Dynamic-size constructor */
    inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
      : m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)),
        m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols))
    {}

    inline Index rows() const { return m_blockRows.value(); }
    inline Index cols() const { return m_blockCols.value(); }

    inline Scalar& coeffRef(Index row, Index col)
    { return m_matrix.coeffRef(row + m_startRow.value(), col + m_startCol.value()); }

    inline const Scalar coeff(Index row, Index col) const
    { return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value()); }

    inline Scalar& coeffRef(Index index)
    {
      return m_matrix.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                               m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
    }

    inline const Scalar coeff(Index index) const
    {
      return m_matrix.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
                            m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
    }

  protected:
    typename internal::ref_selector<XprType>::non_const_type m_matrix;
    const internal::variable_if_dynamic<Index, XprType::RowsAtCompileTime == 1 ? 0 : Dynamic> m_startRow;
    const internal::variable_if_dynamic<Index, XprType::ColsAtCompileTime == 1 ? 0 : Dynamic> m_startCol;
    const internal::variable_if_dynamic<Index, RowsAtCompileTime> m_blockRows;
    const internal::variable_if_dynamic<Index, ColsAtCompileTime> m_blockCols;
};
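// Usage sketch (added for illustration): arbitrary sub-blocks that are not a
// whole set of inner vectors fall back to this generic implementation;
// coefficient access simply adds the row/column offsets.
//
//   Eigen::SparseMatrix<double> A(5,5);
//   A.insert(2,2) = 7.0;
//   double v = A.block(1,1,3,3).coeff(1,1);  // reads A.coeff(2,2) == 7.0
//   Eigen::MatrixXd D = A.block(1,1,3,3);    // dense evaluation of the block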
namespace internal {

template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>
  : public evaluator_base<Block<ArgType,BlockRows,BlockCols,InnerPanel> >
{
    class InnerVectorInnerIterator;
    class OuterVectorInnerIterator;
  public:
    typedef Block<ArgType,BlockRows,BlockCols,InnerPanel> XprType;
    typedef typename XprType::StorageIndex StorageIndex;
    typedef typename XprType::Scalar Scalar;

    enum {
      IsRowMajor = XprType::IsRowMajor,

      // true when the block crosses the argument's outer vectors, e.g. a
      // single column of a row-major matrix
      OuterVector = (BlockCols==1 && ArgType::IsRowMajor)
                  | // FIXME: '|' instead of '||' to work around an old GCC warning
                    (BlockRows==1 && !ArgType::IsRowMajor),

      CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
      Flags = XprType::Flags
    };

    typedef typename internal::conditional<OuterVector,OuterVectorInnerIterator,InnerVectorInnerIterator>::type InnerIterator;

    explicit unary_evaluator(const XprType& op)
      : m_argImpl(op.nestedExpression()), m_block(op)
    {}

    inline Index nonZerosEstimate() const
    {
      Index nnz = m_block.nonZeros();
      if(nnz < 0) // not known a priori: extrapolate from the argument's density
        return m_argImpl.nonZerosEstimate() * m_block.size() / m_block.nestedExpression().size();
      return nnz;
    }

  protected:
    typedef typename evaluator<ArgType>::InnerIterator EvalIterator;

    evaluator<ArgType> m_argImpl;
    const XprType &m_block;
};
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
  : public EvalIterator
{
    enum { IsRowMajor = unary_evaluator::IsRowMajor };
    const XprType& m_block;
    Index m_end;
  public:

    EIGEN_STRONG_INLINE InnerVectorInnerIterator(const unary_evaluator& aEval, Index outer)
      : EvalIterator(aEval.m_argImpl, outer + (IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
        m_block(aEval.m_block),
        m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
    {
      // skip the entries located before the block's inner range
      while( (EvalIterator::operator bool()) && (EvalIterator::index() < (IsRowMajor ? m_block.startCol() : m_block.startRow())) )
        EvalIterator::operator++();
    }

    inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(IsRowMajor ? m_block.startCol() : m_block.startRow()); }
    inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); }
    inline Index row() const { return EvalIterator::row() - m_block.startRow(); }
    inline Index col() const { return EvalIterator::col() - m_block.startCol(); }

    inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; }
};
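// Usage sketch (added for illustration): InnerVectorInnerIterator is what you
// get when traversing an inner-panel block of a generic sparse expression;
// the generic Eigen::InnerIterator (Eigen 3.3+) picks it up through the
// evaluator, and the indices it reports are block-local. Assumes
// #include <iostream> in addition to <Eigen/SparseCore>.
//
//   Eigen::SparseMatrix<double> A(8,8);
//   A.insert(3,4) = 1.5;
//   auto twoA = 2.0 * A;               // a non-SparseMatrix sparse expression
//   auto blk  = twoA.middleCols(3,2);  // handled by the evaluator above
//   for(Eigen::Index j = 0; j < blk.outerSize(); ++j)
//     for(Eigen::InnerIterator<decltype(blk)> it(blk, j); it; ++it)
//       std::cout << it.row() << " " << it.col() << " " << it.value() << "\n";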
template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
{
    enum { IsRowMajor = unary_evaluator::IsRowMajor };
    const unary_evaluator& m_eval;
    Index m_outerPos;
    const Index m_innerIndex;
    Index m_end;
    EvalIterator m_it;
  public:

    EIGEN_STRONG_INLINE OuterVectorInnerIterator(const unary_evaluator& aEval, Index outer)
      : m_eval(aEval),
        m_outerPos( (IsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
        m_innerIndex(IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
        m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
        m_it(m_eval.m_argImpl, m_outerPos)
    {
      EIGEN_UNUSED_VARIABLE(outer);
      eigen_internal_assert(outer==0);

      // position the underlying iterator on the sought inner index, or move on
      while(m_it && m_it.index() < m_innerIndex) ++m_it;
      if((!m_it) || (m_it.index()!=m_innerIndex))
        ++(*this);
    }

    inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
    inline Index outer() const { return 0; }
    inline Index row() const { return IsRowMajor ? 0 : index(); }
    inline Index col() const { return IsRowMajor ? index() : 0; }

    inline Scalar value() const { return m_it.value(); }
    inline Scalar& valueRef() { return m_it.valueRef(); }

    inline OuterVectorInnerIterator& operator++()
    {
      // search for the next non-zero entry at inner position m_innerIndex
      while(++m_outerPos<m_end)
      {
        // restart the underlying iterator at the next outer vector
        m_it.~EvalIterator();
        ::new (&m_it) EvalIterator(m_eval.m_argImpl, m_outerPos);
        while(m_it && m_it.index() < m_innerIndex) ++m_it;
        if(m_it && m_it.index()==m_innerIndex) break;
      }
      return *this;
    }

    inline operator bool() const { return m_outerPos < m_end; }
};
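// Usage sketch (added for illustration): OuterVectorInnerIterator covers the
// "crosswise" case, e.g. a single column of a row-major matrix. Each ++
// re-creates an iterator on the next outer vector and searches it for the
// fixed inner index, so such traversals cost a search per outer vector rather
// than one contiguous scan.
//
//   Eigen::SparseMatrix<double,Eigen::RowMajor> R(8,8);
//   R.insert(2,5) = 1.0;
//   R.insert(6,5) = 2.0;
//   double s = R.col(5).sum();  // 3.0, gathered row by row via this iterator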
// Inner-panel blocks of a SparseMatrix expose compressed storage directly,
// so they reuse the SparseCompressedBase evaluator instead of the generic one.
template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
struct unary_evaluator<Block<SparseMatrix<_Scalar,_Options,_StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
  : evaluator<SparseCompressedBase<Block<SparseMatrix<_Scalar,_Options,_StorageIndex>,BlockRows,BlockCols,true> > >
{
    typedef Block<SparseMatrix<_Scalar,_Options,_StorageIndex>,BlockRows,BlockCols,true> XprType;
    typedef evaluator<SparseCompressedBase<XprType> > Base;
    explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};

template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
struct unary_evaluator<Block<const SparseMatrix<_Scalar,_Options,_StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
  : evaluator<SparseCompressedBase<Block<const SparseMatrix<_Scalar,_Options,_StorageIndex>,BlockRows,BlockCols,true> > >
{
    typedef Block<const SparseMatrix<_Scalar,_Options,_StorageIndex>,BlockRows,BlockCols,true> XprType;
    typedef evaluator<SparseCompressedBase<XprType> > Base;
    explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
};

} // end namespace internal
} // end namespace Eigen

#endif // EIGEN_SPARSE_BLOCK_H