SparseBlock.h
Go to the documentation of this file.
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2008-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSE_BLOCK_H
11 #define EIGEN_SPARSE_BLOCK_H
12 
13 namespace Eigen {
14 
15 // Subset of columns or rows
// NOTE(review): this text is a documentation-generator dump; the leading
// integers are Doxygen line numbers, and several original lines (typedefs,
// the EIGEN_SPARSE_PUBLIC_INTERFACE macro, member declarations) are missing.
//
// Generic read-only specialization of BlockImpl for sparse expressions when
// the block spans whole inner vectors (complete columns of a column-major
// expression, or complete rows of a row-major one).
16 template<typename XprType, int BlockRows, int BlockCols>
17 class BlockImpl<XprType,BlockRows,BlockCols,true,Sparse>
18  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,true> >
19 {
22 public:
24 protected:
// Compile-time number of outer vectors covered by the block (Dynamic if unknown).
25  enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
27  using Base::convert_index;
28 public:
30 
// Map the single outer vector (row or column, per storage order) at index i.
31  inline BlockImpl(XprType& xpr, Index i)
32  : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
33  {}
34 
// General constructor: only the outer-direction start/size are retained;
// the inner direction always spans the full expression.
35  inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
36  : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
37  {}
38 
39  EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
40  EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
41 
// Counts stored coefficients by iterating every covered outer vector through
// the expression's evaluator — O(nnz of the block), not O(1).
42  Index nonZeros() const
43  {
44  typedef internal::evaluator<XprType> EvaluatorType;
45  EvaluatorType matEval(m_matrix);
46  Index nnz = 0;
47  Index end = m_outerStart + m_outerSize.value();
48  for(Index j=m_outerStart; j<end; ++j)
49  for(typename EvaluatorType::InnerIterator it(matEval, j); it; ++it)
50  ++nnz;
51  return nnz;
52  }
53 
// Random read access: translate block-local (row,col) to the outer offset of
// the underlying expression.
54  inline const Scalar coeff(Index row, Index col) const
55  {
56  return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
57  }
58 
// Linear read access for the single-outer-vector case.
59  inline const Scalar coeff(Index index) const
60  {
61  return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
62  }
63 
64  inline const XprType& nestedExpression() const { return m_matrix; }
65  inline XprType& nestedExpression() { return m_matrix; }
66  Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
67  Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
68  Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
69  Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
70 
// NOTE(review): the member declarations (m_matrix, m_outerStart, m_outerSize)
// that belong here were dropped by the documentation extractor.
71  protected:
72 
76 
77  protected:
78  // Disable assignment with clear error message.
79  // Note that simply removing operator= yields compilation errors with ICC+MSVC
80  template<typename T>
81  BlockImpl& operator=(const T&)
82  {
83  EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
84  return *this;
85  }
86 };
87 
88 
89 /***************************************************************************
90 * specialization for SparseMatrix
91 ***************************************************************************/
92 
93 namespace internal {
94 
// Writable implementation shared by the SparseMatrix specializations of
// BlockImpl. Because the block maps contiguous inner vectors of an actual
// SparseMatrix, it can expose direct pointers into the matrix's storage and
// support assignment.
// NOTE(review): the extractor dropped the class declaration line
// ("class sparse_matrix_block_impl") and several typedef lines here.
95 template<typename SparseMatrixType, int BlockRows, int BlockCols>
97  : public SparseCompressedBase<Block<SparseMatrixType,BlockRows,BlockCols,true> >
98 {
102  using Base::convert_index;
103 public:
106 protected:
107  typedef typename Base::IndexVector IndexVector;
// Compile-time outer-vector count of the block (Dynamic if unknown).
108  enum { OuterSize = IsRowMajor ? BlockRows : BlockCols };
109 public:
110 
// Map the single outer vector at index i.
111  inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index i)
112  : m_matrix(xpr), m_outerStart(convert_index(i)), m_outerSize(OuterSize)
113  {}
114 
// General constructor: keep only the outer-direction start/size.
115  inline sparse_matrix_block_impl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
116  : m_matrix(xpr), m_outerStart(convert_index(IsRowMajor ? startRow : startCol)), m_outerSize(convert_index(IsRowMajor ? blockRows : blockCols))
117  {}
118 
// Assign an arbitrary sparse expression into this set of inner vectors.
// Strategy: evaluate to a temporary, then splice the temporary's storage into
// the underlying matrix, shifting or reallocating the tail as needed.
119  template<typename OtherDerived>
120  inline BlockType& operator=(const SparseMatrixBase<OtherDerived>& other)
121  {
122  typedef typename internal::remove_all<typename SparseMatrixType::Nested>::type _NestedMatrixType;
123  _NestedMatrixType& matrix = m_matrix;
124  // This assignment is slow if this vector set is not empty
125  // and/or it is not at the end of the nonzeros of the underlying matrix.
126 
127  // 1 - eval to a temporary to avoid transposition and/or aliasing issues
// NOTE(review): the statement constructing the temporary `tmp` (original
// line 128) is missing from this dump; `tmp` is used below.
129  eigen_internal_assert(tmp.outerSize()==m_outerSize.value());
130 
131  // 2 - let's check whether there is enough allocated memory
132  Index nnz = tmp.nonZeros();
133  Index start = m_outerStart==0 ? 0 : m_matrix.outerIndexPtr()[m_outerStart]; // starting position of the current block
134  Index end = m_matrix.outerIndexPtr()[m_outerStart+m_outerSize.value()]; // ending position of the current block
135  Index block_size = end - start; // available room in the current block
136  Index tail_size = m_matrix.outerIndexPtr()[m_matrix.outerSize()] - end;
137 
// In compressed mode the spare capacity of the whole matrix can be reused;
// in uncompressed mode only the block's own room is available.
138  Index free_size = m_matrix.isCompressed()
139  ? Index(matrix.data().allocatedSize()) + block_size
140  : block_size;
141 
142  Index tmp_start = tmp.outerIndexPtr()[0];
143 
144  bool update_trailing_pointers = false;
145  if(nnz>free_size)
146  {
147  // realloc manually to reduce copies
148  typename SparseMatrixType::Storage newdata(m_matrix.data().allocatedSize() - block_size + nnz);
149 
// Copy the part preceding the block unchanged.
150  internal::smart_copy(m_matrix.valuePtr(), m_matrix.valuePtr() + start, newdata.valuePtr());
151  internal::smart_copy(m_matrix.innerIndexPtr(), m_matrix.innerIndexPtr() + start, newdata.indexPtr());
152 
// Insert the new block content.
153  internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, newdata.valuePtr() + start);
154  internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, newdata.indexPtr() + start);
155 
// Copy the trailing part after the block.
156  internal::smart_copy(matrix.valuePtr()+end, matrix.valuePtr()+end + tail_size, newdata.valuePtr()+start+nnz);
157  internal::smart_copy(matrix.innerIndexPtr()+end, matrix.innerIndexPtr()+end + tail_size, newdata.indexPtr()+start+nnz);
158 
159  newdata.resize(m_matrix.outerIndexPtr()[m_matrix.outerSize()] - block_size + nnz);
160 
161  matrix.data().swap(newdata);
162 
163  update_trailing_pointers = true;
164  }
165  else
166  {
167  if(m_matrix.isCompressed())
168  {
169  // no need to realloc, simply copy the tail at its respective position and insert tmp
170  matrix.data().resize(start + nnz + tail_size);
171 
// smart_memmove handles the overlapping source/destination ranges.
172  internal::smart_memmove(matrix.valuePtr()+end, matrix.valuePtr() + end+tail_size, matrix.valuePtr() + start+nnz);
173  internal::smart_memmove(matrix.innerIndexPtr()+end, matrix.innerIndexPtr() + end+tail_size, matrix.innerIndexPtr() + start+nnz);
174 
175  update_trailing_pointers = true;
176  }
177 
178  internal::smart_copy(tmp.valuePtr() + tmp_start, tmp.valuePtr() + tmp_start + nnz, matrix.valuePtr() + start);
179  internal::smart_copy(tmp.innerIndexPtr() + tmp_start, tmp.innerIndexPtr() + tmp_start + nnz, matrix.innerIndexPtr() + start);
180  }
181 
182  // update outer index pointers and innerNonZeros
183  if(IsVectorAtCompileTime)
184  {
185  if(!m_matrix.isCompressed())
186  matrix.innerNonZeroPtr()[m_outerStart] = StorageIndex(nnz);
187  matrix.outerIndexPtr()[m_outerStart] = StorageIndex(start);
188  }
189  else
190  {
// Rebuild the outer index (and innerNonZeros) for every covered outer vector.
191  StorageIndex p = StorageIndex(start);
192  for(Index k=0; k<m_outerSize.value(); ++k)
193  {
194  StorageIndex nnz_k = internal::convert_index<StorageIndex>(tmp.innerVector(k).nonZeros());
195  if(!m_matrix.isCompressed())
196  matrix.innerNonZeroPtr()[m_outerStart+k] = nnz_k;
197  matrix.outerIndexPtr()[m_outerStart+k] = p;
198  p += nnz_k;
199  }
200  }
201 
// Shift every outer index after the block by the size delta.
202  if(update_trailing_pointers)
203  {
204  StorageIndex offset = internal::convert_index<StorageIndex>(nnz - block_size);
205  for(Index k = m_outerStart + m_outerSize.value(); k<=matrix.outerSize(); ++k)
206  {
207  matrix.outerIndexPtr()[k] += offset;
208  }
209  }
210 
211  return derived();
212  }
213 
// Copy-assignment forwards to the templated operator= above.
214  inline BlockType& operator=(const BlockType& other)
215  {
216  return operator=<BlockType>(other);
217  }
218 
// Direct storage access: value/inner-index pointers are those of the full
// matrix (the block's data is addressed via outerIndexPtr below).
219  inline const Scalar* valuePtr() const
220  { return m_matrix.valuePtr(); }
221  inline Scalar* valuePtr()
222  { return m_matrix.valuePtr(); }
223 
224  inline const StorageIndex* innerIndexPtr() const
225  { return m_matrix.innerIndexPtr(); }
// NOTE(review): the non-const innerIndexPtr() signature line (original 226)
// is missing from this dump.
227  { return m_matrix.innerIndexPtr(); }
228 
// Outer index pointer shifted to the block's first outer vector.
229  inline const StorageIndex* outerIndexPtr() const
230  { return m_matrix.outerIndexPtr() + m_outerStart; }
// NOTE(review): the non-const outerIndexPtr() signature line (original 231)
// is missing from this dump.
232  { return m_matrix.outerIndexPtr() + m_outerStart; }
233 
234  inline const StorageIndex* innerNonZeroPtr() const
235  { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
// NOTE(review): the non-const innerNonZeroPtr() signature line (original 236)
// is missing from this dump.
237  { return isCompressed() ? 0 : (m_matrix.innerNonZeroPtr()+m_outerStart); }
238 
// The block is compressed iff the underlying matrix is.
239  bool isCompressed() const { return m_matrix.innerNonZeroPtr()==0; }
240 
// NOTE(review): the coeffRef(Index,Index) signature line (original 241) is
// missing from this dump; this body provides writable random access.
242  {
243  return m_matrix.coeffRef(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
244  }
245 
246  inline const Scalar coeff(Index row, Index col) const
247  {
248  return m_matrix.coeff(row + (IsRowMajor ? m_outerStart : 0), col + (IsRowMajor ? 0 : m_outerStart));
249  }
250 
251  inline const Scalar coeff(Index index) const
252  {
253  return m_matrix.coeff(IsRowMajor ? m_outerStart : index, IsRowMajor ? index : m_outerStart);
254  }
255 
// Last stored coefficient of the block; requires a non-empty block (vector
// blocks only — presumably guarded by a static assert on the missing line 258).
256  const Scalar& lastCoeff() const
257  {
259  eigen_assert(Base::nonZeros()>0);
260  if(m_matrix.isCompressed())
261  return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart+1]-1];
262  else
263  return m_matrix.valuePtr()[m_matrix.outerIndexPtr()[m_outerStart]+m_matrix.innerNonZeroPtr()[m_outerStart]-1];
264  }
265 
266  EIGEN_STRONG_INLINE Index rows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
267  EIGEN_STRONG_INLINE Index cols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
268 
269  inline const SparseMatrixType& nestedExpression() const { return m_matrix; }
270  inline SparseMatrixType& nestedExpression() { return m_matrix; }
271  Index startRow() const { return IsRowMajor ? m_outerStart : 0; }
272  Index startCol() const { return IsRowMajor ? 0 : m_outerStart; }
273  Index blockRows() const { return IsRowMajor ? m_outerSize.value() : m_matrix.rows(); }
274  Index blockCols() const { return IsRowMajor ? m_matrix.cols() : m_outerSize.value(); }
275 
// NOTE(review): member declarations (m_matrix, m_outerStart, m_outerSize)
// were dropped by the documentation extractor.
276  protected:
277 
281 
282 };
283 
284 } // namespace internal
285 
// BlockImpl specialization for a non-const SparseMatrix: inherits the full
// writable implementation from internal::sparse_matrix_block_impl and simply
// forwards the constructors.
// NOTE(review): typedef lines (SparseMatrixType, Base) were dropped by the
// documentation extractor.
286 template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
287 class BlockImpl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
288  : public internal::sparse_matrix_block_impl<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
289 {
290 public:
291  typedef _StorageIndex StorageIndex;
// Single outer-vector constructor.
294  inline BlockImpl(SparseMatrixType& xpr, Index i)
295  : Base(xpr, i)
296  {}
297 
// General start/size constructor.
298  inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
299  : Base(xpr, startRow, startCol, blockRows, blockCols)
300  {}
301 
// Expose the base class's assignment operators (blocks of a non-const
// SparseMatrix are writable).
302  using Base::operator=;
303 };
304 
// BlockImpl specialization for a const SparseMatrix. Same forwarding shell as
// the non-const version; the private constructor templates at the bottom
// reject construction from arbitrary sparse expressions.
// NOTE(review): typedef lines (SparseMatrixType, Base) were dropped by the
// documentation extractor.
305 template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
306 class BlockImpl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true,Sparse>
307  : public internal::sparse_matrix_block_impl<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols>
308 {
309 public:
310  typedef _StorageIndex StorageIndex;
// Single outer-vector constructor.
313  inline BlockImpl(SparseMatrixType& xpr, Index i)
314  : Base(xpr, i)
315  {}
316 
// General start/size constructor.
317  inline BlockImpl(SparseMatrixType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
318  : Base(xpr, startRow, startCol, blockRows, blockCols)
319  {}
320 
321  using Base::operator=;
// Disallow building a const-matrix block from a generic sparse expression.
322 private:
323  template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr, Index i);
324  template<typename Derived> BlockImpl(const SparseMatrixBase<Derived>& xpr);
325 };
326 
327 //----------
328 
// Out-of-class definitions of SparseMatrixBase::innerVector (non-const and
// const) and innerVectors: views of one or several complete inner vectors.
// NOTE(review): the function signature lines (originals 333, 340, 347-348,
// 360-363) were dropped by the documentation extractor; only the bodies and
// template headers remain below.
332 template<typename Derived>
334 { return InnerVectorReturnType(derived(), outer); }
335 
339 template<typename Derived>
341 { return ConstInnerVectorReturnType(derived(), outer); }
342 
// innerVectors(outerStart, outerSize): a block covering `outerSize`
// consecutive inner vectors starting at `outerStart`.
346 template<typename Derived>
349 {
350  return Block<Derived,Dynamic,Dynamic,true>(derived(),
351  IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
352  IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
353 
354 }
355 
// Const version of innerVectors; the return statement's opening line is also
// missing from this dump.
359 template<typename Derived>
362 {
364  IsRowMajor ? outerStart : 0, IsRowMajor ? 0 : outerStart,
365  IsRowMajor ? outerSize : rows(), IsRowMajor ? cols() : outerSize);
366 
367 }
368 
// Generic read-only BlockImpl for sparse expressions: handles arbitrary
// (non-inner-panel) blocks via coefficient-wise access; tracks an explicit
// start row/column and block size.
// NOTE(review): the EIGEN_SPARSE_PUBLIC_INTERFACE line and member
// declarations were dropped by the documentation extractor.
372 template<typename XprType, int BlockRows, int BlockCols, bool InnerPanel>
373 class BlockImpl<XprType,BlockRows,BlockCols,InnerPanel,Sparse>
374  : public SparseMatrixBase<Block<XprType,BlockRows,BlockCols,InnerPanel> >, internal::no_assignment_operator
375 {
378  using Base::convert_index;
379 public:
382 
383  typedef typename internal::remove_all<typename XprType::Nested>::type _MatrixTypeNested;
384 
// Single row/column constructor: i indexes the row (when the block is one
// full row) or the column (when it is one full column).
387  inline BlockImpl(XprType& xpr, Index i)
388  : m_matrix(xpr),
389  m_startRow( (BlockRows==1) && (BlockCols==XprType::ColsAtCompileTime) ? convert_index(i) : 0),
390  m_startCol( (BlockRows==XprType::RowsAtCompileTime) && (BlockCols==1) ? convert_index(i) : 0),
391  m_blockRows(BlockRows==1 ? 1 : xpr.rows()),
392  m_blockCols(BlockCols==1 ? 1 : xpr.cols())
393  {}
394 
// General start/size constructor.
397  inline BlockImpl(XprType& xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
398  : m_matrix(xpr), m_startRow(convert_index(startRow)), m_startCol(convert_index(startCol)), m_blockRows(convert_index(blockRows)), m_blockCols(convert_index(blockCols))
399  {}
400 
401  inline Index rows() const { return m_blockRows.value(); }
402  inline Index cols() const { return m_blockCols.value(); }
403 
// NOTE(review): the coeffRef(Index,Index) signature line (original 404) is
// missing from this dump; this body provides writable random access.
405  {
406  return m_matrix.coeffRef(row + m_startRow.value(), col + m_startCol.value());
407  }
408 
409  inline const Scalar coeff(Index row, Index col) const
410  {
411  return m_matrix.coeff(row + m_startRow.value(), col + m_startCol.value());
412  }
413 
// Linear access for 1D blocks: the index walks along the block's single
// row or column.
414  inline Scalar& coeffRef(Index index)
415  {
416  return m_matrix.coeffRef(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
417  m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
418  }
419 
420  inline const Scalar coeff(Index index) const
421  {
422  return m_matrix.coeff(m_startRow.value() + (RowsAtCompileTime == 1 ? 0 : index),
423  m_startCol.value() + (RowsAtCompileTime == 1 ? index : 0));
424  }
425 
426  inline const XprType& nestedExpression() const { return m_matrix; }
427  inline XprType& nestedExpression() { return m_matrix; }
428  Index startRow() const { return m_startRow.value(); }
429  Index startCol() const { return m_startCol.value(); }
430  Index blockRows() const { return m_blockRows.value(); }
431  Index blockCols() const { return m_blockCols.value(); }
432 
433  protected:
434 // friend class internal::GenericSparseBlockInnerIteratorImpl<XprType,BlockRows,BlockCols,InnerPanel>;
435  friend struct internal::unary_evaluator<Block<XprType,BlockRows,BlockCols,InnerPanel>, internal::IteratorBased, Scalar >;
436 
// Sentinel: the nonzero count of a generic block is unknown without a scan;
// Dynamic tells the evaluator to estimate it (see nonZerosEstimate below).
437  Index nonZeros() const { return Dynamic; }
438 
444 
445  protected:
446  // Disable assignment with clear error message.
447  // Note that simply removing operator= yields compilation errors with ICC+MSVC
448  template<typename T>
449  BlockImpl& operator=(const T&)
450  {
451  EIGEN_STATIC_ASSERT(sizeof(T)==0, THIS_SPARSE_BLOCK_SUBEXPRESSION_IS_READ_ONLY);
452  return *this;
453  }
454 
455 };
456 
457 namespace internal {
458 
// Iterator-based evaluator for generic sparse blocks. Chooses between two
// iterator strategies: InnerVectorInnerIterator when the block follows the
// storage order, OuterVectorInnerIterator when a 1D block runs across the
// storage order (e.g. one row of a column-major matrix).
// NOTE(review): typedef lines (XprType, EvalIterator, the InnerIterator
// selection, m_argImpl's declaration) were dropped by the extractor.
459 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
460 struct unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased >
461  : public evaluator_base<Block<ArgType,BlockRows,BlockCols,InnerPanel> >
462 {
463  class InnerVectorInnerIterator;
464  class OuterVectorInnerIterator;
465  public:
467  typedef typename XprType::StorageIndex StorageIndex;
468  typedef typename XprType::Scalar Scalar;
469 
470  enum {
471  IsRowMajor = XprType::IsRowMajor,
472 
// True when the 1D block runs against the storage order, requiring the
// slower outer-vector iteration.
473  OuterVector = (BlockCols==1 && ArgType::IsRowMajor)
474  | // FIXME | instead of || to please GCC 4.4.0 stupid warning "suggest parentheses around &&".
475  // revert to || as soon as not needed anymore.
476  (BlockRows==1 && !ArgType::IsRowMajor),
478 
479  Flags = XprType::Flags
480  };
481 
483 
484  explicit unary_evaluator(const XprType& op)
485  : m_argImpl(op.nestedExpression()), m_block(op)
486  {}
487 
// The block's own nonZeros() returns Dynamic (unknown); in that case scale
// the underlying estimate by the block's fraction of the full expression.
488  inline Index nonZerosEstimate() const {
489  Index nnz = m_block.nonZeros();
490  if(nnz<0)
491  return m_argImpl.nonZerosEstimate() * m_block.size() / m_block.nestedExpression().size();
492  return nnz;
493  }
494 
495  protected:
497 
499  const XprType &m_block;
500 };
501 
// Iterator over one inner vector of the block, built on top of the underlying
// expression's iterator: starts at the block's outer offset, skips entries
// before the block's inner start, and stops past the block's inner end.
// NOTE(review): the constructor signature line (original 511) and the m_end
// member declaration (original 508) were dropped by the extractor.
502 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
503 class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::InnerVectorInnerIterator
504  : public EvalIterator
505 {
506  enum { IsRowMajor = unary_evaluator::IsRowMajor };
507  const XprType& m_block;
509 public:
510 
512  : EvalIterator(aEval.m_argImpl, outer + (IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol())),
513  m_block(aEval.m_block),
514  m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows())
515  {
// Skip the entries that precede the block in the inner direction.
516  while( (EvalIterator::operator bool()) && (EvalIterator::index() < (IsRowMajor ? m_block.startCol() : m_block.startRow())) )
517  EvalIterator::operator++();
518  }
519 
// All positions are reported relative to the block's top-left corner.
520  inline StorageIndex index() const { return EvalIterator::index() - convert_index<StorageIndex>(IsRowMajor ? m_block.startCol() : m_block.startRow()); }
521  inline Index outer() const { return EvalIterator::outer() - (IsRowMajor ? m_block.startRow() : m_block.startCol()); }
522  inline Index row() const { return EvalIterator::row() - m_block.startRow(); }
523  inline Index col() const { return EvalIterator::col() - m_block.startCol(); }
524 
// Valid while the underlying iterator is valid and still inside the block.
525  inline operator bool() const { return EvalIterator::operator bool() && EvalIterator::index() < m_end; }
526 };
527 
// Iterator for a 1D block that runs ACROSS the storage order (e.g. a single
// row of a column-major matrix). For each outer vector in [start, end) it
// searches for the single entry at the fixed inner index m_innerIndex, so
// advancing is O(nnz per outer vector) per step.
// NOTE(review): the member declarations (m_eval, m_outerPos, m_innerIndex,
// m_end, m_it — originals 532-536) and the constructor signature line
// (original 539) were dropped by the extractor.
528 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
529 class unary_evaluator<Block<ArgType,BlockRows,BlockCols,InnerPanel>, IteratorBased>::OuterVectorInnerIterator
530 {
531  enum { IsRowMajor = unary_evaluator::IsRowMajor };
537 public:
538 
540  : m_eval(aEval),
541  m_outerPos( (IsRowMajor ? aEval.m_block.startCol() : aEval.m_block.startRow()) ),
542  m_innerIndex(IsRowMajor ? aEval.m_block.startRow() : aEval.m_block.startCol()),
543  m_end(IsRowMajor ? aEval.m_block.startCol()+aEval.m_block.blockCols() : aEval.m_block.startRow()+aEval.m_block.blockRows()),
544  m_it(m_eval.m_argImpl, m_outerPos)
545  {
// A 1D cross-order block has exactly one "outer" vector, hence outer==0.
546  EIGEN_UNUSED_VARIABLE(outer);
547  eigen_assert(outer==0);
548 
// Locate the entry at m_innerIndex in the first outer vector; if absent,
// advance to the first outer vector that has one.
549  while(m_it && m_it.index() < m_innerIndex) ++m_it;
550  if((!m_it) || (m_it.index()!=m_innerIndex))
551  ++(*this);
552  }
553 
// Position along the block, relative to its start.
554  inline StorageIndex index() const { return convert_index<StorageIndex>(m_outerPos - (IsRowMajor ? m_eval.m_block.startCol() : m_eval.m_block.startRow())); }
555  inline Index outer() const { return 0; }
556  inline Index row() const { return IsRowMajor ? 0 : index(); }
557  inline Index col() const { return IsRowMajor ? index() : 0; }
558 
559  inline Scalar value() const { return m_it.value(); }
560  inline Scalar& valueRef() { return m_it.valueRef(); }
561 
562  inline OuterVectorInnerIterator& operator++()
563  {
564  // search next non-zero entry
565  while(++m_outerPos<m_end)
566  {
567  // Restart iterator at the next inner-vector:
// Destroy/placement-new because EvalIterator is not assignable.
568  m_it.~EvalIterator();
569  ::new (&m_it) EvalIterator(m_eval.m_argImpl, m_outerPos);
570  // search for the key m_innerIndex in the current outer-vector
571  while(m_it && m_it.index() < m_innerIndex) ++m_it;
572  if(m_it && m_it.index()==m_innerIndex) break;
573  }
574  return *this;
575  }
576 
577  inline operator bool() const { return m_outerPos < m_end; }
578 };
579 
// Inner-panel blocks of an actual SparseMatrix expose compressed storage
// directly, so their evaluator is just the SparseCompressedBase evaluator.
// NOTE(review): typedef lines (XprType, Base — originals 584-585) were
// dropped by the documentation extractor.
580 template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
581 struct unary_evaluator<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
582  : evaluator<SparseCompressedBase<Block<SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
583 {
586  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
587 };
588 
// Same as the specialization above, for blocks of a const SparseMatrix.
// NOTE(review): typedef lines (XprType, Base — originals 593-594) were
// dropped by the documentation extractor.
589 template<typename _Scalar, int _Options, typename _StorageIndex, int BlockRows, int BlockCols>
590 struct unary_evaluator<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true>, IteratorBased>
591  : evaluator<SparseCompressedBase<Block<const SparseMatrix<_Scalar, _Options, _StorageIndex>,BlockRows,BlockCols,true> > >
592 {
595  explicit unary_evaluator(const XprType &xpr) : Base(xpr) {}
596 };
597 
598 } // end namespace internal
599 
600 
601 } // end namespace Eigen
602 
603 #endif // EIGEN_SPARSE_BLOCK_H
const Scalar coeff(Index index) const
Definition: SparseBlock.h:251
#define EIGEN_STRONG_INLINE
Definition: Macros.h:493
sparse_matrix_block_impl(SparseMatrixType &xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
Definition: SparseBlock.h:115
A versatible sparse matrix representation.
Definition: SparseMatrix.h:96
const Scalar coeff(Index row, Index col) const
Definition: SparseBlock.h:54
const internal::variable_if_dynamic< Index, RowsAtCompileTime > m_blockRows
Definition: SparseBlock.h:442
Definition: LDLT.h:16
internal::sparse_matrix_block_impl< SparseMatrixType, BlockRows, BlockCols > Base
Definition: SparseBlock.h:312
#define EIGEN_STATIC_ASSERT(CONDITION, MSG)
Definition: StaticAssert.h:122
EIGEN_DEVICE_FUNC IndexDest convert_index(const IndexSrc &idx)
Definition: XprHelper.h:31
internal::ref_selector< SparseMatrixType >::non_const_type m_matrix
Definition: SparseBlock.h:278
const StorageIndex * outerIndexPtr() const
Definition: SparseBlock.h:229
BlockType & operator=(const BlockType &other)
Definition: SparseBlock.h:214
const internal::variable_if_dynamic< Index, OuterSize > m_outerSize
Definition: SparseBlock.h:280
BlockType & operator=(const SparseMatrixBase< OtherDerived > &other)
Definition: SparseBlock.h:120
BlockImpl(XprType &xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
Definition: SparseBlock.h:397
const Scalar coeff(Index row, Index col) const
Definition: SparseBlock.h:246
const StorageIndex * innerNonZeroPtr() const
Definition: SparseBlock.h:234
Block< XprType, BlockRows, BlockCols, true > BlockType
Definition: SparseBlock.h:21
EIGEN_DEVICE_FUNC void smart_copy(const T *start, const T *end, T *target)
Definition: Memory.h:485
internal::traits< Block< const SparseMatrix< _Scalar, _Options, _StorageIndex >, BlockRows, BlockCols, true > >::StorageIndex StorageIndex
#define EIGEN_SPARSE_PUBLIC_INTERFACE(Derived)
Definition: SparseUtil.h:43
EIGEN_DEVICE_FUNC ColXpr col(Index i)
This is the const version of col().
Definition: BlockMethods.h:838
Base class of any sparse matrices or sparse expressions.
EIGEN_STRONG_INLINE Index rows() const
Definition: SparseBlock.h:266
internal::remove_all< typename XprType::Nested >::type _MatrixTypeNested
Definition: SparseBlock.h:20
Scalar & coeffRef(Index row, Index col)
Definition: SparseBlock.h:241
internal::traits< Block< XprType, BlockRows, BlockCols, true > >::Scalar Scalar
sparse_matrix_block_impl(SparseMatrixType &xpr, Index i)
Definition: SparseBlock.h:111
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition: Meta.h:33
#define eigen_assert(x)
Definition: Macros.h:577
InnerVectorReturnType innerVector(Index outer)
Definition: SparseBlock.h:333
EIGEN_DEVICE_FUNC RowXpr row(Index i)
This is the const version of row(). */.
Definition: BlockMethods.h:859
internal::conditional< OuterVector, OuterVectorInnerIterator, InnerVectorInnerIterator >::type InnerIterator
Definition: SparseBlock.h:482
BlockImpl(XprType &xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
Definition: SparseBlock.h:35
const SparseMatrixType & nestedExpression() const
Definition: SparseBlock.h:269
A matrix or vector expression mapping an existing expression.
Definition: Ref.h:190
Block< SparseMatrixType, BlockRows, BlockCols, true > BlockType
Definition: SparseBlock.h:100
const internal::variable_if_dynamic< Index, ColsAtCompileTime > m_blockCols
Definition: SparseBlock.h:443
const internal::variable_if_dynamic< Index, OuterSize > m_outerSize
Definition: SparseBlock.h:75
InnerVectorsReturnType innerVectors(Index outerStart, Index outerSize)
Definition: SparseBlock.h:348
Expression of a fixed-size or dynamic-size block.
Definition: Block.h:103
const Derived & derived() const
Block< XprType, BlockRows, BlockCols, InnerPanel > BlockType
Definition: SparseBlock.h:376
SparseCompressedBase< Block< SparseMatrixType, BlockRows, BlockCols, true > > Base
Definition: SparseBlock.h:101
BlockImpl(SparseMatrixType &xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
Definition: SparseBlock.h:317
EIGEN_STRONG_INLINE Index cols() const
Definition: SparseBlock.h:267
BlockImpl(SparseMatrixType &xpr, Index startRow, Index startCol, Index blockRows, Index blockCols)
Definition: SparseBlock.h:298
const StorageIndex * innerIndexPtr() const
Definition: SparseBlock.h:224
const internal::variable_if_dynamic< Index, XprType::RowsAtCompileTime==1?0:Dynamic > m_startRow
Definition: SparseBlock.h:440
internal::ref_selector< XprType >::non_const_type m_matrix
Definition: SparseBlock.h:73
const int Dynamic
Definition: Constants.h:21
#define eigen_internal_assert(x)
Definition: Macros.h:583
internal::sparse_matrix_block_impl< SparseMatrixType, BlockRows, BlockCols > Base
Definition: SparseBlock.h:293
const internal::variable_if_dynamic< Index, XprType::ColsAtCompileTime==1?0:Dynamic > m_startCol
Definition: SparseBlock.h:441
Common base class for sparse [compressed]-{row|column}-storage format.
internal::remove_all< typename SparseMatrixType::Nested >::type _MatrixTypeNested
Definition: SparseBlock.h:99
#define EIGEN_STATIC_ASSERT_VECTOR_ONLY(TYPE)
Definition: StaticAssert.h:137
void smart_memmove(const T *start, const T *end, T *target)
Definition: Memory.h:508
#define EIGEN_UNUSED_VARIABLE(var)
Definition: Macros.h:616
internal::ref_selector< XprType >::non_const_type m_matrix
Definition: SparseBlock.h:439


hebiros
Author(s): Xavier Artache , Matthew Tesch
autogenerated on Thu Sep 3 2020 04:08:51