SparseSelfAdjointView.h
Go to the documentation of this file.
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
11 #define EIGEN_SPARSE_SELFADJOINTVIEW_H
12 
13 namespace Eigen {
14 
29 namespace internal {
30 
// Expression traits: a SparseSelfAdjointView simply inherits the traits of
// the wrapped matrix type (scalar, index type, storage order, flags, ...).
31 template<typename MatrixType, unsigned int Mode>
32 struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
33 };
34 
// NOTE(review): the two declarations below are truncated in this extraction
// (doxygen lines 36 and 39 are elided); per the member index at the end of
// the file they forward-declare permute_symm_to_symm and
// permute_symm_to_fullsymm — confirm against the original source.
35 template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
37 
38 template<int Mode,typename MatrixType,int DestOrder>
40 
41 }
42 
// Pseudo-expression allowing a triangular (Upper or Lower) sparse matrix to
// be manipulated as a full selfadjoint matrix.  Several doxygen source lines
// are elided in this extraction (return types of the operator* overloads,
// operator= signatures, several typedefs), so some members appear truncated.
43 template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
44  : public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
45 {
46  public:
47 
48  enum {
49  Mode = _Mode,
  // Swapping Upper/Lower yields the mode of the transposed (adjoint) view.
50  TransposeMode = ((Mode & Upper) ? Lower : 0) | ((Mode & Lower) ? Upper : 0),
53  };
54 
56  typedef typename MatrixType::Scalar Scalar;
57  typedef typename MatrixType::StorageIndex StorageIndex;
61 
  // The view only makes sense for square matrices.
62  explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
63  {
64  eigen_assert(rows()==cols() && "SelfAdjointView is only for squared matrices");
65  }
66 
67  inline Index rows() const { return m_matrix.rows(); }
68  inline Index cols() const { return m_matrix.cols(); }
69 
  // Read-only access to the wrapped (triangular) matrix expression.
71  const _MatrixTypeNested& matrix() const { return m_matrix; }
73 
  // sparse-selfadjoint * sparse product (signature/body elided here).
79  template<typename OtherDerived>
82  {
84  }
85 
  // sparse * sparse-selfadjoint product (signature/body elided here).
91  template<typename OtherDerived> friend
94  {
96  }
97 
  // sparse-selfadjoint * dense product: returns a lazy Product expression.
99  template<typename OtherDerived>
102  {
103  return Product<SparseSelfAdjointView,OtherDerived>(*this, rhs.derived())
104  }
105 
  // dense * sparse-selfadjoint product: returns a lazy Product expression.
107  template<typename OtherDerived> friend
110  {
111  return Product<OtherDerived,SparseSelfAdjointView>(lhs.derived(), rhs);
112  }
113 
  // Rank-K update: *this = *this + alpha * u * u.adjoint(), keeping only the
  // triangular half selected by Mode.  Implementation appears further below.
122  template<typename DerivedU>
123  SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));
124 
  // twistedBy(perm): symmetric permutation of the view (signature elided).
126  // TODO implement twists in a more evaluator friendly fashion
128  {
130  }
131 
  // Assignment from a permuted selfadjoint product (body partially elided).
132  template<typename SrcMatrixType,int SrcMode>
134  {
136  return *this;
137  }
138 
  // Copy-assignment: routed through a null-permutation twist.
140  {
142  return *this = src.twistedBy(pnull);
143  }
144 
145  // Since we override the copy-assignment operator, we need to explicitly re-declare the copy-constructor
147 
  // Cross-mode assignment from another SparseSelfAdjointView, also routed
  // through a null-permutation twist.
148  template<typename SrcMatrixType,unsigned int SrcMode>
150  {
152  return *this = src.twistedBy(pnull);
153  }
154 
  // resize() is a no-op kept for interface compatibility: it only asserts
  // that the requested dimensions match the current ones.
156  {
159  eigen_assert(rows == this->rows() && cols == this->cols()
160  && "SparseSelfadjointView::resize() does not actually allow to resize.");
161  }
162 
163  protected:
164 
165  MatrixTypeNested m_matrix;
166  //mutable VectorI m_countPerRow;
167  //mutable VectorI m_countPerCol;
168  private:
  // Hidden EigenBase::evalTo — evaluation goes through the Assignment
  // machinery in namespace internal instead.
169  template<typename Dest> void evalTo(Dest &) const;
170 };
171 
172 /***************************************************************************
173 * Implementation of SparseMatrixBase methods
174 ***************************************************************************/
175 
// Implementations of SparseMatrixBase::selfadjointView<UpLo>() (const and
// non-const variants).  NOTE(review): the signature/return-type lines
// (doxygen 178, 180 and 185) are elided in this extraction.
176 template<typename Derived>
177 template<unsigned int UpLo>
179 {
181 }
182 
183 template<typename Derived>
184 template<unsigned int UpLo>
186 {
  // Wrap the derived expression in a selfadjoint pseudo-expression.
187  return SparseSelfAdjointView<Derived, UpLo>(derived());
188 }
189 
190 /***************************************************************************
191 * Implementation of SparseSelfAdjointView methods
192 ***************************************************************************/
193 
// rankUpdate implementation: *this += alpha * u * u.adjoint(), storing only
// the Mode triangular half.  NOTE(review): the lines declaring the dense
// signature and the temporary `tmp` (doxygen 196-197, 199) are elided in
// this extraction — presumably tmp = u * u.adjoint(); confirm against the
// original source.
194 template<typename MatrixType, unsigned int Mode>
195 template<typename DerivedU>
198 {
  // alpha == 0 resets the stored half instead of accumulating into it.
200  if(alpha==Scalar(0))
201  m_matrix = tmp.template triangularView<Mode>();
202  else
203  m_matrix += alpha * tmp.template triangularView<Mode>();
204 
205  return *this;
206 }
207 
208 namespace internal {
209 
210 // TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
211 // in the future selfadjoint-ness should be defined by the expression traits
212 // such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
213 template<typename MatrixType, unsigned int Mode>
215 {
218 };
219 
221 
224 
// Assignment of a SparseSelfAdjointView into a plain sparse matrix: the
// stored triangular half is expanded into a full symmetric matrix.
225 template< typename DstXprType, typename SrcXprType, typename Functor>
226 struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
227 {
228  typedef typename DstXprType::StorageIndex StorageIndex;
230 
  // dst = src : direct expansion into dst.
231  template<typename DestScalar,int StorageOrder>
232  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignOpType&/*func*/)
233  {
234  internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
235  }
236 
237  // FIXME: the handling of += and -= in sparse matrices should be cleanup so that next two overloads could be reduced to:
  // Generic compound-assign: expand into a temporary first.  NOTE(review):
  // the statement applying `func` to dst/tmp (doxygen line 243) is elided in
  // this extraction — confirm against the original source.
238  template<typename DestScalar,int StorageOrder,typename AssignFunc>
239  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignFunc& func)
240  {
241  SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
242  run(tmp, src, AssignOpType());
244  }
245 
  // dst += src (the functor parameter on elided line 248 is add_assign_op,
  // per the member index at the end of the file).
246  template<typename DestScalar,int StorageOrder>
247  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
249  {
250  SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
251  run(tmp, src, AssignOpType());
252  dst += tmp;
253  }
254 
  // dst -= src (the functor parameter on elided line 257 is sub_assign_op,
  // per the member index at the end of the file).
255  template<typename DestScalar,int StorageOrder>
256  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
258  {
259  SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
260  run(tmp, src, AssignOpType());
261  dst -= tmp;
262  }
263 
  // DynamicSparseMatrix destination: expand into a temporary declared on the
  // elided doxygen line 268, then assign.
264  template<typename DestScalar>
265  static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const AssignOpType&/*func*/)
266  {
267  // TODO directly evaluate into dst;
269  internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
270  dst = tmp;
271  }
272 };
273 
274 } // end namespace internal
275 
276 /***************************************************************************
277 * Implementation of sparse self-adjoint time dense matrix
278 ***************************************************************************/
279 
280 namespace internal {
281 
// Computes res += alpha * lhs * rhs where lhs is a selfadjoint sparse matrix
// of which only one triangular half (selected by Mode) is stored, and
// rhs/res are dense.  Processes rhs one column at a time.
282 template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
283 inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
284 {
  // NOTE(review): the typedefs for SparseLhsTypeNested and LhsEval (doxygen
  // lines 285, 287, 289) are elided in this extraction.
286 
288  typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
290  typedef typename LhsEval::InnerIterator LhsIterator;
291  typedef typename SparseLhsType::Scalar LhsScalar;
292 
293  enum {
294  LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
  // Decide which stored half is traversed directly given Mode and storage
  // order; the other half is reconstructed implicitly by symmetry.
295  ProcessFirstHalf =
296  ((Mode&(Upper|Lower))==(Upper|Lower))
297  || ( (Mode&Upper) && !LhsIsRowMajor)
298  || ( (Mode&Lower) && LhsIsRowMajor),
299  ProcessSecondHalf = !ProcessFirstHalf
300  };
301 
302  SparseLhsTypeNested lhs_nested(lhs);
303  LhsEval lhsEval(lhs_nested);
304 
305  // work on one column at once
306  for (Index k=0; k<rhs.cols(); ++k)
307  {
308  for (Index j=0; j<lhs.outerSize(); ++j)
309  {
310  LhsIterator i(lhsEval,j);
311  // handle diagonal coeff
312  if (ProcessSecondHalf)
313  {
  // Skip entries of the unwanted half, then process the diagonal once.
314  while (i && i.index()<j) ++i;
315  if(i && i.index()==j)
316  {
317  res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
318  ++i;
319  }
320  }
321 
322  // premultiplied rhs for scatters
  // NOTE(review): the declaration of `rhs_j` (doxygen line 323) is elided
  // in this extraction — it is used below as the alpha-scaled rhs entry for
  // column j; confirm against the original source.
324  // accumulator for partial scalar product
325  typename DenseResType::Scalar res_j(0);
  // Each stored off-diagonal entry contributes to both row j (gathered in
  // res_j) and row i.index() (scattered via rhs_j), with conjugation to
  // realize the adjoint half.
326  for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
327  {
328  LhsScalar lhs_ij = i.value();
329  if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
330  res_j += lhs_ij * rhs.coeff(i.index(),k);
331  res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
332  }
333  res.coeffRef(j,k) += alpha * res_j;
334 
335  // handle diagonal coeff
336  if (ProcessFirstHalf && i && (i.index()==j))
337  res.coeffRef(j,k) += alpha * i.value() * rhs.coeff(j,k);
338  }
339  }
340 }
341 
342 
// Product kernel: (sparse selfadjoint view) * (dense).  NOTE(review): the
// struct declaration line (doxygen 344) is elided in this extraction.
343 template<typename LhsView, typename Rhs, int ProductType>
345 : generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
346 {
347  template<typename Dest>
348  static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
349  {
  // Evaluate both operands into nested temporaries if needed, then forward
  // to the selfadjoint*dense kernel.
350  typedef typename LhsView::_MatrixTypeNested Lhs;
351  typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
352  typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
353  LhsNested lhsNested(lhsView.matrix());
354  RhsNested rhsNested(rhs);
355 
356  internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
357  }
358 };
359 
// Product kernel: (dense) * (sparse selfadjoint view).  NOTE(review): the
// struct declaration line (doxygen 361) is elided in this extraction.
360 template<typename Lhs, typename RhsView, int ProductType>
362 : generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
363 {
364  template<typename Dest>
365  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
366  {
367  typedef typename RhsView::_MatrixTypeNested Rhs;
368  typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
369  typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
370  LhsNested lhsNested(lhs);
371  RhsNested rhsNested(rhsView.matrix());
372 
373  // transpose everything
  // Reuse the selfadjoint*dense kernel via (A*B)^T = B^T * A^T, using the
  // TransposeMode of the view.
374  Transpose<Dest> dstT(dst);
375  internal::sparse_selfadjoint_time_dense_product<RhsView::TransposeMode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
376  }
377 };
378 
379 // NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
380 // TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore
381 
// Evaluates a (sparse selfadjoint view) * (sparse) product into a plain
// sparse temporary and exposes it through the base evaluator.  NOTE(review):
// the struct declaration (doxygen 383), typedefs (386, 388) and the line
// computing m_result (394) are elided in this extraction.
382 template<typename LhsView, typename Rhs, int ProductTag>
384  : public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>
385 {
387  typedef typename XprType::PlainObject PlainObject;
389 
390  product_evaluator(const XprType& xpr)
391  : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
392  {
  // Rebind the base evaluator onto the freshly computed m_result.
393  ::new (static_cast<Base*>(this)) Base(m_result);
395  }
396 
397 protected:
  // Plain-object copy of the selfadjoint lhs, fully expanded.
398  typename Rhs::PlainObject m_lhs;
399  PlainObject m_result;
400 };
401 
// Evaluates a (sparse) * (sparse selfadjoint view) product into a plain
// sparse temporary, mirroring the previous evaluator.  NOTE(review): the
// struct declaration (doxygen 403), typedefs (406, 408) and the line
// computing m_result (414) are elided in this extraction.
402 template<typename Lhs, typename RhsView, int ProductTag>
404  : public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>
405 {
407  typedef typename XprType::PlainObject PlainObject;
409 
410  product_evaluator(const XprType& xpr)
411  : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
412  {
  // Rebind the base evaluator onto the freshly computed m_result.
413  ::new (static_cast<Base*>(this)) Base(m_result);
415  }
416 
417 protected:
  // Plain-object copy of the selfadjoint rhs, fully expanded.
418  typename Lhs::PlainObject m_rhs;
419  PlainObject m_result;
420 };
421 
422 } // namespace internal
423 
424 /***************************************************************************
425 * Implementation of symmetric copies and permutations
426 ***************************************************************************/
427 namespace internal {
428 
// Expands the Mode triangular half of `mat` into a full symmetric/hermitian
// sparse matrix `dest`, optionally applying the symmetric permutation `perm`
// (dest = P A P^-1 fully populated).  NOTE(review): the function signature
// (doxygen line 430) and one typedef (434) are elided in this extraction.
429 template<int Mode,typename MatrixType,int DestOrder>
431 {
432  typedef typename MatrixType::StorageIndex StorageIndex;
433  typedef typename MatrixType::Scalar Scalar;
435  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
436  typedef evaluator<MatrixType> MatEval;
437  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
438 
439  MatEval matEval(mat);
440  Dest& dest(_dest.derived());
441  enum {
442  StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
443  };
444 
445  Index size = mat.rows();
446  VectorI count;
447  count.resize(size);
448  count.setZero();
449  dest.resize(size,size);
  // First pass: count the entries each destination outer vector will
  // receive (off-diagonal entries contribute to both halves).
450  for(Index j = 0; j<size; ++j)
451  {
452  Index jp = perm ? perm[j] : j;
453  for(MatIterator it(matEval,j); it; ++it)
454  {
455  Index i = it.index();
456  Index r = it.row();
457  Index c = it.col();
458  Index ip = perm ? perm[i] : i;
459  if(Mode==int(Upper|Lower))
460  count[StorageOrderMatch ? jp : ip]++;
461  else if(r==c)
462  count[ip]++;
463  else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
464  {
465  count[ip]++;
466  count[jp]++;
467  }
468  }
469  }
470  Index nnz = count.sum();
471 
472  // reserve space
473  dest.resizeNonZeros(nnz);
  // Prefix-sum the counts into the outer index array, then reuse `count`
  // as a per-outer-vector write cursor.
474  dest.outerIndexPtr()[0] = 0;
475  for(Index j=0; j<size; ++j)
476  dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
477  for(Index j=0; j<size; ++j)
478  count[j] = dest.outerIndexPtr()[j];
479 
480  // copy data
  // Second pass: scatter permuted indices/values; the mirrored entry gets
  // the conjugated value so dest is hermitian for complex scalars.
481  for(StorageIndex j = 0; j<size; ++j)
482  {
483  for(MatIterator it(matEval,j); it; ++it)
484  {
485  StorageIndex i = internal::convert_index<StorageIndex>(it.index());
486  Index r = it.row();
487  Index c = it.col();
488 
489  StorageIndex jp = perm ? perm[j] : j;
490  StorageIndex ip = perm ? perm[i] : i;
491 
492  if(Mode==int(Upper|Lower))
493  {
494  Index k = count[StorageOrderMatch ? jp : ip]++;
495  dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
496  dest.valuePtr()[k] = it.value();
497  }
498  else if(r==c)
499  {
500  Index k = count[ip]++;
501  dest.innerIndexPtr()[k] = ip;
502  dest.valuePtr()[k] = it.value();
503  }
504  else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
505  {
506  if(!StorageOrderMatch)
507  std::swap(ip,jp);
508  Index k = count[jp]++;
509  dest.innerIndexPtr()[k] = ip;
510  dest.valuePtr()[k] = it.value();
511  k = count[ip]++;
512  dest.innerIndexPtr()[k] = jp;
513  dest.valuePtr()[k] = numext::conj(it.value());
514  }
515  }
516  }
517 }
518 
// Copies the _SrcMode triangular half of `mat` into the _DstMode triangular
// half of `dest`, applying the symmetric permutation `perm`.  NOTE(review):
// the function signature (doxygen line 520) and one typedef (524) are elided
// in this extraction.
519 template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
521 {
522  typedef typename MatrixType::StorageIndex StorageIndex;
523  typedef typename MatrixType::Scalar Scalar;
525  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
526  typedef evaluator<MatrixType> MatEval;
527  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;
528 
529  enum {
530  SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
531  StorageOrderMatch = int(SrcOrder) == int(DstOrder),
  // With row-major storage the Upper/Lower meaning flips, so normalize the
  // requested modes to the actual traversal order.
532  DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
533  SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
534  };
535 
536  MatEval matEval(mat);
537 
538  Index size = mat.rows();
539  VectorI count(size);
540  count.setZero();
541  dest.resize(size,size);
  // First pass: count the entries targeted at each destination outer vector.
542  for(StorageIndex j = 0; j<size; ++j)
543  {
544  StorageIndex jp = perm ? perm[j] : j;
545  for(MatIterator it(matEval,j); it; ++it)
546  {
547  StorageIndex i = it.index();
  // Ignore entries lying outside the stored source half.
548  if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
549  continue;
550 
551  StorageIndex ip = perm ? perm[i] : i;
552  count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
553  }
554  }
  // Prefix-sum the counts into the outer index array, then reuse `count`
  // as a per-outer-vector write cursor.
555  dest.outerIndexPtr()[0] = 0;
556  for(Index j=0; j<size; ++j)
557  dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
558  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
559  for(Index j=0; j<size; ++j)
560  count[j] = dest.outerIndexPtr()[j];
561 
  // Second pass: write permuted indices/values, conjugating entries whose
  // permuted position falls in the opposite half.
562  for(StorageIndex j = 0; j<size; ++j)
563  {
564 
565  for(MatIterator it(matEval,j); it; ++it)
566  {
567  StorageIndex i = it.index();
568  if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
569  continue;
570 
571  StorageIndex jp = perm ? perm[j] : j;
572  StorageIndex ip = perm? perm[i] : i;
573 
574  Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
575  dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);
576 
577  if(!StorageOrderMatch) std::swap(ip,jp);
578  if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
579  dest.valuePtr()[k] = numext::conj(it.value());
580  else
581  dest.valuePtr()[k] = it.value();
582  }
583  }
584 }
585 
586 }
587 
588 // TODO implement twists in a more evaluator friendly fashion
589 
590 namespace internal {
591 
592 template<typename MatrixType, int Mode>
594 };
595 
596 }
597 
// Expression of the symmetric permutation P A P^-1 of a selfadjoint sparse
// matrix; evaluated by the Assignment specializations below.  NOTE(review):
// several doxygen lines (599, 606-607, 610, 612, 614, 616) are elided in
// this extraction, including the class head, the enum body, the Perm
// typedef and the constructor signature.
598 template<typename MatrixType,int Mode>
600  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
601 {
602  public:
603  typedef typename MatrixType::Scalar Scalar;
604  typedef typename MatrixType::StorageIndex StorageIndex;
605  enum {
608  };
609  protected:
611  public:
613  typedef typename MatrixType::Nested MatrixTypeNested;
615 
  // Stores references to both the matrix expression and the permutation.
617  : m_matrix(mat), m_perm(perm)
618  {}
619 
620  inline Index rows() const { return m_matrix.rows(); }
621  inline Index cols() const { return m_matrix.cols(); }
622 
  // Accessors used by the Assignment evaluators below.
623  const NestedExpression& matrix() const { return m_matrix; }
624  const Perm& perm() const { return m_perm; }
625 
626  protected:
627  MatrixTypeNested m_matrix;
  // NOTE(review): held by reference — the permutation must outlive this
  // expression object.
628  const Perm& m_perm;
629 
630 };
631 
632 namespace internal {
633 
// Evaluation of a SparseSymmetricPermutationProduct into sparse
// destinations.  NOTE(review): doxygen lines 637, 640, 643 and 649 are
// elided in this extraction (SrcXprType typedef, run() signatures, and the
// declaration of `tmp`).
634 template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
635 struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>
636 {
638  typedef typename DstXprType::StorageIndex DstIndex;
  // Plain sparse destination: expand the permuted half into a full
  // symmetric temporary, then assign.
639  template<int Options>
641  {
642  // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
644  internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
645  dst = tmp;
646  }
647 
  // Selfadjoint-view destination: copy half-to-half directly, no expansion.
648  template<typename DestType,unsigned int DestMode>
650  {
651  internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
652  }
653 };
654 
655 } // end namespace internal
656 
657 } // end namespace Eigen
658 
659 #endif // EIGEN_SPARSE_SELFADJOINTVIEW_H
SCALAR Scalar
Definition: bench_gemm.cpp:46
#define max(a, b)
Definition: datatypes.h:20
EigenBase< SparseSelfAdjointView > Base
Product< SparseSelfAdjointView, OtherDerived > operator*(const SparseMatrixBase< OtherDerived > &rhs) const
Expression of the product of two arbitrary matrices or vectors.
Definition: Product.h:71
SparseSelfAdjointView(MatrixType &matrix)
A versatible sparse matrix representation.
Definition: SparseMatrix.h:96
#define min(a, b)
Definition: datatypes.h:19
PermutationMatrix< Dynamic, Dynamic, StorageIndex > Perm
static void run(SparseMatrix< DestScalar, StorageOrder, StorageIndex > &dst, const SrcXprType &src, const AssignFunc &func)
Expression of the transpose of a matrix.
Definition: Transpose.h:52
friend Product< OtherDerived, SparseSelfAdjointView > operator*(const SparseMatrixBase< OtherDerived > &lhs, const SparseSelfAdjointView &rhs)
friend Product< OtherDerived, SparseSelfAdjointView > operator*(const MatrixBase< OtherDerived > &lhs, const SparseSelfAdjointView &rhs)
static void run(SparseSelfAdjointView< DestType, DestMode > &dst, const SrcXprType &src, const internal::assign_op< Scalar, typename MatrixType::Scalar > &)
SparseSymmetricPermutationProduct(const MatrixType &mat, const Perm &perm)
Scalar Scalar * c
Definition: benchVecAdd.cpp:17
internal::remove_reference< MatrixTypeNested >::type & matrix()
Namespace containing all symbols from the Eigen library.
Definition: jet.h:637
Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
Matrix< StorageIndex, Dynamic, 1 > VectorI
MatrixXf MatrixType
void sparse_selfadjoint_time_dense_product(const SparseLhsType &lhs, const DenseRhsType &rhs, DenseResType &res, const AlphaType &alpha)
#define EIGEN_DEFAULT_COPY_CONSTRUCTOR(CLASS)
Definition: Macros.h:1221
internal::remove_all< MatrixTypeNested >::type _MatrixTypeNested
static void scaleAndAddTo(Dest &dst, const Lhs &lhs, const RhsView &rhsView, const typename Dest::Scalar &alpha)
Eigen::Index Index
The interface type of indices.
Definition: EigenBase.h:39
static void scaleAndAddTo(Dest &dst, const LhsView &lhsView, const Rhs &rhs, const typename Dest::Scalar &alpha)
const unsigned int RowMajorBit
Definition: Constants.h:66
AnnoyingScalar conj(const AnnoyingScalar &x)
cout<< "Here is the matrix m:"<< endl<< m<< endl;Matrix< ptrdiff_t, 3, 1 > res
const NestedExpression & matrix() const
SparseSelfAdjointView & operator=(const SparseSelfAdjointView &src)
Base class of any sparse matrices or sparse expressions.
storage_kind_to_evaluator_kind< typename MatrixType::StorageKind >::Kind Kind
const _MatrixTypeNested & matrix() const
static void run(SparseMatrix< Scalar, Options, DstIndex > &dst, const SrcXprType &src, const internal::assign_op< Scalar, typename MatrixType::Scalar > &)
const IndicesType & indices() const
SparseSelfAdjointView & operator=(const SparseSymmetricPermutationProduct< SrcMatrixType, SrcMode > &permutedMatrix)
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition: Meta.h:74
#define eigen_assert(x)
Definition: Macros.h:1037
idx_t idx_t idx_t idx_t idx_t * perm
RealScalar alpha
A sparse matrix class designed for matrix assembly purpose.
Definition: SparseUtil.h:53
void permute_symm_to_symm(const MatrixType &mat, SparseMatrix< typename MatrixType::Scalar, DestOrder, typename MatrixType::StorageIndex > &_dest, const typename MatrixType::StorageIndex *perm=0)
const AdjointReturnType adjoint() const
void swap(GeographicLib::NearestNeighbor< dist_t, pos_t, distfun_t > &a, GeographicLib::NearestNeighbor< dist_t, pos_t, distfun_t > &b)
internal::ref_selector< MatrixType >::non_const_type MatrixTypeNested
void permute_symm_to_fullsymm(const MatrixType &mat, SparseMatrix< typename MatrixType::Scalar, DestOrder, typename MatrixType::StorageIndex > &_dest, const typename MatrixType::StorageIndex *perm=0)
static void run(SparseMatrix< DestScalar, StorageOrder, StorageIndex > &dst, const SrcXprType &src, const AssignOpType &)
EIGEN_CONSTEXPR Index size(const T &x)
Definition: Meta.h:479
Product< SparseSelfAdjointView, OtherDerived > operator*(const MatrixBase< OtherDerived > &rhs) const
static void run(SparseMatrix< DestScalar, StorageOrder, StorageIndex > &dst, const SrcXprType &src, const internal::add_assign_op< typename DstXprType::Scalar, typename SrcXprType::Scalar > &)
ConstSelfAdjointViewReturnType< UpLo >::Type selfadjointView() const
SparseSelfAdjointView & rankUpdate(const SparseMatrixBase< DerivedU > &u, const Scalar &alpha=Scalar(1))
MatrixType::StorageIndex StorageIndex
static void run(DynamicSparseMatrix< DestScalar, ColMajor, StorageIndex > &dst, const SrcXprType &src, const AssignOpType &)
static void run(SparseMatrix< DestScalar, StorageOrder, StorageIndex > &dst, const SrcXprType &src, const internal::sub_assign_op< typename DstXprType::Scalar, typename SrcXprType::Scalar > &)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void call_assignment_no_alias_no_transpose(Dst &dst, const Src &src, const Func &func)
SparseSymmetricPermutationProduct< _MatrixTypeNested, Mode > twistedBy(const PermutationMatrix< Dynamic, Dynamic, StorageIndex > &perm) const
Matrix< StorageIndex, Dynamic, 1 > VectorI
internal::assign_op< typename DstXprType::Scalar, typename SrcXprType::Scalar > AssignOpType
const Derived & derived() const
SparseSelfAdjointView & operator=(const SparseSelfAdjointView< SrcMatrixType, SrcMode > &src)
Determines whether the given binary operation of two numeric types is allowed and what the scalar ret...
Definition: XprHelper.h:801
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const LhsNestedCleaned & lhs() const
Definition: Product.h:107
Generic expression where a coefficient-wise unary operator is applied to an expression.
Definition: CwiseUnaryOp.h:55
internal::remove_all< MatrixTypeNested >::type NestedExpression
Map< Matrix< T, Dynamic, Dynamic, ColMajor >, 0, OuterStride<> > matrix(T *data, int rows, int cols, int stride)
EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const RhsNestedCleaned & rhs() const
Definition: Product.h:109
Base class for all dense matrices, vectors, and expressions.
Definition: MatrixBase.h:48
std::ptrdiff_t j
void resize(Index rows, Index cols)
#define EIGEN_ONLY_USED_FOR_DEBUG(x)
Definition: Macros.h:1049


gtsam
Author(s):
autogenerated on Tue Jul 4 2023 02:36:10