SparseSelfAdjointView.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
#define EIGEN_SPARSE_SELFADJOINTVIEW_H

namespace Eigen {
template<typename Lhs, typename Rhs, int UpLo>
class SparseSelfAdjointTimeDenseProduct;

template<typename Lhs, typename Rhs, int UpLo>
class DenseTimeSparseSelfAdjointProduct;

namespace internal {

template<typename MatrixType, unsigned int UpLo>
struct traits<SparseSelfAdjointView<MatrixType,UpLo> > : traits<MatrixType> {
};

template<int SrcUpLo,int DstUpLo,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);

template<int UpLo,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm = 0);

}

template<typename MatrixType, unsigned int UpLo> class SparseSelfAdjointView
  : public EigenBase<SparseSelfAdjointView<MatrixType,UpLo> >
{
  public:

    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::Index Index;
    typedef Matrix<Index,Dynamic,1> VectorI;
    typedef typename MatrixType::Nested MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;

    inline SparseSelfAdjointView(const MatrixType& matrix) : m_matrix(matrix)
    {
      eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
    }

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    const _MatrixTypeNested& matrix() const { return m_matrix; }
    _MatrixTypeNested& matrix() { return m_matrix.const_cast_derived(); }

    template<typename OtherDerived>
    SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived> operator*(const SparseMatrixBase<OtherDerived>& rhs) const
    {
      return SparseSparseProduct<typename OtherDerived::PlainObject, OtherDerived>(*this, rhs.derived());
    }

    template<typename OtherDerived> friend
    SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject> operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return SparseSparseProduct<OtherDerived, typename OtherDerived::PlainObject>(lhs.derived(), rhs);
    }

    template<typename OtherDerived>
    SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo> operator*(const MatrixBase<OtherDerived>& rhs) const
    {
      return SparseSelfAdjointTimeDenseProduct<MatrixType,OtherDerived,UpLo>(m_matrix, rhs.derived());
    }

    template<typename OtherDerived> friend
    DenseTimeSparseSelfAdjointProduct<OtherDerived,MatrixType,UpLo> operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return DenseTimeSparseSelfAdjointProduct<OtherDerived,MatrixType,UpLo>(lhs.derived(), rhs.m_matrix);
    }

    template<typename DerivedU>
    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));

    template<typename DestScalar,int StorageOrder> void evalTo(SparseMatrix<DestScalar,StorageOrder,Index>& _dest) const
    {
      internal::permute_symm_to_fullsymm<UpLo>(m_matrix, _dest);
    }

    template<typename DestScalar> void evalTo(DynamicSparseMatrix<DestScalar,ColMajor,Index>& _dest) const
    {
      // TODO directly evaluate into _dest;
      SparseMatrix<DestScalar,ColMajor,Index> tmp(_dest.rows(),_dest.cols());
      internal::permute_symm_to_fullsymm<UpLo>(m_matrix, tmp);
      _dest = tmp;
    }

    SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo> twistedBy(const PermutationMatrix<Dynamic,Dynamic,Index>& perm) const
    {
      return SparseSymmetricPermutationProduct<_MatrixTypeNested,UpLo>(m_matrix, perm);
    }

    template<typename SrcMatrixType,int SrcUpLo>
    SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcUpLo>& permutedMatrix)
    {
      permutedMatrix.evalTo(*this);
      return *this;
    }

    SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
    {
      PermutationMatrix<Dynamic,Dynamic,Index> pnull;
      return *this = src.twistedBy(pnull);
    }

    template<typename SrcMatrixType,unsigned int SrcUpLo>
    SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcUpLo>& src)
    {
      PermutationMatrix<Dynamic,Dynamic,Index> pnull;
      return *this = src.twistedBy(pnull);
    }


    // const SparseLLT<PlainObject, UpLo> llt() const;
    // const SparseLDLT<PlainObject, UpLo> ldlt() const;

  protected:

    typename MatrixType::Nested m_matrix;
    mutable VectorI m_countPerRow;
    mutable VectorI m_countPerCol;
};

/***************************************************************************
* Implementation of SparseMatrixBase methods
***************************************************************************/

template<typename Derived>
template<unsigned int UpLo>
const SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView() const
{
  return derived();
}

template<typename Derived>
template<unsigned int UpLo>
SparseSelfAdjointView<Derived, UpLo> SparseMatrixBase<Derived>::selfadjointView()
{
  return derived();
}
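
// Usage sketch, assuming a square SparseMatrix<double> that stores only its lower
// triangular part: selfadjointView() returns the pseudo expression defined above,
// and products or assignments involving it see the full symmetric matrix.
//
//   #include <Eigen/SparseCore>
//
//   const int n = 100;
//   Eigen::SparseMatrix<double> A(n,n);      // fill the lower triangle only
//   Eigen::VectorXd x = Eigen::VectorXd::Random(n);
//
//   // sparse self-adjoint times dense vector (SparseSelfAdjointTimeDenseProduct below)
//   Eigen::VectorXd y = A.selfadjointView<Eigen::Lower>() * x;
//
//   // expand the stored triangle into full sparse storage (evalTo above)
//   Eigen::SparseMatrix<double> full;
//   full = A.selfadjointView<Eigen::Lower>();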

/***************************************************************************
* Implementation of SparseSelfAdjointView methods
***************************************************************************/

template<typename MatrixType, unsigned int UpLo>
template<typename DerivedU>
SparseSelfAdjointView<MatrixType,UpLo>&
SparseSelfAdjointView<MatrixType,UpLo>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
{
  SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
  if(alpha==Scalar(0))
    m_matrix.const_cast_derived() = tmp.template triangularView<UpLo>();
  else
    m_matrix.const_cast_derived() += alpha * tmp.template triangularView<UpLo>();

  return *this;
}

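// Sketch of a rank update, assuming a one-column sparse matrix u with the same row
// count as a lower-stored square SparseMatrix<double> A: the implementation above
// forms u * u.adjoint() as a temporary sparse matrix and folds its UpLo triangle
// back into the nested matrix, so only the stored half of A is touched.
//
//   const int n = 100;
//   Eigen::SparseMatrix<double> A(n,n);      // lower triangle filled
//   Eigen::SparseMatrix<double> u(n,1);
//   u.insert(3,0)  =  1.0;
//   u.insert(42,0) = -2.0;
//   A.selfadjointView<Eigen::Lower>().rankUpdate(u, 0.5);  // A(lower) += 0.5 * (u u^T)(lower)
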
/***************************************************************************
* Implementation of sparse self-adjoint times dense matrix
***************************************************************************/

namespace internal {
template<typename Lhs, typename Rhs, int UpLo>
struct traits<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo> >
  : traits<ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
{
  typedef Dense StorageKind;
};
}

template<typename Lhs, typename Rhs, int UpLo>
class SparseSelfAdjointTimeDenseProduct
  : public ProductBase<SparseSelfAdjointTimeDenseProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
{
  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(SparseSelfAdjointTimeDenseProduct)

    SparseSelfAdjointTimeDenseProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
    {}

    template<typename Dest> void scaleAndAddTo(Dest& dest, const Scalar& alpha) const
    {
      EIGEN_ONLY_USED_FOR_DEBUG(alpha);
      // TODO use alpha
      eigen_assert(alpha==Scalar(1) && "alpha != 1 is not implemented yet, sorry");
      typedef typename internal::remove_all<Lhs>::type _Lhs;
      typedef typename _Lhs::InnerIterator LhsInnerIterator;
      enum {
        LhsIsRowMajor = (_Lhs::Flags&RowMajorBit)==RowMajorBit,
        ProcessFirstHalf =
                 ((UpLo&(Upper|Lower))==(Upper|Lower))
              || ( (UpLo&Upper) && !LhsIsRowMajor)
              || ( (UpLo&Lower) && LhsIsRowMajor),
        ProcessSecondHalf = !ProcessFirstHalf
      };
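      // The nested matrix stores only the UpLo triangle (unless UpLo is Upper|Lower),
      // and, depending on its storage order, an InnerIterator over outer index j
      // meets the stored off-diagonal entries either before or after the diagonal.
      // ProcessFirstHalf/ProcessSecondHalf select the matching traversal below: each
      // stored off-diagonal value v contributes v * m_rhs.row(.) to one destination
      // row and numext::conj(v) * m_rhs.row(.) to the mirrored one, while a diagonal
      // value contributes exactly once.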
      for (Index j=0; j<m_lhs.outerSize(); ++j)
      {
        LhsInnerIterator i(m_lhs,j);
        if (ProcessSecondHalf)
        {
          while (i && i.index()<j) ++i;
          if(i && i.index()==j)
          {
            dest.row(j) += i.value() * m_rhs.row(j);
            ++i;
          }
        }
        for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
        {
          Index a = LhsIsRowMajor ? j : i.index();
          Index b = LhsIsRowMajor ? i.index() : j;
          typename Lhs::Scalar v = i.value();
          dest.row(a) += (v) * m_rhs.row(b);
          dest.row(b) += numext::conj(v) * m_rhs.row(a);
        }
        if (ProcessFirstHalf && i && (i.index()==j))
          dest.row(j) += i.value() * m_rhs.row(j);
      }
    }

  private:
    SparseSelfAdjointTimeDenseProduct& operator=(const SparseSelfAdjointTimeDenseProduct&);
};

namespace internal {
template<typename Lhs, typename Rhs, int UpLo>
struct traits<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo> >
  : traits<ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs> >
{};
}

template<typename Lhs, typename Rhs, int UpLo>
class DenseTimeSparseSelfAdjointProduct
  : public ProductBase<DenseTimeSparseSelfAdjointProduct<Lhs,Rhs,UpLo>, Lhs, Rhs>
{
  public:
    EIGEN_PRODUCT_PUBLIC_INTERFACE(DenseTimeSparseSelfAdjointProduct)

    DenseTimeSparseSelfAdjointProduct(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs)
    {}

    template<typename Dest> void scaleAndAddTo(Dest& /*dest*/, const Scalar& /*alpha*/) const
    {
      // TODO
    }

  private:
    DenseTimeSparseSelfAdjointProduct& operator=(const DenseTimeSparseSelfAdjointProduct&);
};

/***************************************************************************
* Implementation of symmetric copies and permutations
***************************************************************************/
namespace internal {

template<typename MatrixType, int UpLo>
struct traits<SparseSymmetricPermutationProduct<MatrixType,UpLo> > : traits<MatrixType> {
};

template<int UpLo,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
{
  typedef typename MatrixType::Index Index;
  typedef typename MatrixType::Scalar Scalar;
  typedef SparseMatrix<Scalar,DestOrder,Index> Dest;
  typedef Matrix<Index,Dynamic,1> VectorI;

  Dest& dest(_dest.derived());
  enum {
    StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
  };

  Index size = mat.rows();
  VectorI count;
  count.resize(size);
  count.setZero();
  dest.resize(size,size);
  for(Index j = 0; j<size; ++j)
  {
    Index jp = perm ? perm[j] : j;
    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
    {
      Index i = it.index();
      Index r = it.row();
      Index c = it.col();
      Index ip = perm ? perm[i] : i;
      if(UpLo==(Upper|Lower))
        count[StorageOrderMatch ? jp : ip]++;
      else if(r==c)
        count[ip]++;
      else if(( UpLo==Lower && r>c) || ( UpLo==Upper && r<c))
      {
        count[ip]++;
        count[jp]++;
      }
    }
  }
  Index nnz = count.sum();

  // reserve space
  dest.resizeNonZeros(nnz);
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  // copy data
  for(Index j = 0; j<size; ++j)
  {
    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
    {
      Index i = it.index();
      Index r = it.row();
      Index c = it.col();

      Index jp = perm ? perm[j] : j;
      Index ip = perm ? perm[i] : i;

      if(UpLo==(Upper|Lower))
      {
        Index k = count[StorageOrderMatch ? jp : ip]++;
        dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
        dest.valuePtr()[k] = it.value();
      }
      else if(r==c)
      {
        Index k = count[ip]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
      }
      else if(( (UpLo&Lower)==Lower && r>c) || ( (UpLo&Upper)==Upper && r<c))
      {
        if(!StorageOrderMatch)
          std::swap(ip,jp);
        Index k = count[jp]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
        k = count[ip]++;
        dest.innerIndexPtr()[k] = jp;
        dest.valuePtr()[k] = numext::conj(it.value());
      }
    }
  }
}

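// Taken together, the two passes above build the compressed destination directly:
// the first pass counts, per destination outer vector, the entries it will receive
// (each stored off-diagonal entry is counted twice, once for each mirrored
// position), the running sums of those counts become the outer index array, and the
// second pass writes inner indices and values, emitting numext::conj of the value
// for the mirrored copy. With a non-null perm, index i is remapped to perm[i], so
// the result is the full-storage matrix P A P^-1.
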
template<int _SrcUpLo,int _DstUpLo,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::Index>& _dest, const typename MatrixType::Index* perm)
{
  typedef typename MatrixType::Index Index;
  typedef typename MatrixType::Scalar Scalar;
  SparseMatrix<Scalar,DstOrder,Index>& dest(_dest.derived());
  typedef Matrix<Index,Dynamic,1> VectorI;
  enum {
    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
    DstUpLo = DstOrder==RowMajor ? (_DstUpLo==Upper ? Lower : Upper) : _DstUpLo,
    SrcUpLo = SrcOrder==RowMajor ? (_SrcUpLo==Upper ? Lower : Upper) : _SrcUpLo
  };

  Index size = mat.rows();
  VectorI count(size);
  count.setZero();
  dest.resize(size,size);
  for(Index j = 0; j<size; ++j)
  {
    Index jp = perm ? perm[j] : j;
    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
    {
      Index i = it.index();
      if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
        continue;

      Index ip = perm ? perm[i] : i;
      count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
    }
  }
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  for(Index j = 0; j<size; ++j)
  {
    for(typename MatrixType::InnerIterator it(mat,j); it; ++it)
    {
      Index i = it.index();
      if((int(SrcUpLo)==int(Lower) && i<j) || (int(SrcUpLo)==int(Upper) && i>j))
        continue;

      Index jp = perm ? perm[j] : j;
      Index ip = perm ? perm[i] : i;

      Index k = count[int(DstUpLo)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
      dest.innerIndexPtr()[k] = int(DstUpLo)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);

      if(!StorageOrderMatch) std::swap(ip,jp);
      if( (int(DstUpLo)==int(Lower) && ip<jp) || (int(DstUpLo)==int(Upper) && ip>jp) )
        dest.valuePtr()[k] = numext::conj(it.value());
      else
        dest.valuePtr()[k] = it.value();
    }
  }
}

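// The variant above keeps only one triangular half in the destination. Because a
// column-major lower triangle and a row-major upper triangle traverse the same
// entries, the enum block flips _SrcUpLo/_DstUpLo whenever the corresponding
// storage order is row-major; the min/max of the permuted indices then decides
// which destination outer vector an entry belongs to, and a value is conjugated
// exactly when it is written transposed relative to its source position (which can
// happen through the permutation or a storage-order mismatch).
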
}

template<typename MatrixType,int UpLo>
class SparseSymmetricPermutationProduct
  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,UpLo> >
{
  public:
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::Index Index;
  protected:
    typedef PermutationMatrix<Dynamic,Dynamic,Index> Perm;
  public:
    typedef Matrix<Index,Dynamic,1> VectorI;
    typedef typename MatrixType::Nested MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;

    SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
      : m_matrix(mat), m_perm(perm)
    {}

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    template<typename DestScalar, int Options, typename DstIndex>
    void evalTo(SparseMatrix<DestScalar,Options,DstIndex>& _dest) const
    {
//      internal::permute_symm_to_fullsymm<UpLo>(m_matrix,_dest,m_perm.indices().data());
      SparseMatrix<DestScalar,Options,DstIndex> tmp(m_matrix.rows(),m_matrix.cols());
      internal::permute_symm_to_fullsymm<UpLo>(m_matrix,tmp,m_perm.indices().data());
      _dest = tmp;
    }

    template<typename DestType,unsigned int DestUpLo> void evalTo(SparseSelfAdjointView<DestType,DestUpLo>& dest) const
    {
      internal::permute_symm_to_symm<UpLo,DestUpLo>(m_matrix,dest.matrix(),m_perm.indices().data());
    }

  protected:
    MatrixTypeNested m_matrix;
    const Perm& m_perm;

};

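// Sketch of how this expression type is typically reached (assuming a square
// SparseMatrix<double> A that stores its lower triangle, and a permutation P of
// matching size, e.g. a fill-reducing ordering): twistedBy() wraps A and P, and the
// assignment target selects one of the evalTo overloads above.
//
//   Eigen::SparseMatrix<double> A(100,100);   // lower triangle filled elsewhere
//   Eigen::PermutationMatrix<Eigen::Dynamic,Eigen::Dynamic,int> P(A.rows());
//   P.setIdentity();                          // stand-in for a real ordering
//
//   Eigen::SparseMatrix<double> fullPerm;
//   fullPerm = A.selfadjointView<Eigen::Lower>().twistedBy(P);   // full storage of P A P^-1
//
//   Eigen::SparseMatrix<double> halfPerm(A.rows(),A.cols());
//   halfPerm.selfadjointView<Eigen::Upper>()
//       = A.selfadjointView<Eigen::Lower>().twistedBy(P);        // only the upper half is stored
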
} // end namespace Eigen

#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H