BDCSVD.h
00001 // This file is part of Eigen, a lightweight C++ template library
00002 // for linear algebra.
00003 // 
00004 // This implementation follows the research report "A Divide-And-Conquer
00005 // Algorithm for the Bidiagonal SVD" by Ming Gu and Stanley C. Eisenstat.
00006 // The variable names in the code correspond to the names used in that
00007 // report.
00008 //
00009 // Copyright (C) 2013 Gauthier Brun <brun.gauthier@gmail.com>
00010 // Copyright (C) 2013 Nicolas Carre <nicolas.carre@ensimag.fr>
00011 // Copyright (C) 2013 Jean Ceccato <jean.ceccato@ensimag.fr>
00012 // Copyright (C) 2013 Pierre Zoppitelli <pierre.zoppitelli@ensimag.fr>
00013 //
00014 // Source Code Form is subject to the terms of the Mozilla
00015 // Public License v. 2.0. If a copy of the MPL was not distributed
00016 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
00017 
00018 #ifndef EIGEN_BDCSVD_H
00019 #define EIGEN_BDCSVD_H
00020 
00021 #define EPSILON 0.0000000000000001
00022 
00023 #define ALGOSWAP 32
00024 
00025 namespace Eigen {
00037 template<typename _MatrixType> 
00038 class BDCSVD : public SVDBase<_MatrixType>
00039 {
00040   typedef SVDBase<_MatrixType> Base;
00041     
00042 public:
00043   using Base::rows;
00044   using Base::cols;
00045   
00046   typedef _MatrixType MatrixType;
00047   typedef typename MatrixType::Scalar Scalar;
00048   typedef typename NumTraits<typename MatrixType::Scalar>::Real RealScalar;
00049   typedef typename MatrixType::Index Index;
00050   enum {
00051     RowsAtCompileTime = MatrixType::RowsAtCompileTime, 
00052     ColsAtCompileTime = MatrixType::ColsAtCompileTime, 
00053     DiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_DYNAMIC(RowsAtCompileTime, ColsAtCompileTime), 
00054     MaxRowsAtCompileTime = MatrixType::MaxRowsAtCompileTime, 
00055     MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime, 
00056     MaxDiagSizeAtCompileTime = EIGEN_SIZE_MIN_PREFER_FIXED(MaxRowsAtCompileTime, MaxColsAtCompileTime), 
00057     MatrixOptions = MatrixType::Options
00058   };
00059 
00060   typedef Matrix<Scalar, RowsAtCompileTime, RowsAtCompileTime, 
00061                  MatrixOptions, MaxRowsAtCompileTime, MaxRowsAtCompileTime>
00062   MatrixUType;
00063   typedef Matrix<Scalar, ColsAtCompileTime, ColsAtCompileTime, 
00064                  MatrixOptions, MaxColsAtCompileTime, MaxColsAtCompileTime>
00065   MatrixVType;
00066   typedef typename internal::plain_diag_type<MatrixType, RealScalar>::type SingularValuesType;
00067   typedef typename internal::plain_row_type<MatrixType>::type RowType;
00068   typedef typename internal::plain_col_type<MatrixType>::type ColType;
00069   typedef Matrix<Scalar, Dynamic, Dynamic> MatrixX;
00070   typedef Matrix<RealScalar, Dynamic, Dynamic> MatrixXr;
00071   typedef Matrix<RealScalar, Dynamic, 1> VectorType;
00072 
00078   BDCSVD()
00079     : SVDBase<_MatrixType>::SVDBase(), 
00080       algoswap(ALGOSWAP)
00081   {}
00082 
00083 
00090   BDCSVD(Index rows, Index cols, unsigned int computationOptions = 0)
00091     : SVDBase<_MatrixType>::SVDBase(), 
00092       algoswap(ALGOSWAP)
00093   {
00094     allocate(rows, cols, computationOptions);
00095   }
00096 
00107   BDCSVD(const MatrixType& matrix, unsigned int computationOptions = 0)
00108     : SVDBase<_MatrixType>::SVDBase(), 
00109       algoswap(ALGOSWAP)
00110   {
00111     compute(matrix, computationOptions);
00112   }
00113 
00114   ~BDCSVD() 
00115   {
00116   }
00127   SVDBase<MatrixType>& compute(const MatrixType& matrix, unsigned int computationOptions);
00128 
00135   SVDBase<MatrixType>& compute(const MatrixType& matrix)
00136   {
00137     return compute(matrix, this->m_computationOptions);
00138   }
00139 
00140   void setSwitchSize(int s) 
00141   {
00142     eigen_assert(s>3 && "BDCSVD: the size of the algorithm switch has to be greater than 3");
00143     algoswap = s;
00144   }
00145 
00146 
00156   template<typename Rhs>
00157   inline const internal::solve_retval<BDCSVD, Rhs>
00158   solve(const MatrixBase<Rhs>& b) const
00159   {
00160     eigen_assert(this->m_isInitialized && "BDCSVD is not initialized.");
00161     eigen_assert(SVDBase<_MatrixType>::computeU() && SVDBase<_MatrixType>::computeV() && 
00162                  "BDCSVD::solve() requires both unitaries U and V to be computed (thin unitaries suffice).");
00163     return internal::solve_retval<BDCSVD, Rhs>(*this, b.derived());
00164   }
00165 
00166  
00167   const MatrixUType& matrixU() const
00168   {
00169     eigen_assert(this->m_isInitialized && "SVD is not initialized.");
00170     if (isTranspose){
00171       eigen_assert(this->computeV() && "This SVD decomposition didn't compute U. Did you ask for it?");
00172       return this->m_matrixV;
00173     }
00174     else 
00175     {
00176       eigen_assert(this->computeU() && "This SVD decomposition didn't compute U. Did you ask for it?");
00177       return this->m_matrixU;
00178     }
00179      
00180   }
00181 
00182 
00183   const MatrixVType& matrixV() const
00184   {
00185     eigen_assert(this->m_isInitialized && "SVD is not initialized.");
00186     if (isTranspose){
00187       eigen_assert(this->computeU() && "This SVD decomposition didn't compute V. Did you ask for it?");
00188       return this->m_matrixU;
00189     }
00190     else
00191     {
00192       eigen_assert(this->computeV() && "This SVD decomposition didn't compute V. Did you ask for it?");
00193       return this->m_matrixV;
00194     }
00195   }
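  // A minimal usage sketch (editorial addition, not part of the original file; it assumes this
  // header and <Eigen/Dense> are included, and that the computation options shown are honoured
  // by this experimental version):
  //
  //   Eigen::MatrixXd A = Eigen::MatrixXd::Random(100, 60);
  //   Eigen::BDCSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeFullU | Eigen::ComputeFullV);
  //   Eigen::VectorXd sigma = svd.singularValues();     // singular values (from SVDBase)
  //   Eigen::MatrixXd U = svd.matrixU();                 // left singular vectors
  //   Eigen::MatrixXd V = svd.matrixV();                 // right singular vectors
  //   Eigen::VectorXd b = Eigen::VectorXd::Random(100);
  //   Eigen::VectorXd x = svd.solve(b);                  // least-squares solution of A x = b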
00196  
00197 private:
00198   void allocate(Index rows, Index cols, unsigned int computationOptions);
00199   void divide (Index firstCol, Index lastCol, Index firstRowW, 
00200                Index firstColW, Index shift);
00201   void deflation43(Index firstCol, Index shift, Index i, Index size);
00202   void deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size);
00203   void deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift);
00204   void copyUV(MatrixXr naiveU, MatrixXr naiveV, MatrixX householderU, MatrixX householderV);
00205 
00206 protected:
00207   MatrixXr m_naiveU, m_naiveV;
00208   MatrixXr m_computed;
00209   Index nRec;
00210   int algoswap;
00211   bool isTranspose, compU, compV;
00212   
00213 }; //end class BDCSVD
00214 
00215 
00216 // Method to allocate and initialize the matrices and attributes
00217 template<typename MatrixType>
00218 void BDCSVD<MatrixType>::allocate(Index rows, Index cols, unsigned int computationOptions)
00219 {
00220   isTranspose = (cols > rows);
00221   if (SVDBase<MatrixType>::allocate(rows, cols, computationOptions)) return;
00222   m_computed = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize );
00223   if (isTranspose){
00224     compU = this->computeU();
00225     compV = this->computeV();    
00226   } 
00227   else
00228   {
00229     compV = this->computeU();
00230     compU = this->computeV();   
00231   }
00232   if (compU) m_naiveU = MatrixXr::Zero(this->m_diagSize + 1, this->m_diagSize + 1 );
00233   else m_naiveU = MatrixXr::Zero(2, this->m_diagSize + 1 );
00234   
00235   if (compV) m_naiveV = MatrixXr::Zero(this->m_diagSize, this->m_diagSize);
00236   
00237 
00238   //should be changed for a cleaner implementation
00239   if (isTranspose){
00240     bool aux;
00241     if (this->computeU()||this->computeV()){
00242       aux = this->m_computeFullU;
00243       this->m_computeFullU = this->m_computeFullV;
00244       this->m_computeFullV = aux;
00245       aux = this->m_computeThinU;
00246       this->m_computeThinU = this->m_computeThinV;
00247       this->m_computeThinV = aux;
00248     } 
00249   }
00250 }// end allocate
00251 
00252 // Method which computes the BDCSVD for integer matrices
00253 template<>
00254 SVDBase<Matrix<int, Dynamic, Dynamic> >&
00255 BDCSVD<Matrix<int, Dynamic, Dynamic> >::compute(const MatrixType& matrix, unsigned int computationOptions) {
00256   allocate(matrix.rows(), matrix.cols(), computationOptions);
00257   this->m_nonzeroSingularValues = 0;
00258   m_computed = Matrix<int, Dynamic, Dynamic>::Zero(rows(), cols());
00259   for (int i=0; i<this->m_diagSize; i++)   {
00260     this->m_singularValues.coeffRef(i) = 0;
00261   }
00262   if (this->m_computeFullU) this->m_matrixU = Matrix<int, Dynamic, Dynamic>::Zero(rows(), rows());
00263   if (this->m_computeFullV) this->m_matrixV = Matrix<int, Dynamic, Dynamic>::Zero(cols(), cols()); 
00264   this->m_isInitialized = true;
00265   return *this;
00266 }
00267 
00268 
00269 // Method which computes the BDCSVD
00270 template<typename MatrixType>
00271 SVDBase<MatrixType>&
00272 BDCSVD<MatrixType>::compute(const MatrixType& matrix, unsigned int computationOptions) 
00273 {
00274   allocate(matrix.rows(), matrix.cols(), computationOptions);
00275   using std::abs;
00276 
00277   //**** step 1 Bidiagonalization (the input is transposed first when cols > rows)
00278   MatrixType copy;
00279   if (isTranspose) copy = matrix.adjoint();
00280   else copy = matrix;
00281   
00282   internal::UpperBidiagonalization<MatrixX > bid(copy);
00283 
00284   //**** step 2 Divide
00285   // this is ugly and has to be redone (take care of the complex cast)
00286   MatrixXr temp;
00287   temp = bid.bidiagonal().toDenseMatrix().transpose();
00288   m_computed.setZero();
00289   for (int i=0; i<this->m_diagSize - 1; i++)   {
00290     m_computed(i, i) = temp(i, i);
00291     m_computed(i + 1, i) = temp(i + 1, i);
00292   }
00293   m_computed(this->m_diagSize - 1, this->m_diagSize - 1) = temp(this->m_diagSize - 1, this->m_diagSize - 1);
00294   divide(0, this->m_diagSize - 1, 0, 0, 0);
00295 
00296   //**** step 3 copy
00297   for (int i=0; i<this->m_diagSize; i++)   {
00298     RealScalar a = abs(m_computed.coeff(i, i));
00299     this->m_singularValues.coeffRef(i) = a;
00300     if (a == 0){
00301       this->m_nonzeroSingularValues = i;
00302       break;
00303     }
00304     else  if (i == this->m_diagSize - 1)
00305     {
00306       this->m_nonzeroSingularValues = i + 1;
00307       break;
00308     }
00309   }
00310   copyUV(m_naiveV, m_naiveU, bid.householderU(), bid.householderV());
00311   this->m_isInitialized = true;
00312   return *this;
00313 }// end compute
00314 
00315 
00316 template<typename MatrixType>
00317 void BDCSVD<MatrixType>::copyUV(MatrixXr naiveU, MatrixXr naiveV, MatrixX householderU, MatrixX householderV){
00318   if (this->computeU()){
00319     MatrixX temp = MatrixX::Zero(naiveU.rows(), naiveU.cols());
00320     temp.real() = naiveU;
00321     if (this->m_computeThinU){
00322       this->m_matrixU = MatrixX::Identity(householderU.cols(), this->m_nonzeroSingularValues );
00323       this->m_matrixU.block(0, 0, this->m_diagSize, this->m_nonzeroSingularValues) = 
00324         temp.block(0, 0, this->m_diagSize, this->m_nonzeroSingularValues);
00325       this->m_matrixU = householderU * this->m_matrixU ;
00326     }
00327     else
00328     {
00329       this->m_matrixU = MatrixX::Identity(householderU.cols(), householderU.cols());
00330       this->m_matrixU.block(0, 0, this->m_diagSize, this->m_diagSize) = temp.block(0, 0, this->m_diagSize, this->m_diagSize);
00331       this->m_matrixU = householderU * this->m_matrixU ;
00332     }
00333   }
00334   if (this->computeV()){
00335     MatrixX temp = MatrixX::Zero(naiveV.rows(), naiveV.cols());
00336     temp.real() = naiveV;
00337     if (this->m_computeThinV){
00338       this->m_matrixV = MatrixX::Identity(householderV.cols(),this->m_nonzeroSingularValues );
00339       this->m_matrixV.block(0, 0, this->m_nonzeroSingularValues, this->m_nonzeroSingularValues) = 
00340         temp.block(0, 0, this->m_nonzeroSingularValues, this->m_nonzeroSingularValues);
00341       this->m_matrixV = householderV * this->m_matrixV ;
00342     }
00343     else  
00344     {
00345       this->m_matrixV = MatrixX::Identity(householderV.cols(), householderV.cols());
00346       this->m_matrixV.block(0, 0, this->m_diagSize, this->m_diagSize) = temp.block(0, 0, this->m_diagSize, this->m_diagSize);
00347       this->m_matrixV = householderV * this->m_matrixV;
00348     }
00349   }
00350 }
00351 
00352 // The divide algorithm works "in place": we always operate on subsets of the same matrix. The divide method takes as
00353 // arguments the location of the submatrix we are currently working on.
00354 
00355 //@param firstCol : The Index of the first column of the submatrix of m_computed and of m_naiveU;
00356 //@param lastCol : The Index of the last column of the submatrix of m_computed and of m_naiveU; 
00357 // lastCol + 1 - firstCol is the size of the submatrix.
00358 //@param firstRowW : The Index of the first row of the matrix W that we are to change. (see the reference paper, section 1, for more information on W)
00359 //@param firstColW : Same as firstRowW, but for the column.
00360 //@param shift : Each time we recurse into the left submatrix we add 1 to the shift: we want the last column of the U submatrix 
00361 // to become its first column (times a coefficient) and all the other columns to shift to the right. See the reference paper for details.
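//
// For illustration (editorial sketch, derived from the calls in this file): the top-level call made
// by compute() is
//
//   divide(0, m_diagSize - 1, 0, 0, 0);
//
// and, writing n = lastCol - firstCol + 1 and k = n / 2, each call recurses as
//
//   divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);   // right block first
//   divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);          // then left block, with shift + 1
//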
00362 template<typename MatrixType>
00363 void BDCSVD<MatrixType>::divide (Index firstCol, Index lastCol, Index firstRowW, 
00364                                  Index firstColW, Index shift)
00365 {
00366   // requires nbRows = nbCols + 1;
00367   using std::pow;
00368   using std::sqrt;
00369   using std::abs;
00370   const Index n = lastCol - firstCol + 1;
00371   const Index k = n/2;
00372   RealScalar alphaK;
00373   RealScalar betaK; 
00374   RealScalar r0; 
00375   RealScalar lambda, phi, c0, s0;
00376   MatrixXr l, f;
00377   // For small matrices we fall back to JacobiSVD, which is more 
00378   // efficient.
00379   if (n < algoswap){
00380     JacobiSVD<MatrixXr> b(m_computed.block(firstCol, firstCol, n + 1, n), 
00381                           ComputeFullU | (ComputeFullV * compV)) ;
00382     if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1).real() << b.matrixU();
00383     else 
00384     {
00385       m_naiveU.row(0).segment(firstCol, n + 1).real() << b.matrixU().row(0);
00386       m_naiveU.row(1).segment(firstCol, n + 1).real() << b.matrixU().row(n);
00387     }
00388     if (compV) m_naiveV.block(firstRowW, firstColW, n, n).real() << b.matrixV();
00389     m_computed.block(firstCol + shift, firstCol + shift, n + 1, n).setZero();
00390     for (int i=0; i<n; i++)
00391     {
00392       m_computed(firstCol + shift + i, firstCol + shift +i) = b.singularValues().coeffRef(i);
00393     }
00394     return;
00395   }
00396   // We use the divide and conquer algorithm
00397   alphaK =  m_computed(firstCol + k, firstCol + k);
00398   betaK = m_computed(firstCol + k + 1, firstCol + k);
00399   // The divides must be done in this order to get correct results: divide() changes the data inside the submatrices, 
00400   // and the divide of the right submatrix reads one column of the left submatrix. That's why we need to treat the 
00401   // right submatrix before the left one. 
00402   divide(k + 1 + firstCol, lastCol, k + 1 + firstRowW, k + 1 + firstColW, shift);
00403   divide(firstCol, k - 1 + firstCol, firstRowW, firstColW + 1, shift + 1);
00404   if (compU)
00405   {
00406     lambda = m_naiveU(firstCol + k, firstCol + k);
00407     phi = m_naiveU(firstCol + k + 1, lastCol + 1);
00408   } 
00409   else 
00410   {
00411     lambda = m_naiveU(1, firstCol + k);
00412     phi = m_naiveU(0, lastCol + 1);
00413   }
00414   r0 = sqrt((abs(alphaK * lambda) * abs(alphaK * lambda))
00415             + abs(betaK * phi) * abs(betaK * phi));
00416   if (compU)
00417   {
00418     l = m_naiveU.row(firstCol + k).segment(firstCol, k);
00419     f = m_naiveU.row(firstCol + k + 1).segment(firstCol + k + 1, n - k - 1);
00420   } 
00421   else 
00422   {
00423     l = m_naiveU.row(1).segment(firstCol, k);
00424     f = m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1);
00425   }
00426   if (compV) m_naiveV(firstRowW+k, firstColW) = 1;
00427   if (r0 == 0)
00428   {
00429     c0 = 1;
00430     s0 = 0;
00431   }
00432   else
00433   {
00434     c0 = alphaK * lambda / r0;
00435     s0 = betaK * phi / r0;
00436   }
00437   if (compU)
00438   {
00439     MatrixXr q1 (m_naiveU.col(firstCol + k).segment(firstCol, k + 1));     
00440     // we shift Q1 to the right
00441     for (Index i = firstCol + k - 1; i >= firstCol; i--) 
00442     {
00443       m_naiveU.col(i + 1).segment(firstCol, k + 1) << m_naiveU.col(i).segment(firstCol, k + 1);
00444     }
00445     // first column = q1 * c0
00446     m_naiveU.col(firstCol).segment( firstCol, k + 1) << (q1 * c0);
00447     // last column = q1 * - s0
00448     m_naiveU.col(lastCol + 1).segment(firstCol, k + 1) << (q1 * ( - s0));
00449     // first column = q2 * s0
00450     m_naiveU.col(firstCol).segment(firstCol + k + 1, n - k) << 
00451       m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *s0; 
00452     // q2 *= c0
00453     m_naiveU.col(lastCol + 1).segment(firstCol + k + 1, n - k) *= c0; 
00454   } 
00455   else 
00456   {
00457     RealScalar q1 = (m_naiveU(0, firstCol + k));
00458     // we shift Q1 to the right
00459     for (Index i = firstCol + k - 1; i >= firstCol; i--) 
00460     {
00461       m_naiveU(0, i + 1) = m_naiveU(0, i);
00462     }
00463     // first column = q1 * c0
00464     m_naiveU(0, firstCol) = (q1 * c0);
00465     // last column = q1 * - s0
00466     m_naiveU(0, lastCol + 1) = (q1 * ( - s0));
00467     // first column = q2 * s0
00468     m_naiveU(1, firstCol) = m_naiveU(1, lastCol + 1) *s0; 
00469     // q2 *= c0
00470     m_naiveU(1, lastCol + 1) *= c0;
00471     m_naiveU.row(1).segment(firstCol + 1, k).setZero();
00472     m_naiveU.row(0).segment(firstCol + k + 1, n - k - 1).setZero();
00473   }
00474   m_computed(firstCol + shift, firstCol + shift) = r0;
00475   m_computed.col(firstCol + shift).segment(firstCol + shift + 1, k) << alphaK * l.transpose().real();
00476   m_computed.col(firstCol + shift).segment(firstCol + shift + k + 1, n - k - 1) << betaK * f.transpose().real();
00477 
00478 
00479   // The line below performs the deflation of the matrix for the third part of the algorithm.
00480   // The third part of the algorithm is a fast SVD of the matrix m_computed, which works thanks to this deflation;
00481   // it is not implemented yet, so a JacobiSVD is used below instead.
00482 
00483   deflation(firstCol, lastCol, k, firstRowW, firstColW, shift);
00484 
00485   // Third part of the algorithm: since the real third part is not implemented yet, we use a JacobiSVD instead
00486   JacobiSVD<MatrixXr> res= JacobiSVD<MatrixXr>(m_computed.block(firstCol + shift, firstCol +shift, n + 1, n), 
00487                                                ComputeFullU | (ComputeFullV * compV)) ;
00488   if (compU) m_naiveU.block(firstCol, firstCol, n + 1, n + 1) *= res.matrixU();
00489   else m_naiveU.block(0, firstCol, 2, n + 1) *= res.matrixU();
00490   
00491   if (compV) m_naiveV.block(firstRowW, firstColW, n, n) *= res.matrixV();
00492   m_computed.block(firstCol + shift, firstCol + shift, n, n) << MatrixXr::Zero(n, n);
00493   for (int i=0; i<n; i++)
00494     m_computed(firstCol + shift + i, firstCol + shift +i) = res.singularValues().coeffRef(i);
00495   // end of the third part
00496 
00497 
00498 }// end divide
00499 
00500 
00501 // pages 12-13 of the reference report
00502 // i >= 1, d_i almost null and z_i non null.
00503 // We use a rotation, applied to the left of M, to zero out z_i.
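// In 2x2 form (editorial sketch): restricted to rows and columns (first, i) of the working matrix,
// the block is [ z_0  0 ; z_i  d_i ] with d_i ~ 0. With r = sqrt(z_0^2 + z_i^2), c = z_0 / r and
// s = z_i / r, applying [ c  s ; -s  c ] on the left gives approximately [ r  0 ; 0  0 ], which is
// what the code below writes into m_computed; the same (c, s) pair is used to combine the
// corresponding columns of m_naiveU.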
00504 template <typename MatrixType>
00505 void BDCSVD<MatrixType>::deflation43(Index firstCol, Index shift, Index i, Index size){
00506   using std::abs;
00507   using std::sqrt;
00508   using std::pow;
00509   RealScalar c = m_computed(firstCol + shift, firstCol + shift);
00510   RealScalar s = m_computed(i, firstCol + shift);
00511   RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2));
00512   if (r == 0){
00513     m_computed(i, i)=0;
00514     return;
00515   }
00516   c/=r;
00517   s/=r;
00518   m_computed(firstCol + shift, firstCol + shift) = r;  
00519   m_computed(i, firstCol + shift) = 0;
00520   m_computed(i, i) = 0;
00521   if (compU){
00522     m_naiveU.col(firstCol).segment(firstCol,size) = 
00523       c * m_naiveU.col(firstCol).segment(firstCol, size) - 
00524       s * m_naiveU.col(i).segment(firstCol, size) ;
00525 
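    // note (editorial): the first column now already holds c*q_first - s*q_i, so the coefficients
    // (c + s*s/c) and (s/c) below reproduce the standard update s*q_first + c*q_i in place
    // (this rewriting assumes c != 0)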
00526     m_naiveU.col(i).segment(firstCol, size) = 
00527       (c + s*s/c) * m_naiveU.col(i).segment(firstCol, size) + 
00528       (s/c) * m_naiveU.col(firstCol).segment(firstCol,size);
00529   }
00530 }// end deflation 43
00531 
00532 
00533 // page 13 of the reference report
00534 // i, j >= 1, i != j and |d_i - d_j| < epsilon * norm2(M)
00535 // We apply two rotations so that z_j = 0;
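// In 2x2 form (editorial sketch): with r = sqrt(z_i^2 + z_j^2), the pair (z_i, z_j) is rotated to
// (r, 0); the corresponding columns of m_naiveU (and of m_naiveV when it is computed) are combined
// with the same rotation, and d_i is overwritten by d_j, which is legitimate because the two values
// are within the deflation tolerance of each other.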
00536 template <typename MatrixType>
00537 void BDCSVD<MatrixType>::deflation44(Index firstColu , Index firstColm, Index firstRowW, Index firstColW, Index i, Index j, Index size){
00538   using std::abs;
00539   using std::sqrt;
00540   using std::conj;
00541   using std::pow;
00542   RealScalar c = m_computed(firstColm, firstColm + j - 1);
00543   RealScalar s = m_computed(firstColm, firstColm + i - 1);
00544   RealScalar r = sqrt(pow(abs(c), 2) + pow(abs(s), 2));
00545   if (r==0){
00546     m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
00547     return;
00548   }
00549   c/=r;
00550   s/=r;
00551   m_computed(firstColm + i, firstColm) = r;  
00552   m_computed(firstColm + i, firstColm + i) = m_computed(firstColm + j, firstColm + j);
00553   m_computed(firstColm + j, firstColm) = 0;
00554   if (compU){
00555     m_naiveU.col(firstColu + i).segment(firstColu, size) = 
00556       c * m_naiveU.col(firstColu + i).segment(firstColu, size) - 
00557       s * m_naiveU.col(firstColu + j).segment(firstColu, size) ;
00558 
00559     m_naiveU.col(firstColu + j).segment(firstColu, size) = 
00560       (c + s*s/c) *  m_naiveU.col(firstColu + j).segment(firstColu, size) + 
00561       (s/c) * m_naiveU.col(firstColu + i).segment(firstColu, size);
00562   } 
00563   if (compV){
00564     m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) = 
00565       c * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1) + 
00566       s * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) ;
00567 
00568     m_naiveV.col(firstColW + j).segment(firstRowW, size - 1)  = 
00569       (c + s*s/c) * m_naiveV.col(firstColW + j).segment(firstRowW, size - 1) - 
00570       (s/c) * m_naiveV.col(firstColW + i).segment(firstRowW, size - 1);
00571   }
00572 }// end deflation 44
00573 
00574 
00575 
00576 template <typename MatrixType>
00577 void BDCSVD<MatrixType>::deflation(Index firstCol, Index lastCol, Index k, Index firstRowW, Index firstColW, Index shift){
00578   //condition 4.1
00579   RealScalar EPS = EPSILON * (std::max<RealScalar>(m_computed(firstCol + shift + 1, firstCol + shift + 1), m_computed(firstCol + k, firstCol + k)));
00580   const Index length = lastCol + 1 - firstCol;
00581   if (m_computed(firstCol + shift, firstCol + shift) < EPS){
00582     m_computed(firstCol + shift, firstCol + shift) = EPS;
00583   }
00584   //condition 4.2
00585   for (Index i=firstCol + shift + 1;i<=lastCol + shift;i++){
00586     if (std::abs(m_computed(i, firstCol + shift)) < EPS){
00587       m_computed(i, firstCol + shift) = 0;
00588     }
00589   }
00590 
00591   //condition 4.3
00592   for (Index i=firstCol + shift + 1;i<=lastCol + shift; i++){
00593     if (m_computed(i, i) < EPS){
00594       deflation43(firstCol, shift, i, length);
00595     }
00596   }
00597 
00598   //condition 4.4
00599  
00600   Index i=firstCol + shift + 1, j=firstCol + shift + k + 1;
00601   //we store, for each final position, the index of the entry that goes there
00602   Index *permutation = new Index[length];
00603 
00604   for (Index p =1; p < length; p++) {
00605     if (i> firstCol + shift + k){
00606       permutation[p] = j;
00607       j++;
00608     } else if (j> lastCol + shift) 
00609     {
00610       permutation[p] = i;
00611       i++;
00612     }
00613     else 
00614     {
00615       if (m_computed(i, i) < m_computed(j, j)){
00616         permutation[p] = j;
00617         j++;
00618       } 
00619       else
00620       {
00621         permutation[p] = i;
00622         i++;
00623       }
00624     }
00625   }
00626   //we do the permutation
00627   RealScalar aux;
00628   //we store the current index of each column
00629   //and the column of each index
00630   Index *realInd = new Index[length];
00631   Index *realCol = new Index[length];
00632   for (int pos = 0; pos< length; pos++){
00633     realCol[pos] = pos + firstCol + shift;
00634     realInd[pos] = pos;
00635   }
00636   const Index Zero = firstCol + shift;
00637   VectorType temp;
00638   for (int i = 1; i < length - 1; i++){
00639     const Index I = i + Zero;
00640     const Index realI = realInd[i];
00641     const Index j  = permutation[length - i] - Zero;
00642     const Index J = realCol[j];
00643     
00644     // swap the diagonal entries
00645     aux = m_computed(I, I); 
00646     m_computed(I, I) = m_computed(J, J);
00647     m_computed(J, J) = aux;
00648     
00649     // swap the corresponding entries of the first column (the z values)
00650     aux = m_computed(I, Zero); 
00651     m_computed(I, Zero) = m_computed(J, Zero);
00652     m_computed(J, Zero) = aux;
00653 
00654     // change columns
00655     if (compU) {
00656       temp = m_naiveU.col(I - shift).segment(firstCol, length + 1);
00657       m_naiveU.col(I - shift).segment(firstCol, length + 1) << 
00658         m_naiveU.col(J - shift).segment(firstCol, length + 1);
00659       m_naiveU.col(J - shift).segment(firstCol, length + 1) << temp;
00660     } 
00661     else
00662     {
00663       temp = m_naiveU.col(I - shift).segment(0, 2);
00664       m_naiveU.col(I - shift).segment(0, 2) << 
00665         m_naiveU.col(J - shift).segment(0, 2);
00666       m_naiveU.col(J - shift).segment(0, 2) << temp;      
00667     }
00668     if (compV) {
00669       const Index CWI = I + firstColW - Zero;
00670       const Index CWJ = J + firstColW - Zero;
00671       temp = m_naiveV.col(CWI).segment(firstRowW, length);
00672       m_naiveV.col(CWI).segment(firstRowW, length) << m_naiveV.col(CWJ).segment(firstRowW, length);
00673       m_naiveV.col(CWJ).segment(firstRowW, length) << temp;
00674     }
00675 
00676     //update real pos
00677     realCol[realI] = J;
00678     realCol[j] = I;
00679     realInd[J - Zero] = realI;
00680     realInd[I - Zero] = j;
00681   }
00682   for (Index i = firstCol + shift + 1; i<lastCol + shift;i++){
00683     if ((m_computed(i + 1, i + 1) - m_computed(i, i)) < EPS){
00684       deflation44(firstCol , 
00685                   firstCol + shift, 
00686                   firstRowW, 
00687                   firstColW, 
00688                   i - Zero, 
00689                   i + 1 - Zero, 
00690                   length);
00691     }
00692   }
00693   delete [] permutation;
00694   delete [] realInd;
00695   delete [] realCol;
00696 
00697 }//end deflation
00698 
00699 
00700 namespace internal{
00701 
00702 template<typename _MatrixType, typename Rhs>
00703 struct solve_retval<BDCSVD<_MatrixType>, Rhs>
00704   : solve_retval_base<BDCSVD<_MatrixType>, Rhs>
00705 {
00706   typedef BDCSVD<_MatrixType> BDCSVDType;
00707   EIGEN_MAKE_SOLVE_HELPERS(BDCSVDType, Rhs)
00708 
00709   template<typename Dest> void evalTo(Dest& dst) const
00710   {
00711     eigen_assert(rhs().rows() == dec().rows());
00712     // A = U S V^*
00713     // So A^{ - 1} = V S^{ - 1} U^*    
00714     Index diagSize = (std::min)(dec().rows(), dec().cols());
00715     typename BDCSVDType::SingularValuesType invertedSingVals(diagSize);
00716     Index nonzeroSingVals = dec().nonzeroSingularValues();
00717     invertedSingVals.head(nonzeroSingVals) = dec().singularValues().head(nonzeroSingVals).array().inverse();
00718     invertedSingVals.tail(diagSize - nonzeroSingVals).setZero();
00719     
00720     dst = dec().matrixV().leftCols(diagSize)
00721       * invertedSingVals.asDiagonal()
00722       * dec().matrixU().leftCols(diagSize).adjoint()
00723       * rhs();  
00724     return;
00725   }
00726 };
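// In other words (editorial note): evalTo() applies the pseudoinverse built from the decomposition,
//
//   x = V * diag(1/sigma_1, ..., 1/sigma_r, 0, ..., 0) * U^* * b,
//
// where only the r nonzero singular values are inverted; this yields the least-squares solution of
// A x = b.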
00727 
00728 } //end namespace internal
00729 
00737 /*
00738 template<typename Derived>
00739 BDCSVD<typename MatrixBase<Derived>::PlainObject>
00740 MatrixBase<Derived>::bdcSvd(unsigned int computationOptions) const
00741 {
00742   return BDCSVD<PlainObject>(*this, computationOptions);
00743 }
00744 */
00745 
00746 } // end namespace Eigen
00747 
00748 #endif

