SelfadjointMatrixVector.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

#ifndef EIGEN_SELFADJOINT_MATRIX_VECTOR_H
#define EIGEN_SELFADJOINT_MATRIX_VECTOR_H

namespace internal {

/* Optimized selfadjoint matrix * vector product:
 * This algorithm processes 2 columns at once, which both halves the number
 * of loads/stores of the result and reduces instruction dependencies.
 */
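// Effectively computes res += alpha * A * rhs, where A is a selfadjoint matrix of which
// only the triangle selected by UpLo is read (with optional conjugation of either operand).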
template<typename Scalar, typename Index, int StorageOrder, int UpLo, bool ConjugateLhs, bool ConjugateRhs>
static EIGEN_DONT_INLINE void product_selfadjoint_vector(
  Index size,
  const Scalar*  lhs, Index lhsStride,
  const Scalar* _rhs, Index rhsIncr,
  Scalar* res,
  Scalar alpha)
{
  typedef typename packet_traits<Scalar>::type Packet;
  typedef typename NumTraits<Scalar>::Real RealScalar;
  const Index PacketSize = sizeof(Packet)/sizeof(Scalar);

  enum {
    IsRowMajor = StorageOrder==RowMajor ? 1 : 0,
    IsLower = UpLo == Lower ? 1 : 0,
    FirstTriangular = IsRowMajor == IsLower
  };
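  // When FirstTriangular is set, the stored triangle is traversed before the diagonal,
  // so the inner loops below run over indices i < j; otherwise they run over indices i > j.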

  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs,  IsRowMajor), ConjugateRhs> cj0;
  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> cj1;
  conj_helper<Scalar,Scalar,NumTraits<Scalar>::IsComplex, ConjugateRhs> cjd;

  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs,  IsRowMajor), ConjugateRhs> pcj0;
  conj_helper<Packet,Packet,NumTraits<Scalar>::IsComplex && EIGEN_LOGICAL_XOR(ConjugateLhs, !IsRowMajor), ConjugateRhs> pcj1;

  Scalar cjAlpha = ConjugateRhs ? conj(alpha) : alpha;

  // FIXME this copy is now handled outside product_selfadjoint_vector, so it could probably be removed.
  // If the rhs is not sequentially stored in memory, we copy it to a temporary buffer
  // because we need to extract packets from it.
  ei_declare_aligned_stack_constructed_variable(Scalar,rhs,size,rhsIncr==1 ? const_cast<Scalar*>(_rhs) : 0);
  if (rhsIncr!=1)
  {
    const Scalar* it = _rhs;
    for (Index i=0; i<size; ++i, it+=rhsIncr)
      rhs[i] = *it;
  }

  Index bound = std::max(Index(0),size-8) & 0xfffffffe;
  if (FirstTriangular)
    bound = size - bound;
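  // bound splits the columns in two: the bulk is handled two columns at a time by the
  // loop below, while the remaining 8 or 9 columns (all of them when size < 8) fall
  // through to the single-column loop at the end.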

  for (Index j=FirstTriangular ? bound : 0;
       j<(FirstTriangular ? size : bound);j+=2)
  {
    register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;
    register const Scalar* EIGEN_RESTRICT A1 = lhs + (j+1)*lhsStride;

    Scalar t0 = cjAlpha * rhs[j];
    Packet ptmp0 = pset1<Packet>(t0);
    Scalar t1 = cjAlpha * rhs[j+1];
    Packet ptmp1 = pset1<Packet>(t1);

    Scalar t2 = 0;
    Packet ptmp2 = pset1<Packet>(t2);
    Scalar t3 = 0;
    Packet ptmp3 = pset1<Packet>(t3);

    size_t starti = FirstTriangular ? 0 : j+2;
    size_t endi   = FirstTriangular ? j : size;
    size_t alignedStart = (starti) + first_aligned(&res[starti], endi-starti);
    size_t alignedEnd = alignedStart + ((endi-alignedStart)/(PacketSize))*(PacketSize);
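    // The inner range [starti,endi) is split into a scalar prologue [starti,alignedStart),
    // a vectorized middle part [alignedStart,alignedEnd) processed packet by packet, and a
    // scalar epilogue [alignedEnd,endi).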

    // TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed
    res[j]   += cjd.pmul(internal::real(A0[j]), t0);
    res[j+1] += cjd.pmul(internal::real(A1[j+1]), t1);
    if(FirstTriangular)
    {
      res[j]   += cj0.pmul(A1[j],   t1);
      t3       += cj1.pmul(A1[j],   rhs[j]);
    }
    else
    {
      res[j+1] += cj0.pmul(A0[j+1],t0);
      t2 += cj1.pmul(A0[j+1], rhs[j+1]);
    }

    for (size_t i=starti; i<alignedStart; ++i)
    {
      res[i] += t0 * A0[i] + t1 * A1[i];
      t2 += conj(A0[i]) * rhs[i];
      t3 += conj(A1[i]) * rhs[i];
    }
    // Yes, this is an optimization for gcc 4.3 and 4.4 (=> huge speed up);
    // gcc 4.2 performs this optimization automatically.
    const Scalar* EIGEN_RESTRICT a0It  = A0  + alignedStart;
    const Scalar* EIGEN_RESTRICT a1It  = A1  + alignedStart;
    const Scalar* EIGEN_RESTRICT rhsIt = rhs + alignedStart;
          Scalar* EIGEN_RESTRICT resIt = res + alignedStart;
    for (size_t i=alignedStart; i<alignedEnd; i+=PacketSize)
    {
      Packet A0i = ploadu<Packet>(a0It);  a0It  += PacketSize;
      Packet A1i = ploadu<Packet>(a1It);  a1It  += PacketSize;
      Packet Bi  = ploadu<Packet>(rhsIt); rhsIt += PacketSize; // FIXME should be aligned in most cases
      Packet Xi  = pload <Packet>(resIt);

      Xi    = pcj0.pmadd(A0i,ptmp0, pcj0.pmadd(A1i,ptmp1,Xi));
      ptmp2 = pcj1.pmadd(A0i,  Bi, ptmp2);
      ptmp3 = pcj1.pmadd(A1i,  Bi, ptmp3);
      pstore(resIt,Xi); resIt += PacketSize;
    }
    for (size_t i=alignedEnd; i<endi; i++)
    {
      res[i] += cj0.pmul(A0[i], t0) + cj0.pmul(A1[i],t1);
      t2 += cj1.pmul(A0[i], rhs[i]);
      t3 += cj1.pmul(A1[i], rhs[i]);
    }

    res[j]   += alpha * (t2 + predux(ptmp2));
    res[j+1] += alpha * (t3 + predux(ptmp3));
  }
  for (Index j=FirstTriangular ? 0 : bound;j<(FirstTriangular ? bound : size);j++)
  {
    register const Scalar* EIGEN_RESTRICT A0 = lhs + j*lhsStride;

    Scalar t1 = cjAlpha * rhs[j];
    Scalar t2 = 0;
    // TODO make sure this product is a real * complex and that the rhs is properly conjugated if needed
    res[j] += cjd.pmul(internal::real(A0[j]), t1);
    for (Index i=FirstTriangular ? 0 : j+1; i<(FirstTriangular ? j : size); i++)
    {
      res[i] += cj0.pmul(A0[i], t1);
      t2 += cj1.pmul(A0[i], rhs[i]);
    }
    res[j] += alpha * t2;
  }
}

} // end namespace internal

/***************************************************************************
* Wrapper to product_selfadjoint_vector
***************************************************************************/

namespace internal {
template<typename Lhs, int LhsMode, typename Rhs>
struct traits<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true> >
  : traits<ProductBase<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>, Lhs, Rhs> >
{};
}

template<typename Lhs, int LhsMode, typename Rhs>
struct SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>
  : public ProductBase<SelfadjointProductMatrix<Lhs,LhsMode,false,Rhs,0,true>, Lhs, Rhs >
{
  EIGEN_PRODUCT_PUBLIC_INTERFACE(SelfadjointProductMatrix)

  enum {
    LhsUpLo = LhsMode&(Upper|Lower)
  };

  SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}

  template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
  {
    typedef typename Dest::Scalar ResScalar;
    typedef typename Base::RhsScalar RhsScalar;
    typedef Map<Matrix<ResScalar,Dynamic,1>, Aligned> MappedDest;

    eigen_assert(dest.rows()==m_lhs.rows() && dest.cols()==m_rhs.cols());

    const ActualLhsType lhs = LhsBlasTraits::extract(m_lhs);
    const ActualRhsType rhs = RhsBlasTraits::extract(m_rhs);

    Scalar actualAlpha = alpha * LhsBlasTraits::extractScalarFactor(m_lhs)
                               * RhsBlasTraits::extractScalarFactor(m_rhs);

    enum {
      EvalToDest = (Dest::InnerStrideAtCompileTime==1),
      UseRhs = (_ActualRhsType::InnerStrideAtCompileTime==1)
    };
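    // The kernel requires the destination and the rhs to have unit inner stride so that
    // packets can be extracted from them; when that is not the case, the data is first
    // copied into an aligned temporary buffer below.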

    internal::gemv_static_vector_if<ResScalar,Dest::SizeAtCompileTime,Dest::MaxSizeAtCompileTime,!EvalToDest> static_dest;
    internal::gemv_static_vector_if<RhsScalar,_ActualRhsType::SizeAtCompileTime,_ActualRhsType::MaxSizeAtCompileTime,!UseRhs> static_rhs;

    ei_declare_aligned_stack_constructed_variable(ResScalar,actualDestPtr,dest.size(),
                                                  EvalToDest ? dest.data() : static_dest.data());

    ei_declare_aligned_stack_constructed_variable(RhsScalar,actualRhsPtr,rhs.size(),
        UseRhs ? const_cast<RhsScalar*>(rhs.data()) : static_rhs.data());

    if(!EvalToDest)
    {
      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      int size = dest.size();
      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      #endif
      MappedDest(actualDestPtr, dest.size()) = dest;
    }

    if(!UseRhs)
    {
      #ifdef EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      int size = rhs.size();
      EIGEN_DENSE_STORAGE_CTOR_PLUGIN
      #endif
      Map<typename _ActualRhsType::PlainObject>(actualRhsPtr, rhs.size()) = rhs;
    }

    internal::product_selfadjoint_vector<Scalar, Index, (internal::traits<_ActualLhsType>::Flags&RowMajorBit) ? RowMajor : ColMajor, int(LhsUpLo), bool(LhsBlasTraits::NeedToConjugate), bool(RhsBlasTraits::NeedToConjugate)>
      (
        lhs.rows(),                             // size
        &lhs.coeffRef(0,0),  lhs.outerStride(), // lhs info
        actualRhsPtr, 1,                        // rhs info
        actualDestPtr,                          // result info
        actualAlpha                             // scale factor
      );

    if(!EvalToDest)
      dest = MappedDest(actualDestPtr, dest.size());
  }
};

00248 
00249 namespace internal {
00250 template<typename Lhs, typename Rhs, int RhsMode>
00251 struct traits<SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false> >
00252   : traits<ProductBase<SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>, Lhs, Rhs> >
00253 {};
00254 }
00255 
00256 template<typename Lhs, typename Rhs, int RhsMode>
00257 struct SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>
00258   : public ProductBase<SelfadjointProductMatrix<Lhs,0,true,Rhs,RhsMode,false>, Lhs, Rhs >
00259 {
00260   EIGEN_PRODUCT_PUBLIC_INTERFACE(SelfadjointProductMatrix)
00261 
00262   enum {
00263     RhsUpLo = RhsMode&(Upper|Lower)
00264   };
00265 
00266   SelfadjointProductMatrix(const Lhs& lhs, const Rhs& rhs) : Base(lhs,rhs) {}
00267 
00268   template<typename Dest> void scaleAndAddTo(Dest& dest, Scalar alpha) const
00269   {
00270     // let's simply transpose the product
00271     Transpose<Dest> destT(dest);
00272     SelfadjointProductMatrix<Transpose<const Rhs>, int(RhsUpLo)==Upper ? Lower : Upper, false,
00273                              Transpose<const Lhs>, 0, true>(m_rhs.transpose(), m_lhs.transpose()).scaleAndAddTo(destT, alpha);
00274   }
00275 };
00276 
00277 
00278 #endif // EIGEN_SELFADJOINT_MATRIX_VECTOR_H
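
In practice this kernel is not called directly; it is reached through Eigen's public selfadjoint view API. A minimal sketch (the matrix and vector names are illustrative, and Eigen 3 headers are assumed):

#include <Eigen/Dense>

int main()
{
  Eigen::MatrixXd A = Eigen::MatrixXd::Random(5,5);
  A = (A + A.transpose()).eval();          // make A symmetric (selfadjoint for real scalars)
  Eigen::VectorXd x = Eigen::VectorXd::Random(5);

  // Only the lower triangle of A is read; for dense matrices this product
  // should be dispatched to the selfadjoint matrix * vector path above.
  Eigen::VectorXd y = A.selfadjointView<Eigen::Lower>() * x;
  return 0;
}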

