PacketMath.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008 Konstantinos Margaritis <markos@codex.gr>
//
// Eigen is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 3 of the License, or (at your option) any later version.
//
// Alternatively, you can redistribute it and/or
// modify it under the terms of the GNU General Public License as
// published by the Free Software Foundation; either version 2 of
// the License, or (at your option) any later version.
//
// Eigen is distributed in the hope that it will be useful, but WITHOUT ANY
// WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
// FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License or the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public
// License and a copy of the GNU General Public License along with
// Eigen. If not, see <http://www.gnu.org/licenses/>.

#ifndef EIGEN_PACKET_MATH_ALTIVEC_H
#define EIGEN_PACKET_MATH_ALTIVEC_H

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4
#endif

#ifndef EIGEN_HAS_FUSE_CJMADD
#define EIGEN_HAS_FUSE_CJMADD 1
#endif

// NOTE Altivec has 32 registers, but Eigen only accepts a value of 8 or 16
#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
#endif

typedef __vector float          Packet4f;
typedef __vector int            Packet4i;
typedef __vector unsigned int   Packet4ui;
typedef __vector __bool int     Packet4bi;
typedef __vector short int      Packet8i;
typedef __vector unsigned char  Packet16uc;

// We don't want to write the same code all the time, but we need to reuse the constants
// and it doesn't really work to declare them global, so we define macros instead

#define _EIGEN_DECLARE_CONST_FAST_Packet4f(NAME,X) \
  Packet4f p4f_##NAME = (Packet4f) vec_splat_s32(X)

#define _EIGEN_DECLARE_CONST_FAST_Packet4i(NAME,X) \
  Packet4i p4i_##NAME = vec_splat_s32(X)

#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  Packet4f p4f_##NAME = pset1<Packet4f>(X)

// Splat the integer bit pattern X into every lane and reinterpret the result as a float packet
#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  Packet4f p4f_##NAME = (Packet4f) pset1<Packet4i>(X)

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  Packet4i p4i_##NAME = pset1<Packet4i>(X)

#define DST_CHAN 1
#define DST_CTRL(size, count, stride) (((size) << 24) | ((count) << 16) | (stride))

// Define global static constants:
static Packet4f p4f_COUNTDOWN = { 3.0, 2.0, 1.0, 0.0 };
static Packet4i p4i_COUNTDOWN = { 3, 2, 1, 0 };
static Packet16uc p16uc_REVERSE = {12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3};
static Packet16uc p16uc_FORWARD = vec_lvsl(0, (float*)0);
static Packet16uc p16uc_DUPLICATE = {0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7};

static _EIGEN_DECLARE_CONST_FAST_Packet4f(ZERO, 0);
static _EIGEN_DECLARE_CONST_FAST_Packet4i(ZERO, 0);
static _EIGEN_DECLARE_CONST_FAST_Packet4i(ONE,1);
static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS16,-16);
static _EIGEN_DECLARE_CONST_FAST_Packet4i(MINUS1,-1);
static Packet4f p4f_ONE = vec_ctf(p4i_ONE, 0);
static Packet4f p4f_ZERO_ = (Packet4f) vec_sl((Packet4ui)p4i_MINUS1, (Packet4ui)p4i_MINUS1);
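// Explanatory note (added comment, not in the original source): p4f_ZERO_ ends up holding
// 0x80000000 in every lane, i.e. -0.0f, because vec_sl shifts 0xffffffff left by the shift
// amount taken modulo 32 (here -1 mod 32 = 31). It is usable as a sign-bit mask.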

template<> struct packet_traits<float>  : default_packet_traits
{
  typedef Packet4f type;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4,

    // FIXME check the Has*
    HasSin  = 0,
    HasCos  = 0,
    HasLog  = 0,
    HasExp  = 0,
    HasSqrt = 0
  };
};
template<> struct packet_traits<int>    : default_packet_traits
{
  typedef Packet4i type;
  enum {
    // FIXME check the Has*
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4
  };
};

template<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4}; };
template<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4}; };
/*
inline std::ostream & operator <<(std::ostream & s, const Packet4f & v)
{
  union {
    Packet4f   v;
    float n[4];
  } vt;
  vt.v = v;
  s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
  return s;
}

inline std::ostream & operator <<(std::ostream & s, const Packet4i & v)
{
  union {
    Packet4i   v;
    int n[4];
  } vt;
  vt.v = v;
  s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
  return s;
}

inline std::ostream & operator <<(std::ostream & s, const Packet4ui & v)
{
  union {
    Packet4ui   v;
    unsigned int n[4];
  } vt;
  vt.v = v;
  s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
  return s;
}

inline std::ostream & operator <<(std::ostream & s, const Packet4bi & v)
{
  union {
    Packet4bi v;
    unsigned int n[4];
  } vt;
  vt.v = v;
  s << vt.n[0] << ", " << vt.n[1] << ", " << vt.n[2] << ", " << vt.n[3];
  return s;
}
*/
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) {
  // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
  float EIGEN_ALIGN16 af[4];
  af[0] = from;
  Packet4f vc = vec_ld(0, af);
  vc = vec_splat(vc, 0);
  return vc;
}

template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from)   {
  int EIGEN_ALIGN16 ai[4];
  ai[0] = from;
  Packet4i vc = vec_ld(0, ai);
  vc = vec_splat(vc, 0);
  return vc;
}
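// Illustrative usage sketch (not part of Eigen; the helper name below is made up):
// pset1 broadcasts one scalar to all four lanes by storing it into an aligned slot,
// loading that slot with vec_ld and splatting lane 0 with vec_splat.
inline Packet4f eigen_doc_broadcast_example(float x)
{
  Packet4f v = pset1<Packet4f>(x);   // v == { x, x, x, x }
  return v;
}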

template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a) { return vec_add(pset1<Packet4f>(a), p4f_COUNTDOWN); }
template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a)     { return vec_add(pset1<Packet4i>(a), p4i_COUNTDOWN); }

template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_add(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_add(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_sub(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_sub(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a) { return psub<Packet4f>(p4f_ZERO, a); }
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a) { return psub<Packet4i>(p4i_ZERO, a); }

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_madd(a,b,p4f_ZERO); }
/* Commented out: it's actually slower than processing it scalar
 *
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
{
  // Detailed in: http://freevec.org/content/32bit_signed_integer_multiplication_altivec
  //Set up constants, variables
  Packet4i a1, b1, bswap, low_prod, high_prod, prod, prod_, v1sel;

  // Get the absolute values
  a1  = vec_abs(a);
  b1  = vec_abs(b);

  // Get the signs using xor
  Packet4bi sgn = (Packet4bi) vec_cmplt(vec_xor(a, b), p4i_ZERO);

  // Do the multiplication for the absolute values.
  bswap = (Packet4i) vec_rl((Packet4ui) b1, (Packet4ui) p4i_MINUS16 );
  low_prod = vec_mulo((Packet8i) a1, (Packet8i)b1);
  high_prod = vec_msum((Packet8i) a1, (Packet8i) bswap, p4i_ZERO);
  high_prod = (Packet4i) vec_sl((Packet4ui) high_prod, (Packet4ui) p4i_MINUS16);
  prod = vec_add( low_prod, high_prod );

  // NOR the product and select only the negative elements according to the sign mask
  prod_ = vec_nor(prod, prod);
  prod_ = vec_sel(p4i_ZERO, prod_, sgn);

  // Add 1 to the result to get the negative numbers
  v1sel = vec_sel(p4i_ZERO, p4i_ONE, sgn);
  prod_ = vec_add(prod_, v1sel);

  // Merge the results back to the final vector.
  prod = vec_sel(prod, prod_, sgn);

  return prod;
}
*/
template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b)
{
  Packet4f t, y_0, y_1, res;

  // Altivec does not offer a divide instruction, so we have to do a reciprocal approximation
  y_0 = vec_re(b);

  // Do one Newton-Raphson iteration to get the needed accuracy
  t   = vec_nmsub(y_0, b, p4f_ONE);
  y_1 = vec_madd(y_0, t, y_0);

  res = vec_madd(a, y_1, p4f_ZERO);
  return res;
}
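// Scalar sketch (illustrative only, not part of Eigen) of the refinement performed above:
// vec_re(b) returns a low-precision estimate y_0 of 1/b, and one Newton-Raphson step
//   y_1 = y_0 + y_0 * (1 - b * y_0)
// roughly doubles the number of correct bits, so a/b is then approximated by a * y_1.
inline float eigen_doc_reciprocal_division_sketch(float a, float b, float y_0 /* estimate of 1/b */)
{
  float t   = 1.0f - b * y_0;   // plays the role of vec_nmsub(y_0, b, p4f_ONE)
  float y_1 = y_0 + y_0 * t;    // plays the role of vec_madd(y_0, t, y_0)
  return a * y_1;               // plays the role of vec_madd(a, y_1, p4f_ZERO)
}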

template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
{ eigen_assert(false && "packet integer division is not supported by AltiVec");
  return pset1<Packet4i>(0);
}

// For some weird reason, this has to be overloaded for packets of integers
template<> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f& a, const Packet4f& b, const Packet4f& c) { return vec_madd(a, b, c); }
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }

template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_min(a, b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_min(a, b); }

template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_max(a, b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_max(a, b); }

// Logical operations: AltiVec's bitwise intrinsics (vec_and, vec_or, vec_xor, vec_nor) operate directly on both float and int vectors
template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_and(a, b); }
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, b); }

template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_or(a, b); }
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_or(a, b); }

template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_xor(a, b); }
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_xor(a, b); }

template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return vec_and(a, vec_nor(b, b)); }
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return vec_and(a, vec_nor(b, b)); }

template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return vec_ld(0, from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return vec_ld(0, from); }

template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
  Packet16uc MSQ, LSQ;
  Packet16uc mask;
  MSQ = vec_ld(0, (unsigned char *)from);          // most significant quadword
  LSQ = vec_ld(15, (unsigned char *)from);         // least significant quadword
  mask = vec_lvsl(0, from);                        // create the permute mask
  return (Packet4f) vec_perm(MSQ, LSQ, mask);      // align the data
}
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
  // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
  Packet16uc MSQ, LSQ;
  Packet16uc mask;
  MSQ = vec_ld(0, (unsigned char *)from);          // most significant quadword
  LSQ = vec_ld(15, (unsigned char *)from);         // least significant quadword
  mask = vec_lvsl(0, from);                        // create the permute mask
  return (Packet4i) vec_perm(MSQ, LSQ, mask);      // align the data
}
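// Illustrative usage sketch (not part of Eigen; the helper below is made up): ploadu accepts any
// float pointer. Internally it loads the two aligned quadwords that straddle the address and
// permutes the wanted 16 bytes out of them, so e.g. a pointer offset by one float still works,
// whereas pload would require 16-byte alignment.
inline Packet4f eigen_doc_unaligned_load_example(const float* aligned_base)
{
  return ploadu<Packet4f>(aligned_base + 1); // 4-byte misalignment is handled by the vec_perm above
}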

template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float*   from)
{
  Packet4f p;
  if((ptrdiff_t(from) % 16) == 0)   p = pload<Packet4f>(from);
  else                              p = ploadu<Packet4f>(from);
  return vec_perm(p, p, p16uc_DUPLICATE);
}
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int*     from)
{
  Packet4i p;
  if((ptrdiff_t(from) % 16) == 0)   p = pload<Packet4i>(from);
  else                              p = ploadu<Packet4i>(from);
  return vec_perm(p, p, p16uc_DUPLICATE);
}
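// Illustrative usage sketch (not part of Eigen; the helper below is made up): with the
// p16uc_DUPLICATE byte mask defined above, ploaddup reads two scalars and duplicates each
// of them into a pair of lanes.
inline Packet4f eigen_doc_loaddup_example(const float* from)
{
  return ploaddup<Packet4f>(from); // == { from[0], from[0], from[1], from[1] }
}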

template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE vec_st(from, 0, to); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE vec_st(from, 0, to); }

template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*  to, const Packet4f& from)
{
  EIGEN_DEBUG_UNALIGNED_STORE
  // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
  // Warning: not thread safe!
  Packet16uc MSQ, LSQ, edges;
  Packet16uc edgeAlign, align;

  MSQ = vec_ld(0, (unsigned char *)to);                     // most significant quadword
  LSQ = vec_ld(15, (unsigned char *)to);                    // least significant quadword
  edgeAlign = vec_lvsl(0, to);                              // permute map to extract edges
  edges=vec_perm(LSQ,MSQ,edgeAlign);                        // extract the edges
  align = vec_lvsr( 0, to );                                // permute map to misalign data
  MSQ = vec_perm(edges,(Packet16uc)from,align);             // misalign the data (MSQ)
  LSQ = vec_perm((Packet16uc)from,edges,align);             // misalign the data (LSQ)
  vec_st( LSQ, 15, (unsigned char *)to );                   // Store the LSQ part first
  vec_st( MSQ, 0, (unsigned char *)to );                    // Store the MSQ part
}
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*      to, const Packet4i& from)
{
  EIGEN_DEBUG_UNALIGNED_STORE
  // Taken from http://developer.apple.com/hardwaredrivers/ve/alignment.html
  // Warning: not thread safe!
  Packet16uc MSQ, LSQ, edges;
  Packet16uc edgeAlign, align;

  MSQ = vec_ld(0, (unsigned char *)to);                     // most significant quadword
  LSQ = vec_ld(15, (unsigned char *)to);                    // least significant quadword
  edgeAlign = vec_lvsl(0, to);                              // permute map to extract edges
  edges=vec_perm(LSQ, MSQ, edgeAlign);                      // extract the edges
  align = vec_lvsr( 0, to );                                // permute map to misalign data
  MSQ = vec_perm(edges, (Packet16uc) from, align);          // misalign the data (MSQ)
  LSQ = vec_perm((Packet16uc) from, edges, align);          // misalign the data (LSQ)
  vec_st( LSQ, 15, (unsigned char *)to );                   // Store the LSQ part first
  vec_st( MSQ, 0, (unsigned char *)to );                    // Store the MSQ part
}
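// Illustrative usage sketch (not part of Eigen; the helper below is made up). As the comments
// above note, pstoreu is a read-modify-write of the two aligned quadwords that straddle the
// destination, which is why it is not thread safe: another thread updating the neighbouring
// lanes of those quadwords at the same time can have its update silently overwritten.
inline void eigen_doc_unaligned_store_example(float* buffer /* at least 8 floats, 16-byte aligned */)
{
  // Writes buffer[1..4]; internally reloads and rewrites all of buffer[0..3] and buffer[4..7].
  pstoreu(buffer + 1, pset1<Packet4f>(1.0f));
}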

template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { vec_dstt(addr, DST_CTRL(2,2,32), DST_CHAN); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*     addr) { vec_dstt(addr, DST_CTRL(2,2,32), DST_CHAN); }
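// Explanatory note (added comment, not in the original source): per the DST_CTRL(size, count, stride)
// macro defined earlier, DST_CTRL(2,2,32) encodes the control word 0x02020020, i.e. a data stream of
// 2 blocks of 2 vectors each with a 32-byte stride, issued as a transient stream (vec_dstt) on
// channel DST_CHAN (1).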

template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float EIGEN_ALIGN16 x[4]; vec_st(a, 0, x); return x[0]; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int   EIGEN_ALIGN16 x[4]; vec_st(a, 0, x); return x[0]; }

template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a) { return (Packet4f)vec_perm((Packet16uc)a,(Packet16uc)a, p16uc_REVERSE); }
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a) { return (Packet4i)vec_perm((Packet16uc)a,(Packet16uc)a, p16uc_REVERSE); }

template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a) { return vec_abs(a); }
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a) { return vec_abs(a); }

template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  Packet4f b, sum;
  b   = (Packet4f) vec_sld(a, a, 8);
  sum = vec_add(a, b);
  b   = (Packet4f) vec_sld(sum, sum, 4);
  sum = vec_add(sum, b);
  return pfirst(sum);
}
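// Scalar sketch (illustrative only, not part of Eigen) of the shuffle-and-add reduction above:
// adding the 8-byte rotation folds the upper half onto the lower half, and adding the 4-byte
// rotation folds the remaining pair, so lane 0 ends up holding a[0]+a[1]+a[2]+a[3].
inline float eigen_doc_horizontal_sum_sketch(const float a[4])
{
  float s0 = a[0] + a[2];  // lane 0 after the first vec_sld/vec_add step
  float s1 = a[1] + a[3];  // lane 1 after the first step
  return s0 + s1;          // lane 0 after the second step, i.e. pfirst(sum)
}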

template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  Packet4f v[4], sum[4];

  // It's easier and faster to transpose then add as columns
  // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
  // Do the transpose, first set of moves
  v[0] = vec_mergeh(vecs[0], vecs[2]);
  v[1] = vec_mergel(vecs[0], vecs[2]);
  v[2] = vec_mergeh(vecs[1], vecs[3]);
  v[3] = vec_mergel(vecs[1], vecs[3]);
  // Get the resulting vectors
  sum[0] = vec_mergeh(v[0], v[2]);
  sum[1] = vec_mergel(v[0], v[2]);
  sum[2] = vec_mergeh(v[1], v[3]);
  sum[3] = vec_mergel(v[1], v[3]);

  // Now do the summation:
  // Lines 0+1
  sum[0] = vec_add(sum[0], sum[1]);
  // Lines 2+3
  sum[1] = vec_add(sum[2], sum[3]);
  // Add the results
  sum[0] = vec_add(sum[0], sum[1]);

  return sum[0];
}
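// Explanatory note (added comment, not in the original source): after the mergeh/mergel transpose
// above, sum[k] holds lane k of all four input packets, so summing the four transposed vectors
// yields a packet whose i-th lane is the full horizontal sum of vecs[i], which is exactly what
// preduxp must return. The same reasoning applies to the integer version below.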

template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  Packet4i sum;
  sum = vec_sums(a, p4i_ZERO);
  sum = vec_sld(sum, p4i_ZERO, 12);
  return pfirst(sum);
}

template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  Packet4i v[4], sum[4];

  // It's easier and faster to transpose then add as columns
  // Check: http://www.freevec.org/function/matrix_4x4_transpose_floats for explanation
  // Do the transpose, first set of moves
  v[0] = vec_mergeh(vecs[0], vecs[2]);
  v[1] = vec_mergel(vecs[0], vecs[2]);
  v[2] = vec_mergeh(vecs[1], vecs[3]);
  v[3] = vec_mergel(vecs[1], vecs[3]);
  // Get the resulting vectors
  sum[0] = vec_mergeh(v[0], v[2]);
  sum[1] = vec_mergel(v[0], v[2]);
  sum[2] = vec_mergeh(v[1], v[3]);
  sum[3] = vec_mergel(v[1], v[3]);

  // Now do the summation:
  // Lines 0+1
  sum[0] = vec_add(sum[0], sum[1]);
  // Lines 2+3
  sum[1] = vec_add(sum[2], sum[3]);
  // Add the results
  sum[0] = vec_add(sum[0], sum[1]);

  return sum[0];
}

// Other reduction functions:
// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
  Packet4f prod;
  prod = pmul(a, (Packet4f)vec_sld(a, a, 8));
  return pfirst(pmul(prod, (Packet4f)vec_sld(prod, prod, 4)));
}

template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
{
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  return aux[0] * aux[1] * aux[2] * aux[3];
}

// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  Packet4f b, res;
  b = vec_min(a, vec_sld(a, a, 8));
  res = vec_min(b, vec_sld(b, b, 4));
  return pfirst(res);
}

template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
  Packet4i b, res;
  b = vec_min(a, vec_sld(a, a, 8));
  res = vec_min(b, vec_sld(b, b, 4));
  return pfirst(res);
}

// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  Packet4f b, res;
  b = vec_max(a, vec_sld(a, a, 8));
  res = vec_max(b, vec_sld(b, b, 4));
  return pfirst(res);
}

template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
  Packet4i b, res;
  b = vec_max(a, vec_sld(a, a, 8));
  res = vec_max(b, vec_sld(b, b, 4));
  return pfirst(res);
}

template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  EIGEN_STRONG_INLINE static void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset!=0)
      first = vec_sld(first, second, Offset*4);
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  EIGEN_STRONG_INLINE static void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset!=0)
      first = vec_sld(first, second, Offset*4);
  }
};
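// Explanatory note (added comment, not in the original source): vec_sld concatenates its two
// operands and keeps 16 bytes starting at byte Offset*4, so for Offset==1 palign turns `first`
// into { first[1], first[2], first[3], second[0] }, i.e. the window shifted by Offset elements
// that the alignment-handling kernels expect.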

} // end namespace internal

#endif // EIGEN_PACKET_MATH_ALTIVEC_H

