PacketMath.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2008-2009 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_PACKET_MATH_SSE_H
#define EIGEN_PACKET_MATH_SSE_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif

typedef __m128  Packet4f;
typedef __m128i Packet4i;
typedef __m128d Packet2d;

template<> struct is_arithmetic<__m128>  { enum { value = true }; };
template<> struct is_arithmetic<__m128i> { enum { value = true }; };
template<> struct is_arithmetic<__m128d> { enum { value = true }; };

#define vec4f_swizzle1(v,p,q,r,s) \
  (_mm_castsi128_ps(_mm_shuffle_epi32( _mm_castps_si128(v), ((s)<<6|(r)<<4|(q)<<2|(p)))))

#define vec4i_swizzle1(v,p,q,r,s) \
  (_mm_shuffle_epi32( v, ((s)<<6|(r)<<4|(q)<<2|(p))))

#define vec2d_swizzle1(v,p,q) \
  (_mm_castsi128_pd(_mm_shuffle_epi32( _mm_castpd_si128(v), ((q*2+1)<<6|(q*2)<<4|(p*2+1)<<2|(p*2)))))

#define vec4f_swizzle2(a,b,p,q,r,s) \
  (_mm_shuffle_ps( (a), (b), ((s)<<6|(r)<<4|(q)<<2|(p))))

#define vec4i_swizzle2(a,b,p,q,r,s) \
  (_mm_castps_si128( (_mm_shuffle_ps( _mm_castsi128_ps(a), _mm_castsi128_ps(b), ((s)<<6|(r)<<4|(q)<<2|(p))))))
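// For illustration: each swizzle picks a source lane per destination lane,
// lane p going to lane 0, q to lane 1, and so on. Two hypothetical uses:
//   Packet4f rev = vec4f_swizzle1(v, 3, 2, 1, 0);  // {v3,v2,v1,v0}, a full reverse
//   Packet4f bc0 = vec4f_swizzle1(v, 0, 0, 0, 0);  // {v0,v0,v0,v0}, a broadcast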

#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  const Packet4f p4f_##NAME = pset1<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \
  const Packet2d p2d_##NAME = pset1<Packet2d>(X)

#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = _mm_castsi128_ps(pset1<Packet4i>(X))

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  const Packet4i p4i_##NAME = pset1<Packet4i>(X)


template<> struct packet_traits<float>  : default_packet_traits
{
  typedef Packet4f type;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4,

    HasDiv  = 1,
    HasSin  = EIGEN_FAST_MATH,
    HasCos  = EIGEN_FAST_MATH,
    HasLog  = 1,
    HasExp  = 1,
    HasSqrt = 1
  };
};
template<> struct packet_traits<double> : default_packet_traits
{
  typedef Packet2d type;
  enum {
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=2,

    HasDiv  = 1,
    HasExp  = 1,
    HasSqrt = 1
  };
};
template<> struct packet_traits<int>    : default_packet_traits
{
  typedef Packet4i type;
  enum {
    // FIXME check the Has*
    Vectorizable = 1,
    AlignedOnScalar = 1,
    size=4
  };
};

template<> struct unpacket_traits<Packet4f> { typedef float  type; enum {size=4}; };
template<> struct unpacket_traits<Packet2d> { typedef double type; enum {size=2}; };
template<> struct unpacket_traits<Packet4i> { typedef int    type; enum {size=4}; };

#if defined(_MSC_VER) && (_MSC_VER==1500)
// Workaround MSVC 9 internal compiler error.
// TODO: It has been detected with win64 builds (amd64), so let's check whether it also happens in 32bits+SSE mode
// TODO: let's check whether there does not exist a better fix, like adding a pset0() function. (it crashed on pset1(0)).
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set_ps(from,from,from,from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set_pd(from,from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set_epi32(from,from,from,from); }
#else
template<> EIGEN_STRONG_INLINE Packet4f pset1<Packet4f>(const float&  from) { return _mm_set1_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d pset1<Packet2d>(const double& from) { return _mm_set1_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pset1<Packet4i>(const int&    from) { return _mm_set1_epi32(from); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f plset<float>(const float& a) { return _mm_add_ps(pset1<Packet4f>(a), _mm_set_ps(3,2,1,0)); }
template<> EIGEN_STRONG_INLINE Packet2d plset<double>(const double& a) { return _mm_add_pd(pset1<Packet2d>(a),_mm_set_pd(1,0)); }
template<> EIGEN_STRONG_INLINE Packet4i plset<int>(const int& a) { return _mm_add_epi32(pset1<Packet4i>(a),_mm_set_epi32(3,2,1,0)); }
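// For illustration: plset produces an arithmetic ramp starting at 'a', e.g.
// plset<float>(10.f) yields {10.f, 11.f, 12.f, 13.f} (note that _mm_set_*
// takes its arguments from the most significant lane down).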

template<> EIGEN_STRONG_INLINE Packet4f padd<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_add_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d padd<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_add_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i padd<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_add_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f psub<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_sub_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d psub<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_sub_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i psub<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_sub_epi32(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pnegate(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x80000000,0x80000000,0x80000000,0x80000000));
  return _mm_xor_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pnegate(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0x0,0x80000000,0x0,0x80000000));
  return _mm_xor_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pnegate(const Packet4i& a)
{
  return psub(_mm_setr_epi32(0,0,0,0), a);
}
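// For illustration: XORing the IEEE sign bit negates a float/double lane
// without touching the other bits; integers have no isolated sign bit, so
// the Packet4i version falls back to computing 0 - a per lane.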

template<> EIGEN_STRONG_INLINE Packet4f pconj(const Packet4f& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet2d pconj(const Packet2d& a) { return a; }
template<> EIGEN_STRONG_INLINE Packet4i pconj(const Packet4i& a) { return a; }

template<> EIGEN_STRONG_INLINE Packet4f pmul<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_mul_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmul<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_mul_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmul<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_mullo_epi32(a,b);
#else
  // this version is slightly faster than 4 scalar products
  return vec4i_swizzle1(
            vec4i_swizzle2(
              _mm_mul_epu32(a,b),
              _mm_mul_epu32(vec4i_swizzle1(a,1,0,3,2),
                            vec4i_swizzle1(b,1,0,3,2)),
              0,2,0,2),
            0,2,1,3);
#endif
}
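// For illustration: _mm_mul_epu32 multiplies only the even 32-bit lanes into
// 64-bit results, so the SSE2 fallback computes the products of lanes {0,2}
// directly and of lanes {1,3} after swapping neighbours; the outer swizzles
// then gather the four low 32-bit halves back into {a0*b0,a1*b1,a2*b2,a3*b3}.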

template<> EIGEN_STRONG_INLINE Packet4f pdiv<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_div_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pdiv<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_div_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pdiv<Packet4i>(const Packet4i& /*a*/, const Packet4i& /*b*/)
{ eigen_assert(false && "packet integer division is not supported by SSE");
  return pset1<Packet4i>(0);
}

// for some weird reasons, it has to be overloaded for packets of integers
template<> EIGEN_STRONG_INLINE Packet4i pmadd(const Packet4i& a, const Packet4i& b, const Packet4i& c) { return padd(pmul(a,b), c); }

template<> EIGEN_STRONG_INLINE Packet4f pmin<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_min_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmin<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_min_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmin<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_min_epi32(a,b);
#else
  // after some benchmarking, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmplt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}
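// For illustration: the SSE2 fallback is a branch-free select,
// (mask & a) | (~mask & b), where the compare sets mask to all-ones exactly
// in the lanes where a<b, yielding the per-lane minimum.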

template<> EIGEN_STRONG_INLINE Packet4f pmax<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_max_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pmax<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_max_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pmax<Packet4i>(const Packet4i& a, const Packet4i& b)
{
#ifdef EIGEN_VECTORIZE_SSE4_1
  return _mm_max_epi32(a,b);
#else
  // after some benchmarking, this version *is* faster than a scalar implementation
  Packet4i mask = _mm_cmpgt_epi32(a,b);
  return _mm_or_si128(_mm_and_si128(mask,a),_mm_andnot_si128(mask,b));
#endif
}

template<> EIGEN_STRONG_INLINE Packet4f pand<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_and_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pand<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_and_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pand<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_and_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f por<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_or_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d por<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_or_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i por<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_or_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pxor<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_xor_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pxor<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_xor_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pxor<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_xor_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pandnot<Packet4f>(const Packet4f& a, const Packet4f& b) { return _mm_andnot_ps(a,b); }
template<> EIGEN_STRONG_INLINE Packet2d pandnot<Packet2d>(const Packet2d& a, const Packet2d& b) { return _mm_andnot_pd(a,b); }
template<> EIGEN_STRONG_INLINE Packet4i pandnot<Packet4i>(const Packet4i& a, const Packet4i& b) { return _mm_andnot_si128(a,b); }

template<> EIGEN_STRONG_INLINE Packet4f pload<Packet4f>(const float*   from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_ps(from); }
template<> EIGEN_STRONG_INLINE Packet2d pload<Packet2d>(const double*  from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_pd(from); }
template<> EIGEN_STRONG_INLINE Packet4i pload<Packet4i>(const int*     from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm_load_si128(reinterpret_cast<const Packet4i*>(from)); }

#if defined(_MSC_VER)
  template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float*  from) {
    EIGEN_DEBUG_UNALIGNED_LOAD
    #if (_MSC_VER==1600)
    // NOTE Some versions of MSVC10 generate bad code when using _mm_loadu_ps
    // (i.e., they do not emit an unaligned load!!)
    // TODO On most architectures this version should also be faster than a single _mm_loadu_ps,
    // so we could also enable it for MSVC08, but first we have to make sure the latter does not generate crap when doing so...
    __m128 res = _mm_loadl_pi(_mm_set1_ps(0.0f), (const __m64*)(from));
    res = _mm_loadh_pi(res, (const __m64*)(from+2));
    return res;
    #else
    return _mm_loadu_ps(from);
    #endif
  }
  template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_pd(from); }
  template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int*    from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from)); }
#else
// Fast unaligned loads. Note that here we cannot directly use intrinsics: this would
// require pointer casting to incompatible pointer types and leads to invalid code
// because of the strict aliasing rule. The "dummy" stuff is required to enforce
// a correct instruction dependency.
// TODO: do the same for MSVC (ICC is compatible)
// NOTE: with the code below, MSVC's compiler crashes!

#if defined(__GNUC__) && defined(__i386__)
  // bug 195: gcc/i386 emits weird x87 fldl/fstpl instructions for _mm_load_sd
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
#elif defined(__clang__)
  // bug 201: Segfaults in _mm_loadh_pd with clang 2.8
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 1
#else
  #define EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS 0
#endif

template<> EIGEN_STRONG_INLINE Packet4f ploadu<Packet4f>(const float* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
  return _mm_loadu_ps(from);
#else
  __m128d res;
  res = _mm_load_sd((const double*)(from));
  res = _mm_loadh_pd(res, (const double*)(from+2));
  return _mm_castpd_ps(res);
#endif
}
template<> EIGEN_STRONG_INLINE Packet2d ploadu<Packet2d>(const double* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
  return _mm_loadu_pd(from);
#else
  __m128d res;
  res = _mm_load_sd(from);
  res = _mm_loadh_pd(res,from+1);
  return res;
#endif
}
template<> EIGEN_STRONG_INLINE Packet4i ploadu<Packet4i>(const int* from)
{
  EIGEN_DEBUG_UNALIGNED_LOAD
#if EIGEN_AVOID_CUSTOM_UNALIGNED_LOADS
  return _mm_loadu_si128(reinterpret_cast<const Packet4i*>(from));
#else
  __m128d res;
  res = _mm_load_sd((const double*)(from));
  res = _mm_loadh_pd(res, (const double*)(from+2));
  return _mm_castpd_si128(res);
#endif
}
#endif
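// For illustration: the custom path assembles the unaligned load from two
// 64-bit halves (_mm_load_sd for the low half, _mm_loadh_pd for the high one),
// chained through 'res' so the compiler keeps the intended dependency order.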

template<> EIGEN_STRONG_INLINE Packet4f ploaddup<Packet4f>(const float*   from)
{
  return vec4f_swizzle1(_mm_castpd_ps(_mm_load_sd(reinterpret_cast<const double*>(from))), 0, 0, 1, 1);
}
template<> EIGEN_STRONG_INLINE Packet2d ploaddup<Packet2d>(const double*  from)
{ return pset1<Packet2d>(from[0]); }
template<> EIGEN_STRONG_INLINE Packet4i ploaddup<Packet4i>(const int*     from)
{
  Packet4i tmp;
  tmp = _mm_loadl_epi64(reinterpret_cast<const Packet4i*>(from));
  return vec4i_swizzle1(tmp, 0, 0, 1, 1);
}
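// For illustration: ploaddup reads size/2 scalars and duplicates each of them,
// e.g. ploaddup<Packet4f>(p) yields {p[0],p[0],p[1],p[1]}.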

template<> EIGEN_STRONG_INLINE void pstore<float>(float*   to, const Packet4f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_ps(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet2d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_pd(to, from); }
template<> EIGEN_STRONG_INLINE void pstore<int>(int*       to, const Packet4i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm_store_si128(reinterpret_cast<Packet4i*>(to), from); }

template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet2d& from) {
  EIGEN_DEBUG_UNALIGNED_STORE
  _mm_storel_pd((to), from);
  _mm_storeh_pd((to+1), from);
}
template<> EIGEN_STRONG_INLINE void pstoreu<float>(float*  to, const Packet4f& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), _mm_castps_pd(from)); }
template<> EIGEN_STRONG_INLINE void pstoreu<int>(int*      to, const Packet4i& from) { EIGEN_DEBUG_UNALIGNED_STORE pstoreu(reinterpret_cast<double*>(to), _mm_castsi128_pd(from)); }
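// For illustration: the unaligned store mirrors the unaligned load, writing
// the packet as two 64-bit halves; the float and int variants simply
// reinterpret their packet as a Packet2d to reuse the double path.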

// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet4f>(float* to, const float& a)
{
  Packet4f pa = _mm_set_ss(a);
  pstore(to, vec4f_swizzle1(pa,0,0,0,0));
}
// some compilers might be tempted to perform multiple moves instead of using a vector path.
template<> EIGEN_STRONG_INLINE void pstore1<Packet2d>(double* to, const double& a)
{
  Packet2d pa = _mm_set_sd(a);
  pstore(to, vec2d_swizzle1(pa,0,0));
}

template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }

#if defined(_MSC_VER) && defined(_WIN64) && !defined(__INTEL_COMPILER)
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
// Direct access to the struct members fixed bug #62.
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return a.m128_f32[0]; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return a.m128d_f64[0]; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#elif defined(_MSC_VER) && !defined(__INTEL_COMPILER)
// The temporary variable fixes an internal compilation error in vs <= 2008 and a wrong-result bug in vs 2010
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { float x = _mm_cvtss_f32(a); return x; }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { double x = _mm_cvtsd_f64(a); return x; }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { int x = _mm_cvtsi128_si32(a); return x; }
#else
template<> EIGEN_STRONG_INLINE float  pfirst<Packet4f>(const Packet4f& a) { return _mm_cvtss_f32(a); }
template<> EIGEN_STRONG_INLINE double pfirst<Packet2d>(const Packet2d& a) { return _mm_cvtsd_f64(a); }
template<> EIGEN_STRONG_INLINE int    pfirst<Packet4i>(const Packet4i& a) { return _mm_cvtsi128_si32(a); }
#endif

template<> EIGEN_STRONG_INLINE Packet4f preverse(const Packet4f& a)
{ return _mm_shuffle_ps(a,a,0x1B); }
template<> EIGEN_STRONG_INLINE Packet2d preverse(const Packet2d& a)
{ return _mm_shuffle_pd(a,a,0x1); }
template<> EIGEN_STRONG_INLINE Packet4i preverse(const Packet4i& a)
{ return _mm_shuffle_epi32(a,0x1B); }
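// For illustration: 0x1B is 0b00011011, i.e. the lane order (3,2,1,0) encoded
// two bits per destination lane starting from the least significant bits, so
// these are exactly the reverse swizzles written as raw immediates.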


template<> EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f& a)
{
  const Packet4f mask = _mm_castsi128_ps(_mm_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm_and_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet2d pabs(const Packet2d& a)
{
  const Packet2d mask = _mm_castsi128_pd(_mm_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm_and_pd(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4i pabs(const Packet4i& a)
{
  #ifdef EIGEN_VECTORIZE_SSSE3
  return _mm_abs_epi32(a);
  #else
  Packet4i aux = _mm_srai_epi32(a,31);
  return _mm_sub_epi32(_mm_xor_si128(a,aux),aux);
  #endif
}
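// For illustration: clearing the IEEE sign bit with an AND mask gives the
// float/double abs; in the SSE2 integer fallback, aux is all-ones exactly for
// negative lanes, and (a ^ aux) - aux computes a for a>=0 and ~a+1 == -a otherwise.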

EIGEN_STRONG_INLINE void punpackp(Packet4f* vecs)
{
  vecs[1] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x55));
  vecs[2] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xAA));
  vecs[3] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0xFF));
  vecs[0] = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(vecs[0]), 0x00));
}

#ifdef EIGEN_VECTORIZE_SSE3
// TODO implement SSE2 versions as well as integer versions
template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  return _mm_hadd_ps(_mm_hadd_ps(vecs[0], vecs[1]),_mm_hadd_ps(vecs[2], vecs[3]));
}
template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  return _mm_hadd_pd(vecs[0], vecs[1]);
}
// SSSE3 version:
// EIGEN_STRONG_INLINE Packet4i preduxp(const Packet4i* vecs)
// {
//   return _mm_hadd_epi32(_mm_hadd_epi32(vecs[0], vecs[1]),_mm_hadd_epi32(vecs[2], vecs[3]));
// }

template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  Packet4f tmp0 = _mm_hadd_ps(a,a);
  return pfirst(_mm_hadd_ps(tmp0, tmp0));
}

template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a) { return pfirst(_mm_hadd_pd(a, a)); }

// SSSE3 version:
// EIGEN_STRONG_INLINE int predux(const Packet4i& a)
// {
//   Packet4i tmp0 = _mm_hadd_epi32(a,a);
//   return pfirst(_mm_hadd_epi32(tmp0, tmp0));
// }
#else
// SSE2 versions
template<> EIGEN_STRONG_INLINE float predux<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_add_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_add_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_add_sd(a, _mm_unpackhi_pd(a,a)));
}

template<> EIGEN_STRONG_INLINE Packet4f preduxp<Packet4f>(const Packet4f* vecs)
{
  Packet4f tmp0, tmp1, tmp2;
  tmp0 = _mm_unpacklo_ps(vecs[0], vecs[1]);
  tmp1 = _mm_unpackhi_ps(vecs[0], vecs[1]);
  tmp2 = _mm_unpackhi_ps(vecs[2], vecs[3]);
  tmp0 = _mm_add_ps(tmp0, tmp1);
  tmp1 = _mm_unpacklo_ps(vecs[2], vecs[3]);
  tmp1 = _mm_add_ps(tmp1, tmp2);
  tmp2 = _mm_movehl_ps(tmp1, tmp0);
  tmp0 = _mm_movelh_ps(tmp0, tmp1);
  return _mm_add_ps(tmp0, tmp2);
}

template<> EIGEN_STRONG_INLINE Packet2d preduxp<Packet2d>(const Packet2d* vecs)
{
  return _mm_add_pd(_mm_unpacklo_pd(vecs[0], vecs[1]), _mm_unpackhi_pd(vecs[0], vecs[1]));
}
#endif  // SSE3

template<> EIGEN_STRONG_INLINE int predux<Packet4i>(const Packet4i& a)
{
  Packet4i tmp = _mm_add_epi32(a, _mm_unpackhi_epi64(a,a));
  return pfirst(tmp) + pfirst(_mm_shuffle_epi32(tmp, 1));
}
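// For illustration: predux folds the packet in halves, e.g. for {a0,a1,a2,a3}
// the first add produces {a0+a2, a1+a3, ...} and the final scalar add yields
// (a0+a2)+(a1+a3); the SSE3 float path gets the same result via _mm_hadd_ps.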

template<> EIGEN_STRONG_INLINE Packet4i preduxp<Packet4i>(const Packet4i* vecs)
{
  Packet4i tmp0, tmp1, tmp2;
  tmp0 = _mm_unpacklo_epi32(vecs[0], vecs[1]);
  tmp1 = _mm_unpackhi_epi32(vecs[0], vecs[1]);
  tmp2 = _mm_unpackhi_epi32(vecs[2], vecs[3]);
  tmp0 = _mm_add_epi32(tmp0, tmp1);
  tmp1 = _mm_unpacklo_epi32(vecs[2], vecs[3]);
  tmp1 = _mm_add_epi32(tmp1, tmp2);
  tmp2 = _mm_unpacklo_epi64(tmp0, tmp1);
  tmp0 = _mm_unpackhi_epi64(tmp0, tmp1);
  return _mm_add_epi32(tmp0, tmp2);
}
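// For illustration: preduxp effectively transposes the four input packets via
// the unpack lo/hi pairs while summing, so lane k of the returned packet holds
// the full reduction of vecs[k].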

// Other reduction functions:

// mul
template<> EIGEN_STRONG_INLINE float predux_mul<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_mul_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_mul_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_mul_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_mul<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., reusing pmul is very slow!)
  // TODO try to call _mm_mul_epu32 directly
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  return (aux[0] * aux[1]) * (aux[2] * aux[3]);
}

// min
template<> EIGEN_STRONG_INLINE float predux_min<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_min_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_min_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_min_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_min<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::min after the pstore!!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  int aux0 = aux[0]<aux[1] ? aux[0] : aux[1];
  int aux2 = aux[2]<aux[3] ? aux[2] : aux[3];
  return aux0<aux2 ? aux0 : aux2;
}

// max
template<> EIGEN_STRONG_INLINE float predux_max<Packet4f>(const Packet4f& a)
{
  Packet4f tmp = _mm_max_ps(a, _mm_movehl_ps(a,a));
  return pfirst(_mm_max_ss(tmp, _mm_shuffle_ps(tmp,tmp, 1)));
}
template<> EIGEN_STRONG_INLINE double predux_max<Packet2d>(const Packet2d& a)
{
  return pfirst(_mm_max_sd(a, _mm_unpackhi_pd(a,a)));
}
template<> EIGEN_STRONG_INLINE int predux_max<Packet4i>(const Packet4i& a)
{
  // after some experiments, it seems this is the fastest way to implement it
  // for GCC (e.g., it does not like using std::max after the pstore!!)
  EIGEN_ALIGN16 int aux[4];
  pstore(aux, a);
  int aux0 = aux[0]>aux[1] ? aux[0] : aux[1];
  int aux2 = aux[2]>aux[3] ? aux[2] : aux[3];
  return aux0>aux2 ? aux0 : aux2;
}

#if (defined __GNUC__)
// template <> EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f&  a, const Packet4f&  b, const Packet4f&  c)
// {
//   Packet4f res = b;
//   asm("mulps %[a], %[b] \n\taddps %[c], %[b]" : [b] "+x" (res) : [a] "x" (a), [c] "x" (c));
//   return res;
// }
// EIGEN_STRONG_INLINE Packet4i _mm_alignr_epi8(const Packet4i&  a, const Packet4i&  b, const int i)
// {
//   Packet4i res = a;
//   asm("palignr %[i], %[a], %[b] " : [b] "+x" (res) : [a] "x" (a), [i] "i" (i));
//   return res;
// }
#endif

#ifdef EIGEN_VECTORIZE_SSSE3
// SSSE3 versions
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset!=0)
      first = _mm_castsi128_ps(_mm_alignr_epi8(_mm_castps_si128(second), _mm_castps_si128(first), Offset*4));
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset!=0)
      first = _mm_alignr_epi8(second,first, Offset*4);
  }
};

template<int Offset>
struct palign_impl<Offset,Packet2d>
{
  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
  {
    if (Offset==1)
      first = _mm_castsi128_pd(_mm_alignr_epi8(_mm_castpd_si128(second), _mm_castpd_si128(first), 8));
  }
};
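// For illustration: palign_impl slides a window of one packet width across
// the pair (first, second); e.g. with Offset==1, inputs {f0,f1,f2,f3} and
// {s0,s1,s2,s3} turn 'first' into {f1,f2,f3,s0}. _mm_alignr_epi8 does this in
// a single instruction, Offset*4 being the shift in bytes.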
#else
// SSE2 versions
template<int Offset>
struct palign_impl<Offset,Packet4f>
{
  static EIGEN_STRONG_INLINE void run(Packet4f& first, const Packet4f& second)
  {
    if (Offset==1)
    {
      first = _mm_move_ss(first,second);
      first = _mm_castsi128_ps(_mm_shuffle_epi32(_mm_castps_si128(first),0x39));
    }
    else if (Offset==2)
    {
      first = _mm_movehl_ps(first,first);
      first = _mm_movelh_ps(first,second);
    }
    else if (Offset==3)
    {
      first = _mm_move_ss(first,second);
      first = _mm_shuffle_ps(first,second,0x93);
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet4i>
{
  static EIGEN_STRONG_INLINE void run(Packet4i& first, const Packet4i& second)
  {
    if (Offset==1)
    {
      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
      first = _mm_shuffle_epi32(first,0x39);
    }
    else if (Offset==2)
    {
      first = _mm_castps_si128(_mm_movehl_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(first)));
      first = _mm_castps_si128(_mm_movelh_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
    }
    else if (Offset==3)
    {
      first = _mm_castps_si128(_mm_move_ss(_mm_castsi128_ps(first),_mm_castsi128_ps(second)));
      first = _mm_castps_si128(_mm_shuffle_ps(_mm_castsi128_ps(first),_mm_castsi128_ps(second),0x93));
    }
  }
};

template<int Offset>
struct palign_impl<Offset,Packet2d>
{
  static EIGEN_STRONG_INLINE void run(Packet2d& first, const Packet2d& second)
  {
    if (Offset==1)
    {
      first = _mm_castps_pd(_mm_movehl_ps(_mm_castpd_ps(first),_mm_castpd_ps(first)));
      first = _mm_castps_pd(_mm_movelh_ps(_mm_castpd_ps(first),_mm_castpd_ps(second)));
    }
  }
};
#endif

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_SSE_H

