#ifndef EIGEN_PACKET_MATH_AVX_H
#define EIGEN_PACKET_MATH_AVX_H

namespace Eigen {

namespace internal {

#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
#endif

#ifdef __FMA__
#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif
#endif

typedef __m256  Packet8f;
typedef __m256i Packet8i;
typedef __m256d Packet4d;

#define _EIGEN_DECLARE_CONST_Packet8f(NAME,X) \
  const Packet8f p8f_##NAME = pset1<Packet8f>(X)

#define _EIGEN_DECLARE_CONST_Packet4d(NAME,X) \
  const Packet4d p4d_##NAME = pset1<Packet4d>(X)

#define _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(NAME,X) \
  const Packet8f p8f_##NAME = _mm256_castsi256_ps(pset1<Packet8i>(X))

#define _EIGEN_DECLARE_CONST_Packet8i(NAME,X) \
  const Packet8i p8i_##NAME = pset1<Packet8i>(X)

// When AVX512 is enabled, the packet_traits below are superseded by the ones
// defined in AVX512/PacketMath.h.
#ifndef EIGEN_VECTORIZE_AVX512
template<> struct packet_traits<float> : default_packet_traits
{
  typedef Packet8f type;
  typedef Packet4f half;
  /* ... */
};
template<> EIGEN_STRONG_INLINE Packet8f pnegate(const Packet8f& a)
{
  return _mm256_sub_ps(_mm256_set1_ps(0.0),a);
}
template<> EIGEN_STRONG_INLINE Packet4d pnegate(const Packet4d& a)
{
  return _mm256_sub_pd(_mm256_set1_pd(0.0),a);
}
template<> EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& /*a*/, const Packet8i& /*b*/)
{
  eigen_assert(false && "packet integer division is not supported by AVX");
  return pset1<Packet8i>(0);
}
#ifdef __FMA__
template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
#if ( EIGEN_COMP_GNUC_STRICT || (EIGEN_COMP_CLANG && (EIGEN_COMP_CLANG<308)) )
  // Work around poor register allocation: clang emits vfmadd213ps plus extra
  // vmovaps moves and gcc emits vfmadd132ps, so force vfmadd231ps, which
  // accumulates the product into c -- the most common use case.
  Packet8f res = c;
  __asm__("vfmadd231ps %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_fmadd_ps(a,b,c);
#endif
}
template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
#if ( EIGEN_COMP_GNUC_STRICT || (EIGEN_COMP_CLANG && (EIGEN_COMP_CLANG<308)) )
  // See above: force vfmadd231pd for the same reason.
  Packet4d res = c;
  __asm__("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
  return res;
#else
  return _mm256_fmadd_pd(a,b,c);
#endif
}
#endif
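// Caller-side sketch (illustrative, not part of this header): pmadd computes
// a*b+c per lane, so a dot product over n floats (assuming n is a multiple
// of 8) can accumulate with one FMA per packet:
//   Packet8f acc = pset1<Packet8f>(0.f);
//   for (Index i = 0; i < n; i += 8)
//     acc = pmadd(ploadu<Packet8f>(x+i), ploadu<Packet8f>(y+i), acc);
//   float dot = predux(acc); // horizontal sum of the 8 partial sums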
// Loads 4 floats from memory and returns the packet {a0, a0, a1, a1, a2, a2, a3, a3}
template<> EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(const float* from)
{
  // _mm256_insertf128_ps is very slow on Haswell, so broadcast the 128-bit half instead:
  Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
  // mimic an "inplace" permutation of the lower 128 bits using a blend
  tmp = _mm256_blend_ps(tmp,_mm256_castps128_ps256(_mm_permute_ps( _mm256_castps256_ps128(tmp), _MM_SHUFFLE(1,0,1,0))), 15);
  // then a consistent permutation of the whole register puts everything in shape:
  return _mm256_permute_ps(tmp, _MM_SHUFFLE(3,3,2,2));
}
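// Worked trace for from = {a0,a1,a2,a3}:
//   broadcast_ps       -> {a0,a1,a2,a3 | a0,a1,a2,a3}
//   blend (mask 15)    -> {a0,a1,a0,a1 | a0,a1,a2,a3}
//   permute (3,3,2,2)  -> {a0,a0,a1,a1 | a2,a2,a3,a3}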
// Loads 2 doubles from memory and returns the packet {a0, a0, a1, a1}
template<> EIGEN_STRONG_INLINE Packet4d ploaddup<Packet4d>(const double* from)
{
  Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from);
  // 3<<2 == 0b1100: keep {a0,a0} in the low lane, pick {a1,a1} in the high lane
  return _mm256_permute_pd(tmp, 3<<2);
}
// Loads 2 floats from memory and returns the packet {a0, a0, a0, a0, a1, a1, a1, a1}
template<> EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(const float* from)
{
  Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
  return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1);
}
template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride)
{
  return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride],
                       from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
template<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, Index stride)
{
  return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
}
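// Semantics: result[i] = from[i*stride]. AVX1 has no hardware gather
// (vgatherdps only arrives with AVX2), so the packet is assembled from
// scalar loads via _mm256_set_*.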
template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, Index stride)
{
  __m128 low = _mm256_extractf128_ps(from, 0);
  to[stride*0] = _mm_cvtss_f32(low);
  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1));
  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2));
  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3));

  __m128 high = _mm256_extractf128_ps(from, 1);
  to[stride*4] = _mm_cvtss_f32(high);
  to[stride*5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1));
  to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
  to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
}
template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, Index stride)
{
  __m128d low = _mm256_extractf128_pd(from, 0);
  to[stride*0] = _mm_cvtsd_f64(low);
  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1));
  __m128d high = _mm256_extractf128_pd(from, 1);
  to[stride*2] = _mm_cvtsd_f64(high);
  to[stride*3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1));
}
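// The inverse of pgather: to[i*stride] = from[i]. Hardware scatter only
// exists from AVX-512 on, hence the per-element shuffle-and-extract above.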
#ifndef EIGEN_VECTORIZE_AVX512
template<> EIGEN_STRONG_INLINE void prefetch<float>(const float*   addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
template<> EIGEN_STRONG_INLINE void prefetch<int>(const int*       addr) { _mm_prefetch((const char*)(addr), _MM_HINT_T0); }
#endif

template<> EIGEN_STRONG_INLINE float pfirst<Packet8f>(const Packet8f& a) {
  return _mm_cvtss_f32(_mm256_castps256_ps128(a));
}
template<> EIGEN_STRONG_INLINE double pfirst<Packet4d>(const Packet4d& a) {
  return _mm_cvtsd_f64(_mm256_castpd256_pd128(a));
}
template<> EIGEN_STRONG_INLINE int pfirst<Packet8i>(const Packet8i& a) {
  return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
}
template<> EIGEN_STRONG_INLINE Packet8f preverse(const Packet8f& a)
{
  __m256 tmp = _mm256_shuffle_ps(a,a,0x1b);
  return _mm256_permute2f128_ps(tmp, tmp, 1);
}
template<> EIGEN_STRONG_INLINE Packet4d preverse(const Packet4d& a)
{
  __m256d tmp = _mm256_shuffle_pd(a,a,5);
  return _mm256_permute2f128_pd(tmp, tmp, 1);
  // Equivalent alternative, kept for reference: swap the 128-bit halves first,
  // then swap within each half:
  //   __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
  //   return _mm256_permute_pd(swap_halves,5);
}
template<> EIGEN_STRONG_INLINE Packet8f pabs(const Packet8f& a)
{
  const Packet8f mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
  return _mm256_and_ps(a,mask);
}
template<> EIGEN_STRONG_INLINE Packet4d pabs(const Packet4d& a)
{
  const Packet4d mask = _mm256_castsi256_pd(_mm256_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
  return _mm256_and_pd(a,mask);
}
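// Both masks clear exactly the IEEE sign bit of every lane (for doubles the
// sign bit sits in the upper 32-bit half, hence the 0xFFFFFFFF,0x7FFFFFFF
// pairs), so pabs computes |a| with a single branch-free AND.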
template<> EIGEN_STRONG_INLINE Packet8f preduxp<Packet8f>(const Packet8f* vecs)
{
    __m256 hsum1 = _mm256_hadd_ps(vecs[0], vecs[1]);
    __m256 hsum2 = _mm256_hadd_ps(vecs[2], vecs[3]);
    __m256 hsum3 = _mm256_hadd_ps(vecs[4], vecs[5]);
    __m256 hsum4 = _mm256_hadd_ps(vecs[6], vecs[7]);

    __m256 hsum5 = _mm256_hadd_ps(hsum1, hsum1);
    __m256 hsum6 = _mm256_hadd_ps(hsum2, hsum2);
    __m256 hsum7 = _mm256_hadd_ps(hsum3, hsum3);
    __m256 hsum8 = _mm256_hadd_ps(hsum4, hsum4);

    __m256 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
    __m256 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
    __m256 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
    __m256 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);

    __m256 sum1 = _mm256_add_ps(perm1, hsum5);
    __m256 sum2 = _mm256_add_ps(perm2, hsum6);
    __m256 sum3 = _mm256_add_ps(perm3, hsum7);
    __m256 sum4 = _mm256_add_ps(perm4, hsum8);

    __m256 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
    __m256 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);

    __m256 final = _mm256_blend_ps(blend1, blend2, 0xf0);

    return final;
}
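// Lane i of the returned packet holds the horizontal sum of vecs[i]; the
// hadd/permute/blend cascade performs all eight 8-way reductions in parallel
// rather than reducing each input packet on its own.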
template<> EIGEN_STRONG_INLINE Packet4d preduxp<Packet4d>(const Packet4d* vecs)
{
  Packet4d tmp0, tmp1;

  tmp0 = _mm256_hadd_pd(vecs[0], vecs[1]);
  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));

  tmp1 = _mm256_hadd_pd(vecs[2], vecs[3]);
  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));

  return _mm256_blend_pd(tmp0, tmp1, 0xC);
}
template<> EIGEN_STRONG_INLINE float predux<Packet8f>(const Packet8f& a)
{
  Packet8f tmp0 = _mm256_hadd_ps(a,_mm256_permute2f128_ps(a,a,1));
  tmp0 = _mm256_hadd_ps(tmp0,tmp0);
  return pfirst(_mm256_hadd_ps(tmp0, tmp0));
}
template<> EIGEN_STRONG_INLINE double predux<Packet4d>(const Packet4d& a)
{
  Packet4d tmp0 = _mm256_hadd_pd(a,_mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_hadd_pd(tmp0,tmp0));
}
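// Reduction scheme: the first hadd against the lane-swapped packet folds the
// 8 (resp. 4) lanes in half, and each further hadd halves the count again, so
// after log2(size) hadds lane 0 holds the total and pfirst extracts it.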
template<> EIGEN_STRONG_INLINE Packet4f predux_downto4<Packet8f>(const Packet8f& a)
{
  return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1));
}
template<> EIGEN_STRONG_INLINE float predux_mul<Packet8f>(const Packet8f& a)
{
  Packet8f tmp;
  tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_mul<Packet4d>(const Packet4d& a)
{
  Packet4d tmp;
  tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE float predux_min<Packet8f>(const Packet8f& a)
{
  Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_min<Packet4d>(const Packet4d& a)
{
  Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}
template<> EIGEN_STRONG_INLINE float predux_max<Packet8f>(const Packet8f& a)
{
  Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a,a,1));
  tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
  return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
}
template<> EIGEN_STRONG_INLINE double predux_max<Packet4d>(const Packet4d& a)
{
  Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a,a,1));
  return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
}
template<int Offset>
struct palign_impl<Offset,Packet8f>
{
  static EIGEN_STRONG_INLINE void run(Packet8f& first, const Packet8f& second)
  {
    if (Offset==1)
    {
      first = _mm256_blend_ps(first, second, 1);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_blend_ps(tmp1, tmp2, 0x88);
    }
    else if (Offset==2)
    {
      first = _mm256_blend_ps(first, second, 3);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_blend_ps(tmp1, tmp2, 0xcc);
    }
    else if (Offset==3)
    {
      first = _mm256_blend_ps(first, second, 7);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_blend_ps(tmp1, tmp2, 0xee);
    }
    else if (Offset==4)
    {
      first = _mm256_blend_ps(first, second, 15);
      Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(3,2,1,0));
      Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
      first = _mm256_permute_ps(tmp2, _MM_SHUFFLE(3,2,1,0));
    }
    else if (Offset==5)
    {
      first = _mm256_blend_ps(first, second, 31);
      first = _mm256_permute2f128_ps(first, first, 1);
      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1));
      first = _mm256_permute2f128_ps(tmp, tmp, 1);
      first = _mm256_blend_ps(tmp, first, 0x88);
    }
    else if (Offset==6)
    {
      first = _mm256_blend_ps(first, second, 63);
      first = _mm256_permute2f128_ps(first, first, 1);
      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2));
      first = _mm256_permute2f128_ps(tmp, tmp, 1);
      first = _mm256_blend_ps(tmp, first, 0xcc);
    }
    else if (Offset==7)
    {
      first = _mm256_blend_ps(first, second, 127);
      first = _mm256_permute2f128_ps(first, first, 1);
      Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3));
      first = _mm256_permute2f128_ps(tmp, tmp, 1);
      first = _mm256_blend_ps(tmp, first, 0xee);
    }
  }
};
template<int Offset>
struct palign_impl<Offset,Packet4d>
{
  static EIGEN_STRONG_INLINE void run(Packet4d& first, const Packet4d& second)
  {
    if (Offset==1)
    {
      first = _mm256_blend_pd(first, second, 1);
      __m256d tmp = _mm256_permute_pd(first, 5);
      first = _mm256_permute2f128_pd(tmp, tmp, 1);
      first = _mm256_blend_pd(tmp, first, 0xA);
    }
    else if (Offset==2)
    {
      first = _mm256_blend_pd(first, second, 3);
      first = _mm256_permute2f128_pd(first, first, 1);
    }
    else if (Offset==3)
    {
      first = _mm256_blend_pd(first, second, 7);
      __m256d tmp = _mm256_permute_pd(first, 5);
      first = _mm256_permute2f128_pd(tmp, tmp, 1);
      first = _mm256_blend_pd(tmp, first, 5);
    }
  }
};
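// palign_impl shifts the concatenation [first, second] left by Offset scalars
// and keeps the leading packet, e.g. Offset==1 yields {f1,...,f7,s0} for
// floats. AVX1 cannot shift across the two 128-bit lanes in one instruction,
// so every case is assembled from in-lane permutes, cross-lane permute2f128
// swaps, and blends.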
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet8f,8>& kernel) {
  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
  __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
  __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
  __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));
  __m256 S4 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(1,0,1,0));
  __m256 S5 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(3,2,3,2));
  __m256 S6 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(1,0,1,0));
  __m256 S7 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(3,2,3,2));
  kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20);
  kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20);
  kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20);
  kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20);
  kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31);
  kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31);
  kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31);
  kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31);
}
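// Three-stage 8x8 transpose: unpacklo/unpackhi interleave pairs of rows,
// shuffle_ps gathers 4-element column fragments within each 128-bit lane, and
// permute2f128 (0x20 = both low lanes, 0x31 = both high lanes) stitches the
// fragments into full transposed rows.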
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet8f,4>& kernel) {
  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);

  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));

  kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20);
  kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20);
  kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31);
  kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31);
}
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4d,4>& kernel) {
  __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);
  __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
  __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);
  __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);

  kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32);
  kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49);
  kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32);
  kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49);
}
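// Same idea for 4x4 doubles: shuffle_pd with masks 15/0 pairs the high/low
// elements of adjacent rows, then permute2f128 (32 = 0x20, 49 = 0x31) merges
// the 128-bit lanes into the transposed rows.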
template<> EIGEN_STRONG_INLINE Packet8f pblend(const Selector<8>& ifPacket, const Packet8f& thenPacket, const Packet8f& elsePacket) {
  const __m256 zero = _mm256_setzero_ps();
  const __m256 select = _mm256_set_ps(ifPacket.select[7], ifPacket.select[6], ifPacket.select[5], ifPacket.select[4],
                                      ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m256 false_mask = _mm256_cmp_ps(select, zero, _CMP_EQ_UQ);
  return _mm256_blendv_ps(thenPacket, elsePacket, false_mask);
}
template<> EIGEN_STRONG_INLINE Packet4d pblend(const Selector<4>& ifPacket, const Packet4d& thenPacket, const Packet4d& elsePacket) {
  const __m256d zero = _mm256_setzero_pd();
  const __m256d select = _mm256_set_pd(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
  __m256d false_mask = _mm256_cmp_pd(select, zero, _CMP_EQ_UQ);
  return _mm256_blendv_pd(thenPacket, elsePacket, false_mask);
}
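// Per-lane select: result[i] = ifPacket.select[i] ? thenPacket[i] : elsePacket[i].
// Comparing the selector against zero builds the mask blendv consumes, so the
// choice is resolved without any conditional jump. Illustrative use (assuming
// the generic Selector convention):
//   Selector<4> s = {{1,0,0,1}};
//   Packet4d r = pblend(s, t, e); // r = {t0, e1, e2, t3}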
} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_PACKET_MATH_AVX_H