AVX/PacketMath.h
Go to the documentation of this file.
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2014 Benoit Steiner (benoit.steiner.goog@gmail.com)
5 //
6 // This Source Code Form is subject to the terms of the Mozilla
7 // Public License v. 2.0. If a copy of the MPL was not distributed
8 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
9 
10 #ifndef EIGEN_PACKET_MATH_AVX_H
11 #define EIGEN_PACKET_MATH_AVX_H
12 
13 namespace Eigen {
14 
15 namespace internal {
16 
17 #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
18 #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
19 #endif
20 
21 #ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
22 #define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS (2*sizeof(void*))
23 #endif
24 
25 #ifdef __FMA__
26 #ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
27 #define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
28 #endif
29 #endif
30 
31 typedef __m256 Packet8f;
32 typedef __m256i Packet8i;
33 typedef __m256d Packet4d;
34 
35 template<> struct is_arithmetic<__m256> { enum { value = true }; };
36 template<> struct is_arithmetic<__m256i> { enum { value = true }; };
37 template<> struct is_arithmetic<__m256d> { enum { value = true }; };
38 
39 #define _EIGEN_DECLARE_CONST_Packet8f(NAME,X) \
40  const Packet8f p8f_##NAME = pset1<Packet8f>(X)
41 
42 #define _EIGEN_DECLARE_CONST_Packet4d(NAME,X) \
43  const Packet4d p4d_##NAME = pset1<Packet4d>(X)
44 
45 #define _EIGEN_DECLARE_CONST_Packet8f_FROM_INT(NAME,X) \
46  const Packet8f p8f_##NAME = _mm256_castsi256_ps(pset1<Packet8i>(X))
47 
48 #define _EIGEN_DECLARE_CONST_Packet8i(NAME,X) \
49  const Packet8i p8i_##NAME = pset1<Packet8i>(X)
50 
51 // Use the packet_traits defined in AVX512/PacketMath.h instead if we're going
52 // to leverage AVX512 instructions.
53 #ifndef EIGEN_VECTORIZE_AVX512
54 template<> struct packet_traits<float> : default_packet_traits
55 {
56  typedef Packet8f type;
57  typedef Packet4f half;
58  enum {
59  Vectorizable = 1,
60  AlignedOnScalar = 1,
61  size=8,
62  HasHalfPacket = 1,
63 
64  HasDiv = 1,
66  HasCos = 0,
67  HasLog = 1,
68  HasExp = 1,
69  HasSqrt = 1,
70  HasRsqrt = 1,
72  HasBlend = 1,
73  HasRound = 1,
74  HasFloor = 1,
75  HasCeil = 1
76  };
77 };
78 template<> struct packet_traits<double> : default_packet_traits
79 {
80  typedef Packet4d type;
81  typedef Packet2d half;
82  enum {
85  size=4,
87 
88  HasDiv = 1,
89  HasExp = 1,
90  HasSqrt = 1,
91  HasRsqrt = 1,
92  HasBlend = 1,
93  HasRound = 1,
94  HasFloor = 1,
95  HasCeil = 1
96  };
97 };
98 #endif
99 
// Approximate cost (in pseudo-cycles) of a vectorized division; consumed by
// Eigen's cost model when deciding whether to vectorize expressions with '/'.
100 template<> struct scalar_div_cost<float,true> { enum { value = 14 }; };
101 template<> struct scalar_div_cost<double,true> { enum { value = 16 }; };
102 
103 /* Proper support for integers is only provided by AVX2. In the meantime, we'll
104  use SSE instructions and packets to deal with integers.
105 template<> struct packet_traits<int> : default_packet_traits
106 {
107  typedef Packet8i type;
108  enum {
109  Vectorizable = 1,
110  AlignedOnScalar = 1,
111  size=8
112  };
113 };
114 */
115 
// Reverse mapping packet -> scalar type, plus the half-width packet used for
// tail processing. All AVX packets require 32-byte alignment.
116 template<> struct unpacket_traits<Packet8f> { typedef float type; typedef Packet4f half; enum {size=8, alignment=Aligned32}; };
117 template<> struct unpacket_traits<Packet4d> { typedef double type; typedef Packet2d half; enum {size=4, alignment=Aligned32}; };
118 template<> struct unpacket_traits<Packet8i> { typedef int type; typedef Packet4i half; enum {size=8, alignment=Aligned32}; };
119 
// pset1: broadcast a scalar value into every lane of the packet.
120 template<> EIGEN_STRONG_INLINE Packet8f pset1<Packet8f>(const float& from) { return _mm256_set1_ps(from); }
121 template<> EIGEN_STRONG_INLINE Packet4d pset1<Packet4d>(const double& from) { return _mm256_set1_pd(from); }
122 template<> EIGEN_STRONG_INLINE Packet8i pset1<Packet8i>(const int& from) { return _mm256_set1_epi32(from); }
123 
// pload1: broadcast a scalar read from memory; uses the memory-operand
// broadcast instruction instead of a load followed by a shuffle.
124 template<> EIGEN_STRONG_INLINE Packet8f pload1<Packet8f>(const float* from) { return _mm256_broadcast_ss(from); }
125 template<> EIGEN_STRONG_INLINE Packet4d pload1<Packet4d>(const double* from) { return _mm256_broadcast_sd(from); }
126 
// plset: returns {a, a+1, a+2, ...} — the linearly increasing sequence used
// e.g. to vectorize LinSpaced. Note _mm256_set_* takes lanes high-to-low.
127 template<> EIGEN_STRONG_INLINE Packet8f plset<Packet8f>(const float& a) { return _mm256_add_ps(_mm256_set1_ps(a), _mm256_set_ps(7.0,6.0,5.0,4.0,3.0,2.0,1.0,0.0)); }
128 template<> EIGEN_STRONG_INLINE Packet4d plset<Packet4d>(const double& a) { return _mm256_add_pd(_mm256_set1_pd(a), _mm256_set_pd(3.0,2.0,1.0,0.0)); }
129 
// Lane-wise addition and subtraction.
130 template<> EIGEN_STRONG_INLINE Packet8f padd<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_add_ps(a,b); }
131 template<> EIGEN_STRONG_INLINE Packet4d padd<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_add_pd(a,b); }
132 
133 template<> EIGEN_STRONG_INLINE Packet8f psub<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_sub_ps(a,b); }
134 template<> EIGEN_STRONG_INLINE Packet4d psub<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_sub_pd(a,b); }
135 
137 {
138  return _mm256_sub_ps(_mm256_set1_ps(0.0),a);
139 }
141 {
142  return _mm256_sub_pd(_mm256_set1_pd(0.0),a);
143 }
144 
// pconj: complex conjugate — a no-op for real packets, provided so generic
// code can call it unconditionally.
145 template<> EIGEN_STRONG_INLINE Packet8f pconj(const Packet8f& a) { return a; }
146 template<> EIGEN_STRONG_INLINE Packet4d pconj(const Packet4d& a) { return a; }
147 template<> EIGEN_STRONG_INLINE Packet8i pconj(const Packet8i& a) { return a; }
148 
// Lane-wise multiplication.
149 template<> EIGEN_STRONG_INLINE Packet8f pmul<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_mul_ps(a,b); }
150 template<> EIGEN_STRONG_INLINE Packet4d pmul<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_mul_pd(a,b); }
151 
152 
// Lane-wise division. There is no integer division instruction in AVX, so the
// Packet8i overload only asserts at runtime and returns a zero packet.
153 template<> EIGEN_STRONG_INLINE Packet8f pdiv<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_div_ps(a,b); }
154 template<> EIGEN_STRONG_INLINE Packet4d pdiv<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_div_pd(a,b); }
155 template<> EIGEN_STRONG_INLINE Packet8i pdiv<Packet8i>(const Packet8i& /*a*/, const Packet8i& /*b*/)
156 { eigen_assert(false && "packet integer division are not supported by AVX");
157  return pset1<Packet8i>(0);
158 }
159 
// pmadd: fused multiply-add, computes a*b+c in one instruction when FMA is
// available. The inline asm below pins the exact instruction form the
// compiler emits; do not "simplify" it back to the intrinsic for the listed
// compilers without re-checking the generated code.
160 #ifdef __FMA__
161 template<> EIGEN_STRONG_INLINE Packet8f pmadd(const Packet8f& a, const Packet8f& b, const Packet8f& c) {
162 #if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
163  // Clang stupidly generates a vfmadd213ps instruction plus some vmovaps on registers,
164  // and even register spilling with clang>=6.0 (bug 1637).
165  // Gcc stupidly generates a vfmadd132ps instruction.
166  // So let's enforce it to generate a vfmadd231ps instruction since the most common use
167  // case is to accumulate the result of the product.
168  Packet8f res = c;
169  __asm__("vfmadd231ps %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
170  return res;
171 #else
172  return _mm256_fmadd_ps(a,b,c);
173 #endif
174 }
175 template<> EIGEN_STRONG_INLINE Packet4d pmadd(const Packet4d& a, const Packet4d& b, const Packet4d& c) {
176 #if ( (EIGEN_COMP_GNUC_STRICT && EIGEN_COMP_GNUC<80) || (EIGEN_COMP_CLANG) )
177  // see above
178  Packet4d res = c;
179  __asm__("vfmadd231pd %[a], %[b], %[c]" : [c] "+x" (res) : [a] "x" (a), [b] "x" (b));
180  return res;
181 #else
182  return _mm256_fmadd_pd(a,b,c);
183 #endif
184 }
185 #endif
186 
// Lane-wise min/max. NOTE(review): vmin/vmax propagate the SECOND operand
// when one input is NaN (hardware semantics) — presumably relied upon
// elsewhere; confirm before changing operand order.
187 template<> EIGEN_STRONG_INLINE Packet8f pmin<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_min_ps(a,b); }
188 template<> EIGEN_STRONG_INLINE Packet4d pmin<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_min_pd(a,b); }
189 
190 template<> EIGEN_STRONG_INLINE Packet8f pmax<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_max_ps(a,b); }
191 template<> EIGEN_STRONG_INLINE Packet4d pmax<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_max_pd(a,b); }
192 
// pround uses _MM_FROUND_CUR_DIRECTION, i.e. it rounds according to the
// current MXCSR rounding mode (round-to-nearest-even by default), which is
// NOT the round-half-away-from-zero of std::round.
193 template<> EIGEN_STRONG_INLINE Packet8f pround<Packet8f>(const Packet8f& a) { return _mm256_round_ps(a, _MM_FROUND_CUR_DIRECTION); }
194 template<> EIGEN_STRONG_INLINE Packet4d pround<Packet4d>(const Packet4d& a) { return _mm256_round_pd(a, _MM_FROUND_CUR_DIRECTION); }
195 
196 template<> EIGEN_STRONG_INLINE Packet8f pceil<Packet8f>(const Packet8f& a) { return _mm256_ceil_ps(a); }
197 template<> EIGEN_STRONG_INLINE Packet4d pceil<Packet4d>(const Packet4d& a) { return _mm256_ceil_pd(a); }
198 
199 template<> EIGEN_STRONG_INLINE Packet8f pfloor<Packet8f>(const Packet8f& a) { return _mm256_floor_ps(a); }
200 template<> EIGEN_STRONG_INLINE Packet4d pfloor<Packet4d>(const Packet4d& a) { return _mm256_floor_pd(a); }
201 
// Bitwise logic on the raw packet bits (used to build masks, sign handling,
// selects, ...). pandnot(a,b) computes (~a) & b per the vandnps semantics.
202 template<> EIGEN_STRONG_INLINE Packet8f pand<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_and_ps(a,b); }
203 template<> EIGEN_STRONG_INLINE Packet4d pand<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_and_pd(a,b); }
204 
205 template<> EIGEN_STRONG_INLINE Packet8f por<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_or_ps(a,b); }
206 template<> EIGEN_STRONG_INLINE Packet4d por<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_or_pd(a,b); }
207 
208 template<> EIGEN_STRONG_INLINE Packet8f pxor<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_xor_ps(a,b); }
209 template<> EIGEN_STRONG_INLINE Packet4d pxor<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_xor_pd(a,b); }
210 
211 template<> EIGEN_STRONG_INLINE Packet8f pandnot<Packet8f>(const Packet8f& a, const Packet8f& b) { return _mm256_andnot_ps(a,b); }
212 template<> EIGEN_STRONG_INLINE Packet4d pandnot<Packet4d>(const Packet4d& a, const Packet4d& b) { return _mm256_andnot_pd(a,b); }
213 
// pload requires a 32-byte-aligned pointer; ploadu accepts any alignment.
214 template<> EIGEN_STRONG_INLINE Packet8f pload<Packet8f>(const float* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_ps(from); }
215 template<> EIGEN_STRONG_INLINE Packet4d pload<Packet4d>(const double* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_pd(from); }
216 template<> EIGEN_STRONG_INLINE Packet8i pload<Packet8i>(const int* from) { EIGEN_DEBUG_ALIGNED_LOAD return _mm256_load_si256(reinterpret_cast<const __m256i*>(from)); }
217 
218 template<> EIGEN_STRONG_INLINE Packet8f ploadu<Packet8f>(const float* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_ps(from); }
219 template<> EIGEN_STRONG_INLINE Packet4d ploadu<Packet4d>(const double* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_pd(from); }
220 template<> EIGEN_STRONG_INLINE Packet8i ploadu<Packet8i>(const int* from) { EIGEN_DEBUG_UNALIGNED_LOAD return _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from)); }
221 
222 // Loads 4 floats from memory and returns the packet {a0, a0, a1, a1, a2, a2, a3, a3}
223 template<> EIGEN_STRONG_INLINE Packet8f ploaddup<Packet8f>(const float* from)
224 {
225  // TODO try to find a way to avoid the need of a temporary register
226 // Packet8f tmp = _mm256_castps128_ps256(_mm_loadu_ps(from));
227 // tmp = _mm256_insertf128_ps(tmp, _mm_movehl_ps(_mm256_castps256_ps128(tmp),_mm256_castps256_ps128(tmp)), 1);
228 // return _mm256_unpacklo_ps(tmp,tmp);
229 
 // Broadcast the four floats into both 128-bit halves...
230  // _mm256_insertf128_ps is very slow on Haswell, thus:
231  Packet8f tmp = _mm256_broadcast_ps((const __m128*)(const void*)from);
 // ...rearrange the low half to {a0,a1,a0,a1} (blend mask 15 = low 4 lanes)
232  // mimic an "inplace" permutation of the lower 128bits using a blend
233  tmp = _mm256_blend_ps(tmp,_mm256_castps128_ps256(_mm_permute_ps( _mm256_castps256_ps128(tmp), _MM_SHUFFLE(1,0,1,0))), 15);
234  // then we can perform a consistent permutation on the global register to get everything in shape:
235  return _mm256_permute_ps(tmp, _MM_SHUFFLE(3,3,2,2));
236 }
237 // Loads 2 doubles from memory and returns the packet {a0, a0, a1, a1}
238 template<> EIGEN_STRONG_INLINE Packet4d ploaddup<Packet4d>(const double* from)
239 {
 // Broadcast {a0,a1} into both 128-bit halves, then duplicate the low lane of
 // the low half and the high lane of the high half (immediate 0b1100).
240  Packet4d tmp = _mm256_broadcast_pd((const __m128d*)(const void*)from);
241  return _mm256_permute_pd(tmp, 3<<2);
242 }
243 
244 // Loads 2 floats from memory and returns the packet {a0, a0, a0, a0, a1, a1, a1, a1}
245 template<> EIGEN_STRONG_INLINE Packet8f ploadquad<Packet8f>(const float* from)
246 {
 // Broadcast from[0] into the low 128-bit half and from[1] into the high half.
247  Packet8f tmp = _mm256_castps128_ps256(_mm_broadcast_ss(from));
248  return _mm256_insertf128_ps(tmp, _mm_broadcast_ss(from+1), 1);
249 }
250 
// Aligned stores: 'to' must be 32-byte aligned.
251 template<> EIGEN_STRONG_INLINE void pstore<float>(float* to, const Packet8f& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_ps(to, from); }
252 template<> EIGEN_STRONG_INLINE void pstore<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_pd(to, from); }
// Aligned store of 8 ints; 'to' must be 32-byte aligned per the pstore
// contract. Uses the aligned-store intrinsic for consistency with the
// float/double overloads above and with pload<Packet8i>'s aligned load.
// (Previously used _mm256_storeu_si256, which silently tolerated unaligned
// pointers and masked contract violations in debug builds.)
253 template<> EIGEN_STRONG_INLINE void pstore<int>(int* to, const Packet8i& from) { EIGEN_DEBUG_ALIGNED_STORE _mm256_store_si256(reinterpret_cast<__m256i*>(to), from); }
254 
// Unaligned stores: 'to' may have any alignment.
255 template<> EIGEN_STRONG_INLINE void pstoreu<float>(float* to, const Packet8f& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_ps(to, from); }
256 template<> EIGEN_STRONG_INLINE void pstoreu<double>(double* to, const Packet4d& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_pd(to, from); }
257 template<> EIGEN_STRONG_INLINE void pstoreu<int>(int* to, const Packet8i& from) { EIGEN_DEBUG_UNALIGNED_STORE _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from); }
258 
259 // NOTE: leverage _mm256_i32gather_ps and _mm256_i32gather_pd if AVX2 instructions are available
260 // NOTE: for the record the following seems to be slower: return _mm256_i32gather_ps(from, _mm256_set1_epi32(stride), 4);
// Strided gather: packet lane i is read from from[i*stride]. Implemented with
// scalar loads (see NOTE above: the AVX2 gather instruction measured slower).
261 template<> EIGEN_DEVICE_FUNC inline Packet8f pgather<float, Packet8f>(const float* from, Index stride)
262 {
263  return _mm256_set_ps(from[7*stride], from[6*stride], from[5*stride], from[4*stride],
264  from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
265 }
266 template<> EIGEN_DEVICE_FUNC inline Packet4d pgather<double, Packet4d>(const double* from, Index stride)
267 {
268  return _mm256_set_pd(from[3*stride], from[2*stride], from[1*stride], from[0*stride]);
269 }
270 
// Strided scatter: lane i of 'from' is written to to[i*stride]. Each 128-bit
// half is extracted once, then individual lanes are shuffled to position 0
// and written with a scalar store.
271 template<> EIGEN_DEVICE_FUNC inline void pscatter<float, Packet8f>(float* to, const Packet8f& from, Index stride)
272 {
273  __m128 low = _mm256_extractf128_ps(from, 0);
274  to[stride*0] = _mm_cvtss_f32(low);
275  to[stride*1] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 1));
276  to[stride*2] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 2));
277  to[stride*3] = _mm_cvtss_f32(_mm_shuffle_ps(low, low, 3));
278 
279  __m128 high = _mm256_extractf128_ps(from, 1);
280  to[stride*4] = _mm_cvtss_f32(high);
281  to[stride*5] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 1));
282  to[stride*6] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 2));
283  to[stride*7] = _mm_cvtss_f32(_mm_shuffle_ps(high, high, 3));
284 }
// Double-precision strided scatter; same scheme as the float version above.
285 template<> EIGEN_DEVICE_FUNC inline void pscatter<double, Packet4d>(double* to, const Packet4d& from, Index stride)
286 {
287  __m128d low = _mm256_extractf128_pd(from, 0);
288  to[stride*0] = _mm_cvtsd_f64(low);
289  to[stride*1] = _mm_cvtsd_f64(_mm_shuffle_pd(low, low, 1));
290  __m128d high = _mm256_extractf128_pd(from, 1);
291  to[stride*2] = _mm_cvtsd_f64(high);
292  to[stride*3] = _mm_cvtsd_f64(_mm_shuffle_pd(high, high, 1));
293 }
294 
// pstore1: broadcast scalar 'a' to a full packet and store it to the aligned
// address 'to' (same alignment requirement as pstore).
295 template<> EIGEN_STRONG_INLINE void pstore1<Packet8f>(float* to, const float& a)
296 {
297  Packet8f pa = pset1<Packet8f>(a);
298  pstore(to, pa);
299 }
300 template<> EIGEN_STRONG_INLINE void pstore1<Packet4d>(double* to, const double& a)
301 {
302  Packet4d pa = pset1<Packet4d>(a);
303  pstore(to, pa);
304 }
305 template<> EIGEN_STRONG_INLINE void pstore1<Packet8i>(int* to, const int& a)
306 {
307  Packet8i pa = pset1<Packet8i>(a);
308  pstore(to, pa);
309 }
310 
// Software prefetch into all cache levels (T0 hint). Guarded because the
// AVX512 header provides its own specializations.
311 #ifndef EIGEN_VECTORIZE_AVX512
312 template<> EIGEN_STRONG_INLINE void prefetch<float>(const float* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
313 template<> EIGEN_STRONG_INLINE void prefetch<double>(const double* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
314 template<> EIGEN_STRONG_INLINE void prefetch<int>(const int* addr) { _mm_prefetch((SsePrefetchPtrType)(addr), _MM_HINT_T0); }
315 #endif
316 
// pfirst: extract lane 0 of the packet. The cast to the 128-bit type is free
// (no instruction); the cvt extracts the scalar.
317 template<> EIGEN_STRONG_INLINE float pfirst<Packet8f>(const Packet8f& a) {
318  return _mm_cvtss_f32(_mm256_castps256_ps128(a));
319 }
320 template<> EIGEN_STRONG_INLINE double pfirst<Packet4d>(const Packet4d& a) {
321  return _mm_cvtsd_f64(_mm256_castpd256_pd128(a));
322 }
324  return _mm_cvtsi128_si32(_mm256_castsi256_si128(a));
325 }
326 
327 
329 {
330  __m256 tmp = _mm256_shuffle_ps(a,a,0x1b);
331  return _mm256_permute2f128_ps(tmp, tmp, 1);
332 }
334 {
335  __m256d tmp = _mm256_shuffle_pd(a,a,5);
336  return _mm256_permute2f128_pd(tmp, tmp, 1);
337  #if 0
338  // This version is unlikely to be faster as _mm256_shuffle_ps and _mm256_permute_pd
339  // exhibit the same latency/throughput, but it is here for future reference/benchmarking...
340  __m256d swap_halves = _mm256_permute2f128_pd(a,a,1);
341  return _mm256_permute_pd(swap_halves,5);
342  #endif
343 }
344 
345 // pabs should be ok
347 {
348  const Packet8f mask = _mm256_castsi256_ps(_mm256_setr_epi32(0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF,0x7FFFFFFF));
349  return _mm256_and_ps(a,mask);
350 }
352 {
353  const Packet4d mask = _mm256_castsi256_pd(_mm256_setr_epi32(0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF,0xFFFFFFFF,0x7FFFFFFF));
354  return _mm256_and_pd(a,mask);
355 }
356 
357 // preduxp should be ok
358 // FIXME: why is this ok? why isn't the simple implementation working as expected?
360 {
361  __m256 hsum1 = _mm256_hadd_ps(vecs[0], vecs[1]);
362  __m256 hsum2 = _mm256_hadd_ps(vecs[2], vecs[3]);
363  __m256 hsum3 = _mm256_hadd_ps(vecs[4], vecs[5]);
364  __m256 hsum4 = _mm256_hadd_ps(vecs[6], vecs[7]);
365 
366  __m256 hsum5 = _mm256_hadd_ps(hsum1, hsum1);
367  __m256 hsum6 = _mm256_hadd_ps(hsum2, hsum2);
368  __m256 hsum7 = _mm256_hadd_ps(hsum3, hsum3);
369  __m256 hsum8 = _mm256_hadd_ps(hsum4, hsum4);
370 
371  __m256 perm1 = _mm256_permute2f128_ps(hsum5, hsum5, 0x23);
372  __m256 perm2 = _mm256_permute2f128_ps(hsum6, hsum6, 0x23);
373  __m256 perm3 = _mm256_permute2f128_ps(hsum7, hsum7, 0x23);
374  __m256 perm4 = _mm256_permute2f128_ps(hsum8, hsum8, 0x23);
375 
376  __m256 sum1 = _mm256_add_ps(perm1, hsum5);
377  __m256 sum2 = _mm256_add_ps(perm2, hsum6);
378  __m256 sum3 = _mm256_add_ps(perm3, hsum7);
379  __m256 sum4 = _mm256_add_ps(perm4, hsum8);
380 
381  __m256 blend1 = _mm256_blend_ps(sum1, sum2, 0xcc);
382  __m256 blend2 = _mm256_blend_ps(sum3, sum4, 0xcc);
383 
384  __m256 final = _mm256_blend_ps(blend1, blend2, 0xf0);
385  return final;
386 }
388 {
389  Packet4d tmp0, tmp1;
390 
391  tmp0 = _mm256_hadd_pd(vecs[0], vecs[1]);
392  tmp0 = _mm256_add_pd(tmp0, _mm256_permute2f128_pd(tmp0, tmp0, 1));
393 
394  tmp1 = _mm256_hadd_pd(vecs[2], vecs[3]);
395  tmp1 = _mm256_add_pd(tmp1, _mm256_permute2f128_pd(tmp1, tmp1, 1));
396 
397  return _mm256_blend_pd(tmp0, tmp1, 0xC);
398 }
399 
401 {
402  return predux(Packet4f(_mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1))));
403 }
405 {
406  return predux(Packet2d(_mm_add_pd(_mm256_castpd256_pd128(a),_mm256_extractf128_pd(a,1))));
407 }
408 
410 {
411  return _mm_add_ps(_mm256_castps256_ps128(a),_mm256_extractf128_ps(a,1));
412 }
413 
415 {
416  Packet8f tmp;
417  tmp = _mm256_mul_ps(a, _mm256_permute2f128_ps(a,a,1));
418  tmp = _mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
419  return pfirst(_mm256_mul_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
420 }
422 {
423  Packet4d tmp;
424  tmp = _mm256_mul_pd(a, _mm256_permute2f128_pd(a,a,1));
425  return pfirst(_mm256_mul_pd(tmp, _mm256_shuffle_pd(tmp,tmp,1)));
426 }
427 
429 {
430  Packet8f tmp = _mm256_min_ps(a, _mm256_permute2f128_ps(a,a,1));
431  tmp = _mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
432  return pfirst(_mm256_min_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
433 }
435 {
436  Packet4d tmp = _mm256_min_pd(a, _mm256_permute2f128_pd(a,a,1));
437  return pfirst(_mm256_min_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
438 }
439 
441 {
442  Packet8f tmp = _mm256_max_ps(a, _mm256_permute2f128_ps(a,a,1));
443  tmp = _mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,_MM_SHUFFLE(1,0,3,2)));
444  return pfirst(_mm256_max_ps(tmp, _mm256_shuffle_ps(tmp,tmp,1)));
445 }
446 
448 {
449  Packet4d tmp = _mm256_max_pd(a, _mm256_permute2f128_pd(a,a,1));
450  return pfirst(_mm256_max_pd(tmp, _mm256_shuffle_pd(tmp, tmp, 1)));
451 }
452 
453 
// palign_impl: shift the concatenation [first, second] left by Offset lanes
// and write the result into 'first' (generic-packet "align" primitive).
// AVX1 has no cross-128-bit-lane palignr, so each Offset is synthesized from
// blends, in-lane permutes and 128-bit half swaps. NOTE(review): each branch
// is instruction-selection-sensitive; verify against the generic palign
// contract before touching the masks.
454 template<int Offset>
455 struct palign_impl<Offset,Packet8f>
456 {
457  static EIGEN_STRONG_INLINE void run(Packet8f& first, const Packet8f& second)
458  {
459  if (Offset==1)
460  {
461  first = _mm256_blend_ps(first, second, 1);
462  Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1));
463  Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
464  first = _mm256_blend_ps(tmp1, tmp2, 0x88);
465  }
466  else if (Offset==2)
467  {
468  first = _mm256_blend_ps(first, second, 3);
469  Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2));
470  Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
471  first = _mm256_blend_ps(tmp1, tmp2, 0xcc);
472  }
473  else if (Offset==3)
474  {
475  first = _mm256_blend_ps(first, second, 7);
476  Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3));
477  Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
478  first = _mm256_blend_ps(tmp1, tmp2, 0xee);
479  }
480  else if (Offset==4)
481  {
482  first = _mm256_blend_ps(first, second, 15);
483  Packet8f tmp1 = _mm256_permute_ps (first, _MM_SHUFFLE(3,2,1,0));
484  Packet8f tmp2 = _mm256_permute2f128_ps (tmp1, tmp1, 1);
485  first = _mm256_permute_ps(tmp2, _MM_SHUFFLE(3,2,1,0));
486  }
487  else if (Offset==5)
488  {
489  first = _mm256_blend_ps(first, second, 31);
490  first = _mm256_permute2f128_ps(first, first, 1);
491  Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(0,3,2,1));
492  first = _mm256_permute2f128_ps(tmp, tmp, 1);
493  first = _mm256_blend_ps(tmp, first, 0x88);
494  }
495  else if (Offset==6)
496  {
497  first = _mm256_blend_ps(first, second, 63);
498  first = _mm256_permute2f128_ps(first, first, 1);
499  Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(1,0,3,2));
500  first = _mm256_permute2f128_ps(tmp, tmp, 1);
501  first = _mm256_blend_ps(tmp, first, 0xcc);
502  }
503  else if (Offset==7)
504  {
505  first = _mm256_blend_ps(first, second, 127);
506  first = _mm256_permute2f128_ps(first, first, 1);
507  Packet8f tmp = _mm256_permute_ps (first, _MM_SHUFFLE(2,1,0,3));
508  first = _mm256_permute2f128_ps(tmp, tmp, 1);
509  first = _mm256_blend_ps(tmp, first, 0xee);
510  }
511  }
512 };
513 
// Double-precision palign: shift [first, second] left by Offset doubles into
// 'first'. Offset==2 reduces to a plain swap of the two 128-bit halves after
// blending in the low lanes of 'second'.
514 template<int Offset>
515 struct palign_impl<Offset,Packet4d>
516 {
517  static EIGEN_STRONG_INLINE void run(Packet4d& first, const Packet4d& second)
518  {
519  if (Offset==1)
520  {
521  first = _mm256_blend_pd(first, second, 1);
522  __m256d tmp = _mm256_permute_pd(first, 5);
523  first = _mm256_permute2f128_pd(tmp, tmp, 1);
524  first = _mm256_blend_pd(tmp, first, 0xA);
525  }
526  else if (Offset==2)
527  {
528  first = _mm256_blend_pd(first, second, 3);
529  first = _mm256_permute2f128_pd(first, first, 1);
530  }
531  else if (Offset==3)
532  {
533  first = _mm256_blend_pd(first, second, 7);
534  __m256d tmp = _mm256_permute_pd(first, 5);
535  first = _mm256_permute2f128_pd(tmp, tmp, 1);
536  first = _mm256_blend_pd(tmp, first, 5);
537  }
538  }
539 };
540 
541 EIGEN_DEVICE_FUNC inline void
543  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
544  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
545  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
546  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
547  __m256 T4 = _mm256_unpacklo_ps(kernel.packet[4], kernel.packet[5]);
548  __m256 T5 = _mm256_unpackhi_ps(kernel.packet[4], kernel.packet[5]);
549  __m256 T6 = _mm256_unpacklo_ps(kernel.packet[6], kernel.packet[7]);
550  __m256 T7 = _mm256_unpackhi_ps(kernel.packet[6], kernel.packet[7]);
551  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
552  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
553  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
554  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));
555  __m256 S4 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(1,0,1,0));
556  __m256 S5 = _mm256_shuffle_ps(T4,T6,_MM_SHUFFLE(3,2,3,2));
557  __m256 S6 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(1,0,1,0));
558  __m256 S7 = _mm256_shuffle_ps(T5,T7,_MM_SHUFFLE(3,2,3,2));
559  kernel.packet[0] = _mm256_permute2f128_ps(S0, S4, 0x20);
560  kernel.packet[1] = _mm256_permute2f128_ps(S1, S5, 0x20);
561  kernel.packet[2] = _mm256_permute2f128_ps(S2, S6, 0x20);
562  kernel.packet[3] = _mm256_permute2f128_ps(S3, S7, 0x20);
563  kernel.packet[4] = _mm256_permute2f128_ps(S0, S4, 0x31);
564  kernel.packet[5] = _mm256_permute2f128_ps(S1, S5, 0x31);
565  kernel.packet[6] = _mm256_permute2f128_ps(S2, S6, 0x31);
566  kernel.packet[7] = _mm256_permute2f128_ps(S3, S7, 0x31);
567 }
568 
569 EIGEN_DEVICE_FUNC inline void
571  __m256 T0 = _mm256_unpacklo_ps(kernel.packet[0], kernel.packet[1]);
572  __m256 T1 = _mm256_unpackhi_ps(kernel.packet[0], kernel.packet[1]);
573  __m256 T2 = _mm256_unpacklo_ps(kernel.packet[2], kernel.packet[3]);
574  __m256 T3 = _mm256_unpackhi_ps(kernel.packet[2], kernel.packet[3]);
575 
576  __m256 S0 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(1,0,1,0));
577  __m256 S1 = _mm256_shuffle_ps(T0,T2,_MM_SHUFFLE(3,2,3,2));
578  __m256 S2 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(1,0,1,0));
579  __m256 S3 = _mm256_shuffle_ps(T1,T3,_MM_SHUFFLE(3,2,3,2));
580 
581  kernel.packet[0] = _mm256_permute2f128_ps(S0, S1, 0x20);
582  kernel.packet[1] = _mm256_permute2f128_ps(S2, S3, 0x20);
583  kernel.packet[2] = _mm256_permute2f128_ps(S0, S1, 0x31);
584  kernel.packet[3] = _mm256_permute2f128_ps(S2, S3, 0x31);
585 }
586 
587 EIGEN_DEVICE_FUNC inline void
589  __m256d T0 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 15);
590  __m256d T1 = _mm256_shuffle_pd(kernel.packet[0], kernel.packet[1], 0);
591  __m256d T2 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 15);
592  __m256d T3 = _mm256_shuffle_pd(kernel.packet[2], kernel.packet[3], 0);
593 
594  kernel.packet[1] = _mm256_permute2f128_pd(T0, T2, 32);
595  kernel.packet[3] = _mm256_permute2f128_pd(T0, T2, 49);
596  kernel.packet[0] = _mm256_permute2f128_pd(T1, T3, 32);
597  kernel.packet[2] = _mm256_permute2f128_pd(T1, T3, 49);
598 }
599 
// pblend: lane i gets thenPacket[i] when ifPacket.select[i] is true, else
// elsePacket[i]. The bool selector is materialized as a float packet and
// compared against zero to build the blend mask (note the mask selects the
// 'else' lanes, hence the operand order in blendv).
600 template<> EIGEN_STRONG_INLINE Packet8f pblend(const Selector<8>& ifPacket, const Packet8f& thenPacket, const Packet8f& elsePacket) {
601  const __m256 zero = _mm256_setzero_ps();
602  const __m256 select = _mm256_set_ps(ifPacket.select[7], ifPacket.select[6], ifPacket.select[5], ifPacket.select[4], ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
603  __m256 false_mask = _mm256_cmp_ps(select, zero, _CMP_EQ_UQ);
604  return _mm256_blendv_ps(thenPacket, elsePacket, false_mask);
605 }
// Double-precision pblend; same masking scheme as the float version above.
606 template<> EIGEN_STRONG_INLINE Packet4d pblend(const Selector<4>& ifPacket, const Packet4d& thenPacket, const Packet4d& elsePacket) {
607  const __m256d zero = _mm256_setzero_pd();
608  const __m256d select = _mm256_set_pd(ifPacket.select[3], ifPacket.select[2], ifPacket.select[1], ifPacket.select[0]);
609  __m256d false_mask = _mm256_cmp_pd(select, zero, _CMP_EQ_UQ);
610  return _mm256_blendv_pd(thenPacket, elsePacket, false_mask);
611 }
612 
614 {
615  return _mm256_blend_ps(a,pset1<Packet8f>(b),1);
616 }
617 
619 {
620  return _mm256_blend_pd(a,pset1<Packet4d>(b),1);
621 }
622 
624 {
625  return _mm256_blend_ps(a,pset1<Packet8f>(b),(1<<7));
626 }
627 
629 {
630  return _mm256_blend_pd(a,pset1<Packet4d>(b),(1<<3));
631 }
632 
633 } // end namespace internal
634 
635 } // end namespace Eigen
636 
637 #endif // EIGEN_PACKET_MATH_AVX_H
Eigen::internal::plset< Packet8f >
EIGEN_STRONG_INLINE Packet8f plset< Packet8f >(const float &a)
Definition: AVX/PacketMath.h:127
Eigen::internal::Packet4i
__vector int Packet4i
Definition: AltiVec/PacketMath.h:35
Eigen::internal::Packet4d
__m256d Packet4d
Definition: AVX/PacketMath.h:33
Eigen::internal::ploaddup< Packet4d >
EIGEN_STRONG_INLINE Packet4d ploaddup< Packet4d >(const double *from)
Definition: AVX/PacketMath.h:238
Eigen::internal::pfirst< Packet8f >
EIGEN_STRONG_INLINE float pfirst< Packet8f >(const Packet8f &a)
Definition: AVX/PacketMath.h:317
Eigen::internal::pmin< Packet4d >
EIGEN_STRONG_INLINE Packet4d pmin< Packet4d >(const Packet4d &a, const Packet4d &b)
Definition: AVX/PacketMath.h:188
Eigen::internal::pandnot< Packet4d >
EIGEN_STRONG_INLINE Packet4d pandnot< Packet4d >(const Packet4d &a, const Packet4d &b)
Definition: AVX/PacketMath.h:212
Eigen::internal::unpacket_traits::size
@ size
Definition: XprHelper.h:164
Eigen::internal::pgather< float, Packet8f >
EIGEN_DEVICE_FUNC Packet8f pgather< float, Packet8f >(const float *from, Index stride)
Definition: AVX/PacketMath.h:261
Eigen::internal::pmul< Packet8f >
EIGEN_STRONG_INLINE Packet8f pmul< Packet8f >(const Packet8f &a, const Packet8f &b)
Definition: AVX/PacketMath.h:149
Eigen
Definition: common.h:73
b
Scalar * b
Definition: cholesky.cpp:56
Eigen::internal::pfirst< Packet4d >
EIGEN_STRONG_INLINE double pfirst< Packet4d >(const Packet4d &a)
Definition: AVX/PacketMath.h:320
Eigen::internal::packet_traits::AlignedOnScalar
@ AlignedOnScalar
Definition: GenericPacketMath.h:103
Eigen::internal::pxor< Packet8f >
EIGEN_STRONG_INLINE Packet8f pxor< Packet8f >(const Packet8f &a, const Packet8f &b)
Definition: AVX/PacketMath.h:208
Eigen::internal::pfirst< Packet8i >
EIGEN_STRONG_INLINE int pfirst< Packet8i >(const Packet8i &a)
Definition: AVX/PacketMath.h:323
Eigen::internal::PacketBlock
Definition: GenericPacketMath.h:539
Eigen::internal::predux_mul< Packet4d >
EIGEN_STRONG_INLINE double predux_mul< Packet4d >(const Packet4d &a)
Definition: AVX/PacketMath.h:421
Eigen::internal::ploaddup< Packet8f >
EIGEN_STRONG_INLINE Packet8f ploaddup< Packet8f >(const float *from)
Definition: AVX/PacketMath.h:223
Eigen::internal::pinsertfirst
EIGEN_STRONG_INLINE Packet4cf pinsertfirst(const Packet4cf &a, std::complex< float > b)
Definition: AVX/Complex.h:427
eigen_assert
#define eigen_assert(x)
Definition: Macros.h:579
Eigen::internal::default_packet_traits::HasBlend
@ HasBlend
Definition: GenericPacketMath.h:58
Eigen::internal::Selector
Definition: GenericPacketMath.h:552
Eigen::internal::predux_max< Packet8f >
EIGEN_STRONG_INLINE float predux_max< Packet8f >(const Packet8f &a)
Definition: AVX/PacketMath.h:440
Eigen::internal::packet_traits::Vectorizable
@ Vectorizable
Definition: GenericPacketMath.h:101
Eigen::internal::packet_traits
Definition: GenericPacketMath.h:96
Eigen::internal::packet_traits< float >::type
Packet8f type
Definition: AVX/PacketMath.h:56
Eigen::internal::pceil< Packet8f >
EIGEN_STRONG_INLINE Packet8f pceil< Packet8f >(const Packet8f &a)
Definition: AVX/PacketMath.h:196
Eigen::internal::scalar_div_cost::value
@ value
Definition: XprHelper.h:676
Eigen::internal::pdiv< Packet4d >
EIGEN_STRONG_INLINE Packet4d pdiv< Packet4d >(const Packet4d &a, const Packet4d &b)
Definition: AVX/PacketMath.h:154
Eigen::internal::default_packet_traits::HasCos
@ HasCos
Definition: GenericPacketMath.h:70
Eigen::internal::palign_impl< Offset, Packet4d >::run
static EIGEN_STRONG_INLINE void run(Packet4d &first, const Packet4d &second)
Definition: AVX/PacketMath.h:517
Eigen::internal::preverse
EIGEN_STRONG_INLINE Packet2cf preverse(const Packet2cf &a)
Definition: AltiVec/Complex.h:137
Eigen::internal::PacketBlock::packet
Packet packet[N]
Definition: GenericPacketMath.h:543
Eigen::internal::pround< Packet8f >
EIGEN_STRONG_INLINE Packet8f pround< Packet8f >(const Packet8f &a)
Definition: AVX/PacketMath.h:193
Eigen::internal::Selector::select
bool select[N]
Definition: GenericPacketMath.h:556
Eigen::internal::pconj
EIGEN_STRONG_INLINE Packet2cf pconj(const Packet2cf &a)
Definition: AltiVec/Complex.h:101
Eigen::internal::predux_min< Packet8f >
EIGEN_STRONG_INLINE float predux_min< Packet8f >(const Packet8f &a)
Definition: AVX/PacketMath.h:428
Eigen::internal::packet_traits< float >::half
Packet4f half
Definition: AVX/PacketMath.h:57
Eigen::internal::prefetch< float >
EIGEN_STRONG_INLINE void prefetch< float >(const float *addr)
Definition: AltiVec/PacketMath.h:534
Eigen::internal::pstoreu< int >
EIGEN_STRONG_INLINE void pstoreu< int >(int *to, const Packet4i &from)
Definition: AltiVec/PacketMath.h:522
Eigen::internal::default_packet_traits::HasDiv
@ HasDiv
Definition: GenericPacketMath.h:60
Eigen::internal::default_packet_traits
Definition: GenericPacketMath.h:42
Eigen::internal::pstore1< Packet4d >
EIGEN_STRONG_INLINE void pstore1< Packet4d >(double *to, const double &a)
Definition: AVX/PacketMath.h:300
Eigen::internal::preduxp< Packet8f >
EIGEN_STRONG_INLINE Packet8f preduxp< Packet8f >(const Packet8f *vecs)
Definition: AVX/PacketMath.h:359
Eigen::internal::Packet4f
Definition: ZVector/PacketMath.h:44
Eigen::internal::pscatter< float, Packet8f >
EIGEN_DEVICE_FUNC void pscatter< float, Packet8f >(float *to, const Packet8f &from, Index stride)
Definition: AVX/PacketMath.h:271
Eigen::internal::pfirst
EIGEN_DEVICE_FUNC unpacket_traits< Packet >::type pfirst(const Packet &a)
Definition: GenericPacketMath.h:315
Eigen::internal::ploadu< Packet4d >
EIGEN_STRONG_INLINE Packet4d ploadu< Packet4d >(const double *from)
Definition: AVX/PacketMath.h:219
Eigen::internal::pfloor< Packet4d >
EIGEN_STRONG_INLINE Packet4d pfloor< Packet4d >(const Packet4d &a)
Definition: AVX/PacketMath.h:200
EIGEN_DEBUG_UNALIGNED_STORE
#define EIGEN_DEBUG_UNALIGNED_STORE
Definition: GenericPacketMath.h:39
Eigen::internal::default_packet_traits::HasSin
@ HasSin
Definition: GenericPacketMath.h:69
Eigen::internal::default_packet_traits::HasExp
@ HasExp
Definition: GenericPacketMath.h:63
Eigen::internal::pload< Packet4d >
EIGEN_STRONG_INLINE Packet4d pload< Packet4d >(const double *from)
Definition: AVX/PacketMath.h:215
Eigen::internal::psub< Packet8f >
EIGEN_STRONG_INLINE Packet8f psub< Packet8f >(const Packet8f &a, const Packet8f &b)
Definition: AVX/PacketMath.h:133
Eigen::internal::default_packet_traits::HasLog
@ HasLog
Definition: GenericPacketMath.h:64
Eigen::internal::pstore1< Packet8i >
EIGEN_STRONG_INLINE void pstore1< Packet8i >(int *to, const int &a)
Definition: AVX/PacketMath.h:305
Eigen::internal::psub< Packet4d >
EIGEN_STRONG_INLINE Packet4d psub< Packet4d >(const Packet4d &a, const Packet4d &b)
Definition: AVX/PacketMath.h:134
Eigen::internal::pmin< Packet8f >
EIGEN_STRONG_INLINE Packet8f pmin< Packet8f >(const Packet8f &a, const Packet8f &b)
Definition: AVX/PacketMath.h:187
Eigen::internal::pceil< Packet4d >
EIGEN_STRONG_INLINE Packet4d pceil< Packet4d >(const Packet4d &a)
Definition: AVX/PacketMath.h:197
Eigen::internal::pmul< Packet4d >
EIGEN_STRONG_INLINE Packet4d pmul< Packet4d >(const Packet4d &a, const Packet4d &b)
Definition: AVX/PacketMath.h:150
Eigen::internal::pand< Packet4d >
EIGEN_STRONG_INLINE Packet4d pand< Packet4d >(const Packet4d &a, const Packet4d &b)
Definition: AVX/PacketMath.h:203
Eigen::internal::por< Packet8f >
EIGEN_STRONG_INLINE Packet8f por< Packet8f >(const Packet8f &a, const Packet8f &b)
Definition: AVX/PacketMath.h:205
Eigen::internal::pstore< double >
EIGEN_STRONG_INLINE void pstore< double >(double *to, const Packet4d &from)
Definition: AVX/PacketMath.h:252
Eigen::internal::unpacket_traits::half
T half
Definition: XprHelper.h:161
Eigen::internal::pload1< Packet4d >
EIGEN_STRONG_INLINE Packet4d pload1< Packet4d >(const double *from)
Definition: AVX/PacketMath.h:125
Eigen::internal::pgather< double, Packet4d >
EIGEN_DEVICE_FUNC Packet4d pgather< double, Packet4d >(const double *from, Index stride)
Definition: AVX/PacketMath.h:266
Eigen::internal::pround< Packet4d >
EIGEN_STRONG_INLINE Packet4d pround< Packet4d >(const Packet4d &a)
Definition: AVX/PacketMath.h:194
Eigen::internal::predux_downto4< Packet8f >
EIGEN_STRONG_INLINE Packet4f predux_downto4< Packet8f >(const Packet8f &a)
Definition: AVX/PacketMath.h:409
Eigen::internal::ploadquad< Packet8f >
EIGEN_STRONG_INLINE Packet8f ploadquad< Packet8f >(const float *from)
Definition: AVX/PacketMath.h:245
Eigen::internal::pnegate
EIGEN_STRONG_INLINE Packet2cf pnegate(const Packet2cf &a)
Definition: AltiVec/Complex.h:100
Eigen::internal::padd< Packet4d >
EIGEN_STRONG_INLINE Packet4d padd< Packet4d >(const Packet4d &a, const Packet4d &b)
Definition: AVX/PacketMath.h:131
Eigen::internal::unpacket_traits
Definition: XprHelper.h:158
Eigen::internal::pand< Packet8f >
EIGEN_STRONG_INLINE Packet8f pand< Packet8f >(const Packet8f &a, const Packet8f &b)
Definition: AVX/PacketMath.h:202
Eigen::internal::ptranspose
EIGEN_STRONG_INLINE void ptranspose(PacketBlock< Packet2cf, 2 > &kernel)
Definition: AltiVec/Complex.h:242
Eigen::internal::default_packet_traits::HasCeil
@ HasCeil
Definition: GenericPacketMath.h:90
EIGEN_DEBUG_ALIGNED_LOAD
#define EIGEN_DEBUG_ALIGNED_LOAD
Definition: GenericPacketMath.h:27
EIGEN_STRONG_INLINE
#define EIGEN_STRONG_INLINE
Definition: Macros.h:494
Eigen::internal::pstore1< Packet8f >
EIGEN_STRONG_INLINE void pstore1< Packet8f >(float *to, const float &a)
Definition: AVX/PacketMath.h:295
Eigen::internal::default_packet_traits::HasRsqrt
@ HasRsqrt
Definition: GenericPacketMath.h:62
Eigen::internal::predux< Packet8f >
EIGEN_STRONG_INLINE float predux< Packet8f >(const Packet8f &a)
Definition: AVX/PacketMath.h:400
Eigen::internal::pblend
EIGEN_STRONG_INLINE Packet4i pblend(const Selector< 4 > &ifPacket, const Packet4i &thenPacket, const Packet4i &elsePacket)
Definition: AltiVec/PacketMath.h:759
Eigen::internal::default_packet_traits::HasFloor
@ HasFloor
Definition: GenericPacketMath.h:89
Eigen::internal::pload< Packet8f >
EIGEN_STRONG_INLINE Packet8f pload< Packet8f >(const float *from)
Definition: AVX/PacketMath.h:214
Eigen::internal::pxor< Packet4d >
EIGEN_STRONG_INLINE Packet4d pxor< Packet4d >(const Packet4d &a, const Packet4d &b)
Definition: AVX/PacketMath.h:209
Eigen::internal::Packet8i
__vector short int Packet8i
Definition: AltiVec/PacketMath.h:38
Eigen::internal::pset1< Packet8f >
EIGEN_STRONG_INLINE Packet8f pset1< Packet8f >(const float &from)
Definition: AVX/PacketMath.h:120
Eigen::internal::default_packet_traits::HasTanh
@ HasTanh
Definition: GenericPacketMath.h:77
Eigen::internal::palign_impl< Offset, Packet8f >::run
static EIGEN_STRONG_INLINE void run(Packet8f &first, const Packet8f &second)
Definition: AVX/PacketMath.h:457
Eigen::internal::pstore
EIGEN_DEVICE_FUNC void pstore(Scalar *to, const Packet &from)
Definition: GenericPacketMath.h:285
Eigen::internal::predux_mul< Packet8f >
EIGEN_STRONG_INLINE float predux_mul< Packet8f >(const Packet8f &a)
Definition: AVX/PacketMath.h:414
Eigen::internal::por< Packet4d >
EIGEN_STRONG_INLINE Packet4d por< Packet4d >(const Packet4d &a, const Packet4d &b)
Definition: AVX/PacketMath.h:206
Eigen::internal::ploadu< Packet8i >
EIGEN_STRONG_INLINE Packet8i ploadu< Packet8i >(const int *from)
Definition: AVX/PacketMath.h:220
Eigen::internal::unpacket_traits< Packet8f >::type
float type
Definition: AVX/PacketMath.h:116
Eigen::internal::default_packet_traits::HasSqrt
@ HasSqrt
Definition: GenericPacketMath.h:61
Eigen::internal::pmax< Packet8f >
EIGEN_STRONG_INLINE Packet8f pmax< Packet8f >(const Packet8f &a, const Packet8f &b)
Definition: AVX/PacketMath.h:190
Eigen::internal::pset1< Packet4d >
EIGEN_STRONG_INLINE Packet4d pset1< Packet4d >(const double &from)
Definition: AVX/PacketMath.h:121
Eigen::internal::scalar_div_cost
Definition: XprHelper.h:675
Eigen::internal::is_arithmetic::value
@ value
Definition: Meta.h:85
c
RealScalar c
Definition: level1_cplx_impl.h:103
Eigen::internal::pandnot< Packet8f >
EIGEN_STRONG_INLINE Packet8f pandnot< Packet8f >(const Packet8f &a, const Packet8f &b)
Definition: AVX/PacketMath.h:211
Eigen::internal::predux
EIGEN_DEVICE_FUNC unpacket_traits< Packet >::type predux(const Packet &a)
Definition: GenericPacketMath.h:323
Eigen::internal::Packet2d
__m128d Packet2d
Definition: SSE/PacketMath.h:57
EIGEN_DEBUG_UNALIGNED_LOAD
#define EIGEN_DEBUG_UNALIGNED_LOAD
Definition: GenericPacketMath.h:31
Eigen::internal::unpacket_traits::alignment
@ alignment
Definition: XprHelper.h:165
a
Scalar * a
Definition: cholesky.cpp:26
Eigen::internal::SsePrefetchPtrType
const typedef char * SsePrefetchPtrType
Definition: SSE/PacketMath.h:415
Eigen::internal::predux_max< Packet4d >
EIGEN_STRONG_INLINE double predux_max< Packet4d >(const Packet4d &a)
Definition: AVX/PacketMath.h:447
Eigen::internal::pload< Packet8i >
EIGEN_STRONG_INLINE Packet8i pload< Packet8i >(const int *from)
Definition: AVX/PacketMath.h:216
Eigen::internal::ploadu< Packet8f >
EIGEN_STRONG_INLINE Packet8f ploadu< Packet8f >(const float *from)
Definition: AVX/PacketMath.h:218
Eigen::internal::pstore< int >
EIGEN_STRONG_INLINE void pstore< int >(int *to, const Packet4i &from)
Definition: AltiVec/PacketMath.h:269
Eigen::internal::pmadd
EIGEN_STRONG_INLINE Packet4f pmadd(const Packet4f &a, const Packet4f &b, const Packet4f &c)
Definition: AltiVec/PacketMath.h:388
Eigen::internal::packet_traits< double >::half
Packet2d half
Definition: AVX/PacketMath.h:81
Eigen::internal::unpacket_traits< Packet4d >::type
double type
Definition: AVX/PacketMath.h:117
Eigen::internal::plset< Packet4d >
EIGEN_STRONG_INLINE Packet4d plset< Packet4d >(const double &a)
Definition: AVX/PacketMath.h:128
Eigen::internal::pstoreu< double >
EIGEN_STRONG_INLINE void pstoreu< double >(double *to, const Packet4d &from)
Definition: AVX/PacketMath.h:256
Eigen::internal::packet_traits::HasHalfPacket
@ HasHalfPacket
Definition: GenericPacketMath.h:104
Eigen::internal::prefetch< int >
EIGEN_STRONG_INLINE void prefetch< int >(const int *addr)
Definition: AltiVec/PacketMath.h:535
Eigen::internal::pabs
EIGEN_STRONG_INLINE Packet4f pabs(const Packet4f &a)
Definition: AltiVec/PacketMath.h:548
Eigen::internal::packet_traits< double >::type
Packet4d type
Definition: AVX/PacketMath.h:80
Eigen::internal::pmax< Packet4d >
EIGEN_STRONG_INLINE Packet4d pmax< Packet4d >(const Packet4d &a, const Packet4d &b)
Definition: AVX/PacketMath.h:191
Eigen::internal::preduxp< Packet4d >
EIGEN_STRONG_INLINE Packet4d preduxp< Packet4d >(const Packet4d *vecs)
Definition: AVX/PacketMath.h:387
Eigen::internal::pfloor< Packet8f >
EIGEN_STRONG_INLINE Packet8f pfloor< Packet8f >(const Packet8f &a)
Definition: AVX/PacketMath.h:199
Eigen::internal::Packet4f
__vector float Packet4f
Definition: AltiVec/PacketMath.h:34
Eigen::internal::pscatter< double, Packet4d >
EIGEN_DEVICE_FUNC void pscatter< double, Packet4d >(double *to, const Packet4d &from, Index stride)
Definition: AVX/PacketMath.h:285
Eigen::internal::pinsertlast
EIGEN_STRONG_INLINE Packet4cf pinsertlast(const Packet4cf &a, std::complex< float > b)
Definition: AVX/Complex.h:437
internal
Definition: BandTriangularSolver.h:13
Eigen::internal::pdiv< Packet8i >
EIGEN_STRONG_INLINE Packet8i pdiv< Packet8i >(const Packet8i &, const Packet8i &)
Definition: AVX/PacketMath.h:155
Eigen::internal::pstoreu< float >
EIGEN_STRONG_INLINE void pstoreu< float >(float *to, const Packet4f &from)
Definition: AltiVec/PacketMath.h:527
Eigen::internal::predux_min< Packet4d >
EIGEN_STRONG_INLINE double predux_min< Packet4d >(const Packet4d &a)
Definition: AVX/PacketMath.h:434
Eigen::Aligned32
@ Aligned32
Definition: Constants.h:231
Eigen::internal::pset1< Packet8i >
EIGEN_STRONG_INLINE Packet8i pset1< Packet8i >(const int &from)
Definition: AVX/PacketMath.h:122
Eigen::internal::prefetch< double >
EIGEN_STRONG_INLINE void prefetch< double >(const double *addr)
Definition: AVX/PacketMath.h:313
Eigen::internal::predux< Packet4d >
EIGEN_STRONG_INLINE double predux< Packet4d >(const Packet4d &a)
Definition: AVX/PacketMath.h:404
Eigen::internal::default_packet_traits::HasRound
@ HasRound
Definition: GenericPacketMath.h:88
Eigen::internal::pload1< Packet8f >
EIGEN_STRONG_INLINE Packet8f pload1< Packet8f >(const float *from)
Definition: AVX/PacketMath.h:124
Eigen::internal::pdiv< Packet8f >
EIGEN_STRONG_INLINE Packet8f pdiv< Packet8f >(const Packet8f &a, const Packet8f &b)
Definition: AVX/PacketMath.h:153
Eigen::internal::palign_impl
Definition: GenericPacketMath.h:492
Eigen::internal::unpacket_traits< Packet8i >::type
int type
Definition: AVX/PacketMath.h:118
EIGEN_DEBUG_ALIGNED_STORE
#define EIGEN_DEBUG_ALIGNED_STORE
Definition: GenericPacketMath.h:35
Eigen::internal::pstore< float >
EIGEN_STRONG_INLINE void pstore< float >(float *to, const Packet4f &from)
Definition: AltiVec/PacketMath.h:259
Eigen::internal::Packet8f
__m256 Packet8f
Definition: AVX/PacketMath.h:31
Eigen::internal::is_arithmetic
Definition: Meta.h:85
Eigen::Index
EIGEN_DEFAULT_DENSE_INDEX_TYPE Index
The Index type as used for the API.
Definition: Meta.h:33
EIGEN_FAST_MATH
#define EIGEN_FAST_MATH
Definition: Macros.h:472
Eigen::internal::padd< Packet8f >
EIGEN_STRONG_INLINE Packet8f padd< Packet8f >(const Packet8f &a, const Packet8f &b)
Definition: AVX/PacketMath.h:130
Eigen::internal::packet_traits::size
@ size
Definition: GenericPacketMath.h:102


control_box_rst
Author(s): Christoph Rösmann
autogenerated on Wed Mar 2 2022 00:05:59