#ifndef EIGEN_PACKET_MATH_NEON_H
#define EIGEN_PACKET_MATH_NEON_H
#ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
#define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 8
#endif

#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
#endif

#ifndef EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
#define EIGEN_HAS_SINGLE_INSTRUCTION_CJMADD
#endif

#ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
#if EIGEN_ARCH_ARM64
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
#else
#define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 16
#endif
#endif
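// On compilers whose arm_neon.h maps several NEON vector types to the same
// builtin type (MSVC does this; note the "#endif // EIGEN_COMP_MSVC" closing
// this branch below), eigen_packet_wrapper gives each packet typedef a
// distinct C++ type via its unique_id parameter so overload resolution on
// packet types keeps working.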
template<typename T, int unique_id>
struct eigen_packet_wrapper
{
  operator T&() { return m_val; }
  operator const T&() const { return m_val; }
  eigen_packet_wrapper() {}
  eigen_packet_wrapper(const T &v) : m_val(v) {}
  eigen_packet_wrapper& operator=(const T &v) {
    m_val = v;
    return *this;
  }

  T m_val;
};
typedef eigen_packet_wrapper<float32x2_t,0> Packet2f;
typedef eigen_packet_wrapper<float32x4_t,1> Packet4f;
typedef eigen_packet_wrapper<int32x4_t  ,2> Packet4i;
typedef eigen_packet_wrapper<int32x2_t  ,3> Packet2i;
typedef eigen_packet_wrapper<uint32x4_t ,4> Packet4ui;
#endif // EIGEN_COMP_MSVC
#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  const Packet4f p4f_##NAME = pset1<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = vreinterpretq_f32_u32(pset1<int32_t>(X))

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  const Packet4i p4i_##NAME = pset1<Packet4i>(X)
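// EIGEN_ARM_PREFETCH(ADDR) emits a best-effort data-prefetch hint for ADDR,
// using whichever mechanism the toolchain provides (the AArch64 prfm
// instruction, __builtin_prefetch, the __pld intrinsic, or a 32-bit ARM pld
// instruction); when none is available it expands to nothing.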
#if EIGEN_ARCH_ARM64
#define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__("prfm pldl1keep, [%[addr]]\n" ::[addr] "r"(ADDR) : );
#elif EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC
#define EIGEN_ARM_PREFETCH(ADDR) __builtin_prefetch(ADDR);
#elif defined(__pld)
#define EIGEN_ARM_PREFETCH(ADDR) __pld(ADDR)
#elif EIGEN_ARCH_ARM32
#define EIGEN_ARM_PREFETCH(ADDR) __asm__ __volatile__ ("pld [%[addr]]\n" :: [addr] "r" (ADDR) : );
#else
// by default no explicit prefetching
#define EIGEN_ARM_PREFETCH(ADDR)
#endif
template<> struct packet_traits<float> : default_packet_traits
#if EIGEN_GNUC_AT_MOST(4,4) && !EIGEN_COMP_LLVM
// Shims for old gcc (<= 4.4): cast float* to float32_t* so the NEON
// load/store intrinsics accept plain float pointers.
EIGEN_STRONG_INLINE float32x4_t vld1q_f32(const float* x) { return ::vld1q_f32((const float32_t*)x); }
EIGEN_STRONG_INLINE float32x2_t vld1_f32 (const float* x) { return ::vld1_f32 ((const float32_t*)x); }
EIGEN_STRONG_INLINE float32x2_t vld1_dup_f32(const float* x) { return ::vld1_dup_f32((const float32_t*)x); }
EIGEN_STRONG_INLINE void vst1q_f32(float* to, float32x4_t from) { ::vst1q_f32((float32_t*)to,from); }
EIGEN_STRONG_INLINE void vst1_f32 (float* to, float32x2_t from) { ::vst1_f32 ((float32_t*)to,from); }
#endif
const float f[] = {0, 1, 2, 3};

const int32_t i[] = {0, 1, 2, 3};
return vdivq_f32(a, b);
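// Below is the 32-bit ARM fallback for pdiv: there is no vdivq_f32 outside
// AArch64, so a/b is approximated with vrecpeq_f32 (a rough estimate of 1/b)
// refined by Newton-Raphson steps (vrecpsq_f32 computes 2 - b*inv, and
// multiplying it into inv roughly doubles the precision each time), then
// multiplied by a.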
inv = vrecpeq_f32(b);
restep = vrecpsq_f32(b, inv);
inv = vmulq_f32(restep, inv);
div = vmulq_f32(a, inv);
{ eigen_assert(false && "packet integer division is not supported by NEON");
  return pset1<Packet4i>(0);
}
#if (defined __ARM_FEATURE_FMA) && !(EIGEN_COMP_CLANG && EIGEN_ARCH_ARM)
#if EIGEN_COMP_CLANG && EIGEN_ARCH_ARM
  "vmla.f32 %q[r], %q[a], %q[b]"
return vmlaq_f32(c, a, b);
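// pand / por / pxor / pandnot on Packet4f reinterpret the lanes as u32,
// apply the integer bitwise instruction (vandq/vorrq/veorq/vbicq), and
// reinterpret the result back to f32.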
return vreinterpretq_f32_u32(vandq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
return vreinterpretq_f32_u32(vorrq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
return vreinterpretq_f32_u32(veorq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
return vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(a),vreinterpretq_u32_f32(b)));
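// ploaddup loads two scalars and duplicates each across adjacent lanes:
// vld1_dup fills a 2-lane half with one value, and vcombine yields
// {from[0], from[0], from[1], from[1]}.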
lo = vld1_dup_f32(from);
hi = vld1_dup_f32(from+1);
return vcombine_f32(lo, hi);

lo = vld1_dup_s32(from);
hi = vld1_dup_s32(from+1);
return vcombine_s32(lo, hi);
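// pgather builds a packet from strided memory by inserting each element with
// vsetq_lane; pscatter does the reverse, extracting lanes with vgetq_lane and
// writing them back at the given stride.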
res = vsetq_lane_f32(from[0*stride], res, 0);
res = vsetq_lane_f32(from[1*stride], res, 1);
res = vsetq_lane_f32(from[2*stride], res, 2);
res = vsetq_lane_f32(from[3*stride], res, 3);

res = vsetq_lane_s32(from[0*stride], res, 0);
res = vsetq_lane_s32(from[1*stride], res, 1);
res = vsetq_lane_s32(from[2*stride], res, 2);
res = vsetq_lane_s32(from[3*stride], res, 3);

to[stride*0] = vgetq_lane_f32(from, 0);
to[stride*1] = vgetq_lane_f32(from, 1);
to[stride*2] = vgetq_lane_f32(from, 2);
to[stride*3] = vgetq_lane_f32(from, 3);

to[stride*0] = vgetq_lane_s32(from, 0);
to[stride*1] = vgetq_lane_s32(from, 1);
to[stride*2] = vgetq_lane_s32(from, 2);
to[stride*3] = vgetq_lane_s32(from, 3);
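// preverse reverses the four lanes in two steps: vrev64q swaps the elements
// within each 64-bit half, then vcombine swaps the two halves.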
float32x2_t a_lo, a_hi;
float32x4_t a_r64;
a_r64 = vrev64q_f32(a);
a_lo = vget_low_f32(a_r64);
a_hi = vget_high_f32(a_r64);
return vcombine_f32(a_hi, a_lo);

int32x2_t a_lo, a_hi;
int32x4_t a_r64;
a_r64 = vrev64q_s32(a);
a_lo = vget_low_s32(a_r64);
a_hi = vget_high_s32(a_r64);
return vcombine_s32(a_hi, a_lo);
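// predux (horizontal sum) uses pairwise adds: vpadd(a_lo, a_hi) yields
// {a0+a1, a2+a3}, a second vpadd folds those into the total, and lane 0 is
// extracted as the scalar result.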
float32x2_t a_lo, a_hi, sum;
a_lo = vget_low_f32(a);
a_hi = vget_high_f32(a);
sum = vpadd_f32(a_lo, a_hi);
sum = vpadd_f32(sum, sum);
return vget_lane_f32(sum, 0);
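// preduxp sums four packets column-wise: two rounds of vzipq interleave the
// inputs into a 4x4 transpose, then three vector adds produce
// {sum(vecs[0]), sum(vecs[1]), sum(vecs[2]), sum(vecs[3])}.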
float32x4x2_t vtrn1, vtrn2, res1, res2;
float32x4_t sum1, sum2, sum;
vtrn1 = vzipq_f32(vecs[0], vecs[2]);
vtrn2 = vzipq_f32(vecs[1], vecs[3]);
res1 = vzipq_f32(vtrn1.val[0], vtrn2.val[0]);
res2 = vzipq_f32(vtrn1.val[1], vtrn2.val[1]);
sum1 = vaddq_f32(res1.val[0], res1.val[1]);
sum2 = vaddq_f32(res2.val[0], res2.val[1]);
sum = vaddq_f32(sum1, sum2);
return sum;
int32x2_t a_lo, a_hi, sum;
a_lo = vget_low_s32(a);
a_hi = vget_high_s32(a);
sum = vpadd_s32(a_lo, a_hi);
sum = vpadd_s32(sum, sum);
return vget_lane_s32(sum, 0);
int32x4x2_t vtrn1, vtrn2, res1, res2;
int32x4_t sum1, sum2, sum;
vtrn1 = vzipq_s32(vecs[0], vecs[2]);
vtrn2 = vzipq_s32(vecs[1], vecs[3]);
res1 = vzipq_s32(vtrn1.val[0], vtrn2.val[0]);
res2 = vzipq_s32(vtrn1.val[1], vtrn2.val[1]);
sum1 = vaddq_s32(res1.val[0], res1.val[1]);
sum2 = vaddq_s32(res2.val[0], res2.val[1]);
sum = vaddq_s32(sum1, sum2);
return sum;
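// predux_mul (horizontal product): NEON has no pairwise multiply, so the low
// and high halves are multiplied lane-wise ({a0*a2, a1*a3}), that pair is
// multiplied by its vrev64-swapped copy, and lane 0 then holds a0*a1*a2*a3.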
float32x2_t a_lo, a_hi, prod;
a_lo = vget_low_f32(a);
a_hi = vget_high_f32(a);
prod = vmul_f32(a_lo, a_hi);
prod = vmul_f32(prod, vrev64_f32(prod));
return vget_lane_f32(prod, 0);

int32x2_t a_lo, a_hi, prod;
a_lo = vget_low_s32(a);
a_hi = vget_high_s32(a);
prod = vmul_s32(a_lo, a_hi);
prod = vmul_s32(prod, vrev64_s32(prod));
return vget_lane_s32(prod, 0);
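// predux_min / predux_max follow the same halving pattern with the pairwise
// vpmin / vpmax instructions: one pass across the two halves, one pass to
// fold the remaining pair, then lane 0 is extracted.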
float32x2_t a_lo, a_hi, min;
a_lo = vget_low_f32(a);
a_hi = vget_high_f32(a);
min = vpmin_f32(a_lo, a_hi);
min = vpmin_f32(min, min);
return vget_lane_f32(min, 0);
int32x2_t a_lo, a_hi, min;
a_lo = vget_low_s32(a);
a_hi = vget_high_s32(a);
min = vpmin_s32(a_lo, a_hi);
min = vpmin_s32(min, min);
return vget_lane_s32(min, 0);
float32x2_t a_lo, a_hi, max;
a_lo = vget_low_f32(a);
a_hi = vget_high_f32(a);
max = vpmax_f32(a_lo, a_hi);
max = vpmax_f32(max, max);
return vget_lane_f32(max, 0);
int32x2_t a_lo, a_hi, max;
a_lo = vget_low_s32(a);
a_hi = vget_high_s32(a);
max = vpmax_s32(a_lo, a_hi);
max = vpmax_s32(max, max);
return vget_lane_s32(max, 0);
#define PALIGN_NEON(Offset,Type,Command) \
template<>\
struct palign_impl<Offset,Type>\
{\
  EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
  { first = Command(first, second, Offset); }\
};
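// ptranspose performs a 4x4 in-register transpose: vzipq interleaves rows
// 0/1 and 2/3, and vcombine of the low/high halves of those interleaved
// pairs reassembles the four transposed rows.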
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4f,4>& kernel) {
  float32x4x2_t tmp1 = vzipq_f32(kernel.packet[0], kernel.packet[1]);
  float32x4x2_t tmp2 = vzipq_f32(kernel.packet[2], kernel.packet[3]);

  kernel.packet[0] = vcombine_f32(vget_low_f32(tmp1.val[0]), vget_low_f32(tmp2.val[0]));
  kernel.packet[1] = vcombine_f32(vget_high_f32(tmp1.val[0]), vget_high_f32(tmp2.val[0]));
  kernel.packet[2] = vcombine_f32(vget_low_f32(tmp1.val[1]), vget_low_f32(tmp2.val[1]));
  kernel.packet[3] = vcombine_f32(vget_high_f32(tmp1.val[1]), vget_high_f32(tmp2.val[1]));
}
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet4i,4>& kernel) {
  int32x4x2_t tmp1 = vzipq_s32(kernel.packet[0], kernel.packet[1]);
  int32x4x2_t tmp2 = vzipq_s32(kernel.packet[2], kernel.packet[3]);
  kernel.packet[0] = vcombine_s32(vget_low_s32(tmp1.val[0]), vget_low_s32(tmp2.val[0]));
  kernel.packet[1] = vcombine_s32(vget_high_s32(tmp1.val[0]), vget_high_s32(tmp2.val[0]));
  kernel.packet[2] = vcombine_s32(vget_low_s32(tmp1.val[1]), vget_low_s32(tmp2.val[1]));
  kernel.packet[3] = vcombine_s32(vget_high_s32(tmp1.val[1]), vget_high_s32(tmp2.val[1]));
}
#ifdef __apple_build_version__
// Flags Apple clang builds (__apple_build_version__ < 6010000) whose NEON double support is problematic.
#define EIGEN_APPLE_DOUBLE_NEON_BUG (__apple_build_version__ < 6010000)
#else
#define EIGEN_APPLE_DOUBLE_NEON_BUG 0
#endif
#if EIGEN_ARCH_ARM64 && !EIGEN_APPLE_DOUBLE_NEON_BUG
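// The two templates below provide vreinterpretq_u64_f64 / vreinterpretq_f64_u64
// as plain casts, presumably for toolchains whose arm_neon.h does not declare
// these intrinsics.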
template <typename T>
uint64x2_t vreinterpretq_u64_f64(T a)
{
  return (uint64x2_t) a;
}

template <typename T>
float64x2_t vreinterpretq_f64_u64(T a)
{
  return (float64x2_t) a;
}
typedef float64x2_t Packet2d;
typedef float64x1_t Packet1d;
template<> struct packet_traits<double> : default_packet_traits
const double countdown_raw[] = {0.0,1.0};
const Packet2d countdown = vld1q_f64(countdown_raw);
#ifdef __ARM_FEATURE_FMA
return vreinterpretq_f64_u64(vandq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
return vreinterpretq_f64_u64(vorrq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
return vreinterpretq_f64_u64(veorq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
return vreinterpretq_f64_u64(vbicq_u64(vreinterpretq_u64_f64(a),vreinterpretq_u64_f64(b)));
return vld1q_dup_f64(from);
res = vsetq_lane_f64(from[0*stride], res, 0);
res = vsetq_lane_f64(from[1*stride], res, 1);

to[stride*0] = vgetq_lane_f64(from, 0);
to[stride*1] = vgetq_lane_f64(from, 1);
#if EIGEN_COMP_CLANG && defined(__apple_build_version__)
float64x2_t trn1, trn2;
trn1 = vzip1q_f64(vecs[0], vecs[1]);
trn2 = vzip2q_f64(vecs[0], vecs[1]);
return vaddq_f64(trn1, trn2);
#if EIGEN_COMP_CLANG && defined(__apple_build_version__)
#define PALIGN_NEON(Offset,Type,Command) \
template<>\
struct palign_impl<Offset,Type>\
{\
  EIGEN_STRONG_INLINE static void run(Type& first, const Type& second)\
  { first = Command(first, second, Offset); }\
};
EIGEN_DEVICE_FUNC inline void
ptranspose(PacketBlock<Packet2d,2>& kernel) {
  float64x2_t trn1 = vzip1q_f64(kernel.packet[0], kernel.packet[1]);
  float64x2_t trn2 = vzip2q_f64(kernel.packet[0], kernel.packet[1]);

  kernel.packet[0] = trn1;
  kernel.packet[1] = trn2;
}
#endif // EIGEN_ARCH_ARM64

#endif // EIGEN_PACKET_MATH_NEON_H