10 #ifndef EIGEN_PACKET_MATH_ALTIVEC_H
11 #define EIGEN_PACKET_MATH_ALTIVEC_H
17 #ifndef EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD
18 #define EIGEN_CACHEFRIENDLY_PRODUCT_THRESHOLD 4
21 #ifndef EIGEN_HAS_SINGLE_INSTRUCTION_MADD
22 #define EIGEN_HAS_SINGLE_INSTRUCTION_MADD
26 #ifndef EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS
27 #define EIGEN_ARCH_DEFAULT_NUMBER_OF_REGISTERS 32
// Declare a local vector constant "the fast way": a compile-time brace
// initializer or a single splat instruction, avoiding a load from memory.
#define _EIGEN_DECLARE_CONST_FAST_Packet4f(NAME,X) \
  Packet4f p4f_##NAME = {X, X, X, X}

#define _EIGEN_DECLARE_CONST_FAST_Packet4i(NAME,X) \
  Packet4i p4i_##NAME = vec_splat_s32(X)

#define _EIGEN_DECLARE_CONST_FAST_Packet4ui(NAME,X) \
  Packet4ui p4ui_##NAME = {X, X, X, X}

#define _EIGEN_DECLARE_CONST_FAST_Packet8us(NAME,X) \
  Packet8us p8us_##NAME = {X, X, X, X, X, X, X, X}

#define _EIGEN_DECLARE_CONST_FAST_Packet16uc(NAME,X) \
  Packet16uc p16uc_##NAME = {X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X}
// Declare a local vector constant by splatting the (possibly runtime)
// value X into every lane via pset1<Packet>().
#define _EIGEN_DECLARE_CONST_Packet4f(NAME,X) \
  Packet4f p4f_##NAME = pset1<Packet4f>(X)

#define _EIGEN_DECLARE_CONST_Packet4i(NAME,X) \
  Packet4i p4i_##NAME = pset1<Packet4i>(X)

#define _EIGEN_DECLARE_CONST_Packet2d(NAME,X) \
  Packet2d p2d_##NAME = pset1<Packet2d>(X)

#define _EIGEN_DECLARE_CONST_Packet2l(NAME,X) \
  Packet2l p2l_##NAME = pset1<Packet2l>(X)

// Declare a Packet4f whose float lanes carry the bit pattern of the
// integer X (bitwise reinterpret, not a numeric int->float conversion).
#define _EIGEN_DECLARE_CONST_Packet4f_FROM_INT(NAME,X) \
  const Packet4f p4f_##NAME = reinterpret_cast<Packet4f>(pset1<Packet4i>(X))
// Pack (size, count, stride) into a PowerPC data-stream-touch (dst)
// control word: size in bits 24+, count in bits 16+, stride in the low bits.
#define DST_CTRL(size, count, stride) (((size) << 24) | ((count) << 16) | (stride))

// Scalar element type of a packet, e.g. __UNPACK_TYPE__(Packet4f) is float.
#define __UNPACK_TYPE__(PACKETNAME) typename unpacket_traits<PACKETNAME>::type
97 8, 9, 10, 11, 12, 13, 14, 15};
99 8, 9, 10, 11, 12, 13, 14, 15};
101 static Packet16uc p16uc_REVERSE32 = { 12,13,14,15, 8,9,10,11, 4,5,6,7, 0,1,2,3 };
102 static Packet16uc p16uc_REVERSE16 = { 14,15, 12,13, 10,11, 8,9, 6,7, 4,5, 2,3, 0,1 };
103 static Packet16uc p16uc_REVERSE8 = { 15,14,13,12,11,10,9,8,7,6,5,4,3,2,1,0 };
105 static Packet16uc p16uc_DUPLICATE32_HI = { 0,1,2,3, 0,1,2,3, 4,5,6,7, 4,5,6,7 };
106 static Packet16uc p16uc_DUPLICATE16_HI = { 0,1,0,1, 2,3,2,3, 4,5,4,5, 6,7,6,7 };
107 static Packet16uc p16uc_DUPLICATE8_HI = { 0,0, 1,1, 2,2, 3,3, 4,4, 5,5, 6,6, 7,7 };
108 static const Packet16uc p16uc_DUPLICATE16_EVEN= { 0,1 ,0,1, 4,5, 4,5, 8,9, 8,9, 12,13, 12,13 };
109 static const Packet16uc p16uc_DUPLICATE16_ODD = { 2,3 ,2,3, 6,7, 6,7, 10,11, 10,11, 14,15, 14,15 };
111 static Packet16uc p16uc_QUADRUPLICATE16_HI = { 0,1,0,1,0,1,0,1, 2,3,2,3,2,3,2,3 };
118 static Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };
125 static Packet16uc p16uc_REVERSE64 = { 8,9,10,11, 12,13,14,15, 0,1,2,3, 4,5,6,7 };
129 #endif // _BIG_ENDIAN
142 #endif // _BIG_ENDIAN
144 #if EIGEN_HAS_BUILTIN(__builtin_prefetch) || EIGEN_COMP_GNUC
145 #define EIGEN_PPC_PREFETCH(ADDR) __builtin_prefetch(ADDR);
147 #define EIGEN_PPC_PREFETCH(ADDR) asm( " dcbt [%[addr]]\n" :: [addr] "r" (ADDR) : "cc" );
173 #if !EIGEN_COMP_CLANG
215 #if !EIGEN_COMP_CLANG
347 typedef unsigned short int type;
378 for (
int i=0;
i< 16;
i++)
379 s << vt.n[
i] <<
", ";
390 for (
int i=0;
i< 16;
i++)
391 s << vt.n[
i] <<
", ";
402 s << vt.n[0] <<
", " << vt.n[1] <<
", " << vt.n[2] <<
", " << vt.n[3];
413 s << vt.n[0] <<
", " << vt.n[1] <<
", " << vt.n[2] <<
", " << vt.n[3];
424 s << vt.n[0] <<
", " << vt.n[1] <<
", " << vt.n[2] <<
", " << vt.n[3];
428 template <
typename Packet>
438 return vec_ld(0, from);
445 return pload_common<Packet4f>(from);
450 return pload_common<Packet4i>(from);
455 return pload_common<Packet8s>(from);
460 return pload_common<Packet8us>(from);
465 return pload_common<Packet16c>(from);
470 return pload_common<Packet16uc>(from);
475 return pload_common<Packet8us>(
reinterpret_cast<const unsigned short int*
>(from));
478 template <
typename Packet>
485 vec_xst(from, 0, to);
493 pstore_common<Packet4f>(to, from);
498 pstore_common<Packet4i>(to, from);
503 pstore_common<Packet8s>(to, from);
508 pstore_common<Packet8us>(to, from);
513 pstore_common<Packet8us>(
reinterpret_cast<unsigned short int*
>(to), from);
518 pstore_common<Packet16c>(to, from);
523 pstore_common<Packet16uc>(to, from);
526 template<
typename Packet>
529 Packet v = {from, from, from, from};
533 template<
typename Packet>
536 Packet v = {from, from, from, from, from, from, from, from};
540 template<
typename Packet>
543 Packet v = {from, from, from, from, from, from, from, from, from, from, from, from, from, from, from, from};
548 return pset1_size4<Packet4f>(from);
552 return pset1_size4<Packet4i>(from);
556 return pset1_size8<Packet8s>(from);
560 return pset1_size8<Packet8us>(from);
564 return pset1_size16<Packet16c>(from);
568 return pset1_size16<Packet16uc>(from);
576 return pset1_size8<Packet8us>(
reinterpret_cast<const unsigned short int&
>(from));
583 a3 = pload<Packet>(
a);
584 a0 = vec_splat(
a3, 0);
585 a1 = vec_splat(
a3, 1);
586 a2 = vec_splat(
a3, 2);
587 a3 = vec_splat(
a3, 3);
594 pbroadcast4_common<Packet4f>(
a, a0,
a1,
a2,
a3);
600 pbroadcast4_common<Packet4i>(
a, a0,
a1,
a2,
a3);
606 a[0] = from[0*stride];
607 a[1] = from[1*stride];
608 a[2] = from[2*stride];
609 a[3] = from[3*stride];
610 return pload<Packet>(
a);
615 return pgather_common<Packet4f>(from, stride);
620 return pgather_common<Packet4i>(from, stride);
626 a[0] = from[0*stride];
627 a[1] = from[1*stride];
628 a[2] = from[2*stride];
629 a[3] = from[3*stride];
630 a[4] = from[4*stride];
631 a[5] = from[5*stride];
632 a[6] = from[6*stride];
633 a[7] = from[7*stride];
634 return pload<Packet>(
a);
639 return pgather_size8<Packet8s>(from, stride);
644 return pgather_size8<Packet8us>(from, stride);
649 return pgather_size8<Packet8bf>(from, stride);
655 a[0] = from[0*stride];
656 a[1] = from[1*stride];
657 a[2] = from[2*stride];
658 a[3] = from[3*stride];
659 a[4] = from[4*stride];
660 a[5] = from[5*stride];
661 a[6] = from[6*stride];
662 a[7] = from[7*stride];
663 a[8] = from[8*stride];
664 a[9] = from[9*stride];
665 a[10] = from[10*stride];
666 a[11] = from[11*stride];
667 a[12] = from[12*stride];
668 a[13] = from[13*stride];
669 a[14] = from[14*stride];
670 a[15] = from[15*stride];
671 return pload<Packet>(
a);
677 return pgather_size16<Packet16c>(from, stride);
682 return pgather_size16<Packet16uc>(from, stride);
688 pstore<__UNPACK_TYPE__(Packet)>(
a, from);
697 pscatter_size4<Packet4f>(to, from, stride);
702 pscatter_size4<Packet4i>(to, from, stride);
708 pstore<__UNPACK_TYPE__(Packet)>(
a, from);
722 pscatter_size8<Packet8s>(to, from, stride);
727 pscatter_size8<Packet8us>(to, from, stride);
732 pscatter_size8<Packet8bf>(to, from, stride);
738 pstore<__UNPACK_TYPE__(Packet)>(
a, from);
749 to[10*stride] =
a[10];
750 to[11*stride] =
a[11];
751 to[12*stride] =
a[12];
752 to[13*stride] =
a[13];
753 to[14*stride] =
a[14];
754 to[15*stride] =
a[15];
759 pscatter_size16<Packet16c>(to, from, stride);
764 pscatter_size16<Packet16uc>(to, from, stride);
805 #ifndef __VSX__ // VSX actually provides a div instruction
813 y_1 = vec_madd(y_0,
t, y_0);
817 return vec_div(
a,
b);
822 {
eigen_assert(
false &&
"packet integer division are not supported by AltiVec");
837 __asm__ (
"xvcmpgesp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" :
"=&wa" (
ret) :
"wa" (
a),
"wa" (
b));
840 return vec_min(
a,
b);
855 __asm__ (
"xvcmpgtsp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" :
"=&wa" (
ret) :
"wa" (
a),
"wa" (
b));
858 return vec_max(
a,
b);
918 return vec_sel(
b,
a,
reinterpret_cast<Packet4ui>(mask));
927 __asm__(
"xvrspiz %x0, %x1\n\t"
931 __asm__(
"vrfiz %0, %1\n\t"
944 __asm__(
"xvrspic %x0, %x1\n\t"
957 MSQ = vec_ld(0, (
unsigned char *)from);
958 LSQ = vec_ld(15, (
unsigned char *)from);
959 mask = vec_lvsl(0, from);
961 return (
Packet) vec_perm(MSQ, LSQ, mask);
970 return ploadu_common<Packet4f>(from);
974 return ploadu_common<Packet4i>(from);
978 return ploadu_common<Packet8s>(from);
982 return ploadu_common<Packet8us>(from);
986 return ploadu_common<Packet8us>(
reinterpret_cast<const unsigned short int*
>(from));
990 return ploadu_common<Packet16c>(from);
994 return ploadu_common<Packet16uc>(from);
1000 if((std::ptrdiff_t(from) % 16) == 0)
p = pload<Packet>(from);
1001 else p = ploadu<Packet>(from);
1006 return ploaddup_common<Packet4f>(from);
1010 return ploaddup_common<Packet4i>(from);
1075 MSQ = vec_ld(0, (
unsigned char *)to);
1076 LSQ = vec_ld(15, (
unsigned char *)to);
1077 edgeAlign = vec_lvsl(0, to);
1078 edges=vec_perm(LSQ,MSQ,edgeAlign);
1079 align = vec_lvsr( 0, to );
1082 vec_st( LSQ, 15, (
unsigned char *)to );
1083 vec_st( MSQ, 0, (
unsigned char *)to );
1085 vec_xst(from, 0, to);
1090 pstoreu_common<Packet4f>(to, from);
1094 pstoreu_common<Packet4i>(to, from);
1098 pstoreu_common<Packet8s>(to, from);
1102 pstoreu_common<Packet8us>(to, from);
1106 pstoreu_common<Packet8us>(
reinterpret_cast<unsigned short int*
>(to), from);
1110 pstoreu_common<Packet16c>(to, from);
1114 pstoreu_common<Packet16uc>(to, from);
1130 return pfirst_common<Packet8s>(
a);
1134 return pfirst_common<Packet8us>(
a);
1139 return pfirst_common<Packet16c>(
a);
1144 return pfirst_common<Packet16uc>(
a);
1173 return preverse<Packet8us>(
a);
1197 return reinterpret_cast<Packet4f>(r);
1204 return reinterpret_cast<Packet4f>(r);
1210 return vec_sr(
a, p4ui_mask);
1216 return vec_sl(
a, p4ui_mask);
1222 return vec_sl(
a, p8us_mask);
1227 return vec_sr(
a, p8us_mask);
1231 return plogical_shift_left<16>(
reinterpret_cast<Packet4f>(bf.
m_val));
1238 reinterpret_cast<Packet4f>(p4ui_high_mask)
1247 bf_odd =
pand(
reinterpret_cast<Packet4f>(p4ui_high_mask), odd);
1248 bf_even = plogical_shift_right<16>(even);
1254 Packet4ui lsb = plogical_shift_right<16>(input);
1274 reinterpret_cast<Packet4ui>(is_max_exp),
1275 reinterpret_cast<Packet4ui>(is_mant_zero)
1279 reinterpret_cast<Packet4ui>(is_zero_exp),
1280 reinterpret_cast<Packet4ui>(is_mant_zero)
1284 input = vec_sel(input, p4ui_nan, nan_selector);
1285 input = vec_sel(input,
reinterpret_cast<Packet4ui>(p4f), subnormal_selector);
1288 input = plogical_shift_right<16>(input);
1289 return reinterpret_cast<Packet8us>(input);
1295 bf_odd = plogical_shift_left<16>(bf_odd);
// Apply a unary float operation OP to a bfloat16 packet: widen the even
// and odd bf16 lanes to two Packet4f, run OP on each, then narrow back
// with F32ToBf16. Intended to expand as the whole body of a Packet8bf
// function. Fix: dropped the trailing '\' after the return statement,
// which made the macro swallow the directive on the following line.
#define BF16_TO_F32_UNARY_OP_WRAPPER(OP, A) \
  Packet4f a_even = Bf16ToF32Even(A);\
  Packet4f a_odd = Bf16ToF32Odd(A);\
  Packet4f op_even = OP(a_even);\
  Packet4f op_odd = OP(a_odd);\
  return F32ToBf16(op_even, op_odd);
// Apply a binary float operation OP to two bfloat16 packets: widen the
// even and odd bf16 lanes of A and B to Packet4f, run OP pairwise, then
// narrow the two results back with F32ToBf16. Intended to expand as the
// whole body of a Packet8bf function. Fix: dropped the trailing '\' after
// the return statement, which made the macro swallow the following line.
#define BF16_TO_F32_BINARY_OP_WRAPPER(OP, A, B) \
  Packet4f a_even = Bf16ToF32Even(A);\
  Packet4f a_odd = Bf16ToF32Odd(A);\
  Packet4f b_even = Bf16ToF32Even(B);\
  Packet4f b_odd = Bf16ToF32Odd(B);\
  Packet4f op_even = OP(a_even, b_even);\
  Packet4f op_odd = OP(a_odd, b_odd);\
  return F32ToBf16(op_even, op_odd);
// Same as BF16_TO_F32_BINARY_OP_WRAPPER but the result is narrowed with
// F32ToBf16Bool — presumably to preserve comparison-mask semantics rather
// than rounding (TODO confirm against F32ToBf16Bool's contract). Fix:
// dropped the trailing '\' after the return statement, which made the
// macro swallow the following line.
#define BF16_TO_F32_BINARY_OP_WRAPPER_BOOL(OP, A, B) \
  Packet4f a_even = Bf16ToF32Even(A);\
  Packet4f a_odd = Bf16ToF32Odd(A);\
  Packet4f b_even = Bf16ToF32Even(B);\
  Packet4f b_odd = Bf16ToF32Odd(B);\
  Packet4f op_even = OP(a_even, b_even);\
  Packet4f op_odd = OP(a_odd, b_odd);\
  return F32ToBf16Bool(op_even, op_odd);
1403 Packet4f pmadd_even = pmadd<Packet4f>(a_even, b_even, c_even);
1404 Packet4f pmadd_odd = pmadd<Packet4f>(a_odd, b_odd, c_odd);
1405 return F32ToBf16(pmadd_even, pmadd_odd);
1447 b = vec_sld(
a,
a, 8);
1449 b = vec_sld(sum, sum, 4);
1457 sum = vec_sums(
a, p4i_ZERO);
1459 sum = vec_sld(sum, p4i_ZERO, 12);
1461 sum = vec_sld(p4i_ZERO, sum, 4);
1470 float f32_result = redux_even + redux_odd;
1481 EIGEN_ALIGN16 int first_loader[4] = { vt.n[0], vt.n[1], vt.n[2], vt.n[3] };
1482 EIGEN_ALIGN16 int second_loader[4] = { vt.n[4], vt.n[5], vt.n[6], vt.n[7] };
1491 return predux_size8<Packet8s>(
a);
1496 return predux_size8<Packet8us>(
a);
1507 EIGEN_ALIGN16 int first_loader[4] = { vt.n[0], vt.n[1], vt.n[2], vt.n[3] };
1508 EIGEN_ALIGN16 int second_loader[4] = { vt.n[4], vt.n[5], vt.n[6], vt.n[7] };
1509 EIGEN_ALIGN16 int third_loader[4] = { vt.n[8], vt.n[9], vt.n[10], vt.n[11] };
1510 EIGEN_ALIGN16 int fourth_loader[4] = { vt.n[12], vt.n[13], vt.n[14], vt.n[15] };
1523 return predux_size16<Packet16c>(
a);
1528 return predux_size16<Packet16uc>(
a);
1544 return aux[0] * aux[1] * aux[2] * aux[3];
1551 pair = vec_mul(
a, vec_sld(
a,
a, 8));
1552 quad = vec_mul(pair, vec_sld(pair, pair, 4));
1553 octo = vec_mul(quad, vec_sld(quad, quad, 2));
1562 pair = vec_mul(
a, vec_sld(
a,
a, 8));
1563 quad = vec_mul(pair, vec_sld(pair, pair, 4));
1564 octo = vec_mul(quad, vec_sld(quad, quad, 2));
1573 float f32_result = redux_even * redux_odd;
1582 pair = vec_mul(
a, vec_sld(
a,
a, 8));
1583 quad = vec_mul(pair, vec_sld(pair, pair, 4));
1584 octo = vec_mul(quad, vec_sld(quad, quad, 2));
1585 result = vec_mul(octo, vec_sld(octo, octo, 1));
1594 pair = vec_mul(
a, vec_sld(
a,
a, 8));
1595 quad = vec_mul(pair, vec_sld(pair, pair, 4));
1596 octo = vec_mul(quad, vec_sld(quad, quad, 2));
1597 result = vec_mul(octo, vec_sld(octo, octo, 1));
1607 b = vec_min(
a, vec_sld(
a,
a, 8));
1608 res = vec_min(
b, vec_sld(
b,
b, 4));
1615 return predux_min4<Packet4f>(
a);
1620 return predux_min4<Packet4i>(
a);
1627 float f32_result = (
std::min)(redux_even, redux_odd);
1636 pair = vec_min(
a, vec_sld(
a,
a, 8));
1639 quad = vec_min(pair, vec_sld(pair, pair, 4));
1642 octo = vec_min(quad, vec_sld(quad, quad, 2));
1651 pair = vec_min(
a, vec_sld(
a,
a, 8));
1654 quad = vec_min(pair, vec_sld(pair, pair, 4));
1657 octo = vec_min(quad, vec_sld(quad, quad, 2));
1665 pair = vec_min(
a, vec_sld(
a,
a, 8));
1666 quad = vec_min(pair, vec_sld(pair, pair, 4));
1667 octo = vec_min(quad, vec_sld(quad, quad, 2));
1668 result = vec_min(octo, vec_sld(octo, octo, 1));
1677 pair = vec_min(
a, vec_sld(
a,
a, 8));
1678 quad = vec_min(pair, vec_sld(pair, pair, 4));
1679 octo = vec_min(quad, vec_sld(quad, quad, 2));
1680 result = vec_min(octo, vec_sld(octo, octo, 1));
1688 b = vec_max(
a, vec_sld(
a,
a, 8));
1689 res = vec_max(
b, vec_sld(
b,
b, 4));
1695 return predux_max4<Packet4f>(
a);
1700 return predux_max4<Packet4i>(
a);
1707 float f32_result = (
std::max)(redux_even, redux_odd);
1716 pair = vec_max(
a, vec_sld(
a,
a, 8));
1719 quad = vec_max(pair, vec_sld(pair, pair, 4));
1722 octo = vec_max(quad, vec_sld(quad, quad, 2));
1731 pair = vec_max(
a, vec_sld(
a,
a, 8));
1734 quad = vec_max(pair, vec_sld(pair, pair, 4));
1737 octo = vec_max(quad, vec_sld(quad, quad, 2));
1745 pair = vec_max(
a, vec_sld(
a,
a, 8));
1746 quad = vec_max(pair, vec_sld(pair, pair, 4));
1747 octo = vec_max(quad, vec_sld(quad, quad, 2));
1748 result = vec_max(octo, vec_sld(octo, octo, 1));
1757 pair = vec_max(
a, vec_sld(
a,
a, 8));
1758 quad = vec_max(pair, vec_sld(pair, pair, 4));
1759 octo = vec_max(quad, vec_sld(quad, quad, 2));
1760 result = vec_max(octo, vec_sld(octo, octo, 1));
1767 return vec_any_ne(
x,
pzero(
x));
1777 kernel.
packet[0] = vec_mergeh(t0, t2);
1778 kernel.
packet[1] = vec_mergel(t0, t2);
1779 kernel.
packet[2] = vec_mergeh(t1, t3);
1780 kernel.
packet[3] = vec_mergel(t1, t3);
1785 ptranpose_common<Packet4f>(kernel);
1790 ptranpose_common<Packet4i>(kernel);
1800 kernel.
packet[0] = vec_mergeh(t0, t2);
1801 kernel.
packet[1] = vec_mergel(t0, t2);
1802 kernel.
packet[2] = vec_mergeh(t1, t3);
1803 kernel.
packet[3] = vec_mergel(t1, t3);
1813 kernel.
packet[0] = vec_mergeh(t0, t2);
1814 kernel.
packet[1] = vec_mergel(t0, t2);
1815 kernel.
packet[2] = vec_mergeh(t1, t3);
1816 kernel.
packet[3] = vec_mergel(t1, t3);
1828 kernel.
packet[0] = vec_mergeh(t0, t2);
1829 kernel.
packet[1] = vec_mergel(t0, t2);
1830 kernel.
packet[2] = vec_mergeh(t1, t3);
1831 kernel.
packet[3] = vec_mergel(t1, t3);
1841 kernel.
packet[0] = vec_mergeh(t0, t2);
1842 kernel.
packet[1] = vec_mergel(t0, t2);
1843 kernel.
packet[2] = vec_mergeh(t1, t3);
1844 kernel.
packet[3] = vec_mergel(t1, t3);
1855 kernel.
packet[0] = vec_mergeh(t0, t2);
1856 kernel.
packet[1] = vec_mergel(t0, t2);
1857 kernel.
packet[2] = vec_mergeh(t1, t3);
1858 kernel.
packet[3] = vec_mergel(t1, t3);
1873 sum[0] = vec_mergeh(
v[0],
v[4]);
1874 sum[1] = vec_mergel(
v[0],
v[4]);
1875 sum[2] = vec_mergeh(
v[1],
v[5]);
1876 sum[3] = vec_mergel(
v[1],
v[5]);
1877 sum[4] = vec_mergeh(
v[2],
v[6]);
1878 sum[5] = vec_mergel(
v[2],
v[6]);
1879 sum[6] = vec_mergeh(
v[3],
v[7]);
1880 sum[7] = vec_mergel(
v[3],
v[7]);
1882 kernel.
packet[0] = vec_mergeh(sum[0], sum[4]);
1883 kernel.
packet[1] = vec_mergel(sum[0], sum[4]);
1884 kernel.
packet[2] = vec_mergeh(sum[1], sum[5]);
1885 kernel.
packet[3] = vec_mergel(sum[1], sum[5]);
1886 kernel.
packet[4] = vec_mergeh(sum[2], sum[6]);
1887 kernel.
packet[5] = vec_mergel(sum[2], sum[6]);
1888 kernel.
packet[6] = vec_mergeh(sum[3], sum[7]);
1889 kernel.
packet[7] = vec_mergel(sum[3], sum[7]);
1904 sum[0] = vec_mergeh(
v[0],
v[4]);
1905 sum[1] = vec_mergel(
v[0],
v[4]);
1906 sum[2] = vec_mergeh(
v[1],
v[5]);
1907 sum[3] = vec_mergel(
v[1],
v[5]);
1908 sum[4] = vec_mergeh(
v[2],
v[6]);
1909 sum[5] = vec_mergel(
v[2],
v[6]);
1910 sum[6] = vec_mergeh(
v[3],
v[7]);
1911 sum[7] = vec_mergel(
v[3],
v[7]);
1913 kernel.
packet[0] = vec_mergeh(sum[0], sum[4]);
1914 kernel.
packet[1] = vec_mergel(sum[0], sum[4]);
1915 kernel.
packet[2] = vec_mergeh(sum[1], sum[5]);
1916 kernel.
packet[3] = vec_mergel(sum[1], sum[5]);
1917 kernel.
packet[4] = vec_mergeh(sum[2], sum[6]);
1918 kernel.
packet[5] = vec_mergel(sum[2], sum[6]);
1919 kernel.
packet[6] = vec_mergeh(sum[3], sum[7]);
1920 kernel.
packet[7] = vec_mergel(sum[3], sum[7]);
1935 sum[0] = vec_mergeh(
v[0].m_val,
v[4].m_val);
1936 sum[1] = vec_mergel(
v[0].m_val,
v[4].m_val);
1937 sum[2] = vec_mergeh(
v[1].m_val,
v[5].m_val);
1938 sum[3] = vec_mergel(
v[1].m_val,
v[5].m_val);
1939 sum[4] = vec_mergeh(
v[2].m_val,
v[6].m_val);
1940 sum[5] = vec_mergel(
v[2].m_val,
v[6].m_val);
1941 sum[6] = vec_mergeh(
v[3].m_val,
v[7].m_val);
1942 sum[7] = vec_mergel(
v[3].m_val,
v[7].m_val);
1944 kernel.
packet[0] = vec_mergeh(sum[0].m_val, sum[4].m_val);
1945 kernel.
packet[1] = vec_mergel(sum[0].m_val, sum[4].m_val);
1946 kernel.
packet[2] = vec_mergeh(sum[1].m_val, sum[5].m_val);
1947 kernel.
packet[3] = vec_mergel(sum[1].m_val, sum[5].m_val);
1948 kernel.
packet[4] = vec_mergeh(sum[2].m_val, sum[6].m_val);
1949 kernel.
packet[5] = vec_mergel(sum[2].m_val, sum[6].m_val);
1950 kernel.
packet[6] = vec_mergeh(sum[3].m_val, sum[7].m_val);
1951 kernel.
packet[7] = vec_mergel(sum[3].m_val, sum[7].m_val);
1956 Packet16c step1[16], step2[16], step3[16];
1958 step1[0] = vec_mergeh(kernel.
packet[0], kernel.
packet[8]);
1959 step1[1] = vec_mergel(kernel.
packet[0], kernel.
packet[8]);
1960 step1[2] = vec_mergeh(kernel.
packet[1], kernel.
packet[9]);
1961 step1[3] = vec_mergel(kernel.
packet[1], kernel.
packet[9]);
1962 step1[4] = vec_mergeh(kernel.
packet[2], kernel.
packet[10]);
1963 step1[5] = vec_mergel(kernel.
packet[2], kernel.
packet[10]);
1964 step1[6] = vec_mergeh(kernel.
packet[3], kernel.
packet[11]);
1965 step1[7] = vec_mergel(kernel.
packet[3], kernel.
packet[11]);
1966 step1[8] = vec_mergeh(kernel.
packet[4], kernel.
packet[12]);
1967 step1[9] = vec_mergel(kernel.
packet[4], kernel.
packet[12]);
1968 step1[10] = vec_mergeh(kernel.
packet[5], kernel.
packet[13]);
1969 step1[11] = vec_mergel(kernel.
packet[5], kernel.
packet[13]);
1970 step1[12] = vec_mergeh(kernel.
packet[6], kernel.
packet[14]);
1971 step1[13] = vec_mergel(kernel.
packet[6], kernel.
packet[14]);
1972 step1[14] = vec_mergeh(kernel.
packet[7], kernel.
packet[15]);
1973 step1[15] = vec_mergel(kernel.
packet[7], kernel.
packet[15]);
1975 step2[0] = vec_mergeh(step1[0], step1[8]);
1976 step2[1] = vec_mergel(step1[0], step1[8]);
1977 step2[2] = vec_mergeh(step1[1], step1[9]);
1978 step2[3] = vec_mergel(step1[1], step1[9]);
1979 step2[4] = vec_mergeh(step1[2], step1[10]);
1980 step2[5] = vec_mergel(step1[2], step1[10]);
1981 step2[6] = vec_mergeh(step1[3], step1[11]);
1982 step2[7] = vec_mergel(step1[3], step1[11]);
1983 step2[8] = vec_mergeh(step1[4], step1[12]);
1984 step2[9] = vec_mergel(step1[4], step1[12]);
1985 step2[10] = vec_mergeh(step1[5], step1[13]);
1986 step2[11] = vec_mergel(step1[5], step1[13]);
1987 step2[12] = vec_mergeh(step1[6], step1[14]);
1988 step2[13] = vec_mergel(step1[6], step1[14]);
1989 step2[14] = vec_mergeh(step1[7], step1[15]);
1990 step2[15] = vec_mergel(step1[7], step1[15]);
1992 step3[0] = vec_mergeh(step2[0], step2[8]);
1993 step3[1] = vec_mergel(step2[0], step2[8]);
1994 step3[2] = vec_mergeh(step2[1], step2[9]);
1995 step3[3] = vec_mergel(step2[1], step2[9]);
1996 step3[4] = vec_mergeh(step2[2], step2[10]);
1997 step3[5] = vec_mergel(step2[2], step2[10]);
1998 step3[6] = vec_mergeh(step2[3], step2[11]);
1999 step3[7] = vec_mergel(step2[3], step2[11]);
2000 step3[8] = vec_mergeh(step2[4], step2[12]);
2001 step3[9] = vec_mergel(step2[4], step2[12]);
2002 step3[10] = vec_mergeh(step2[5], step2[13]);
2003 step3[11] = vec_mergel(step2[5], step2[13]);
2004 step3[12] = vec_mergeh(step2[6], step2[14]);
2005 step3[13] = vec_mergel(step2[6], step2[14]);
2006 step3[14] = vec_mergeh(step2[7], step2[15]);
2007 step3[15] = vec_mergel(step2[7], step2[15]);
2009 kernel.
packet[0] = vec_mergeh(step3[0], step3[8]);
2010 kernel.
packet[1] = vec_mergel(step3[0], step3[8]);
2011 kernel.
packet[2] = vec_mergeh(step3[1], step3[9]);
2012 kernel.
packet[3] = vec_mergel(step3[1], step3[9]);
2013 kernel.
packet[4] = vec_mergeh(step3[2], step3[10]);
2014 kernel.
packet[5] = vec_mergel(step3[2], step3[10]);
2015 kernel.
packet[6] = vec_mergeh(step3[3], step3[11]);
2016 kernel.
packet[7] = vec_mergel(step3[3], step3[11]);
2017 kernel.
packet[8] = vec_mergeh(step3[4], step3[12]);
2018 kernel.
packet[9] = vec_mergel(step3[4], step3[12]);
2019 kernel.
packet[10] = vec_mergeh(step3[5], step3[13]);
2020 kernel.
packet[11] = vec_mergel(step3[5], step3[13]);
2021 kernel.
packet[12] = vec_mergeh(step3[6], step3[14]);
2022 kernel.
packet[13] = vec_mergel(step3[6], step3[14]);
2023 kernel.
packet[14] = vec_mergeh(step3[7], step3[15]);
2024 kernel.
packet[15] = vec_mergel(step3[7], step3[15]);
2031 step1[0] = vec_mergeh(kernel.
packet[0], kernel.
packet[8]);
2032 step1[1] = vec_mergel(kernel.
packet[0], kernel.
packet[8]);
2033 step1[2] = vec_mergeh(kernel.
packet[1], kernel.
packet[9]);
2034 step1[3] = vec_mergel(kernel.
packet[1], kernel.
packet[9]);
2035 step1[4] = vec_mergeh(kernel.
packet[2], kernel.
packet[10]);
2036 step1[5] = vec_mergel(kernel.
packet[2], kernel.
packet[10]);
2037 step1[6] = vec_mergeh(kernel.
packet[3], kernel.
packet[11]);
2038 step1[7] = vec_mergel(kernel.
packet[3], kernel.
packet[11]);
2039 step1[8] = vec_mergeh(kernel.
packet[4], kernel.
packet[12]);
2040 step1[9] = vec_mergel(kernel.
packet[4], kernel.
packet[12]);
2041 step1[10] = vec_mergeh(kernel.
packet[5], kernel.
packet[13]);
2042 step1[11] = vec_mergel(kernel.
packet[5], kernel.
packet[13]);
2043 step1[12] = vec_mergeh(kernel.
packet[6], kernel.
packet[14]);
2044 step1[13] = vec_mergel(kernel.
packet[6], kernel.
packet[14]);
2045 step1[14] = vec_mergeh(kernel.
packet[7], kernel.
packet[15]);
2046 step1[15] = vec_mergel(kernel.
packet[7], kernel.
packet[15]);
2048 step2[0] = vec_mergeh(step1[0], step1[8]);
2049 step2[1] = vec_mergel(step1[0], step1[8]);
2050 step2[2] = vec_mergeh(step1[1], step1[9]);
2051 step2[3] = vec_mergel(step1[1], step1[9]);
2052 step2[4] = vec_mergeh(step1[2], step1[10]);
2053 step2[5] = vec_mergel(step1[2], step1[10]);
2054 step2[6] = vec_mergeh(step1[3], step1[11]);
2055 step2[7] = vec_mergel(step1[3], step1[11]);
2056 step2[8] = vec_mergeh(step1[4], step1[12]);
2057 step2[9] = vec_mergel(step1[4], step1[12]);
2058 step2[10] = vec_mergeh(step1[5], step1[13]);
2059 step2[11] = vec_mergel(step1[5], step1[13]);
2060 step2[12] = vec_mergeh(step1[6], step1[14]);
2061 step2[13] = vec_mergel(step1[6], step1[14]);
2062 step2[14] = vec_mergeh(step1[7], step1[15]);
2063 step2[15] = vec_mergel(step1[7], step1[15]);
2065 step3[0] = vec_mergeh(step2[0], step2[8]);
2066 step3[1] = vec_mergel(step2[0], step2[8]);
2067 step3[2] = vec_mergeh(step2[1], step2[9]);
2068 step3[3] = vec_mergel(step2[1], step2[9]);
2069 step3[4] = vec_mergeh(step2[2], step2[10]);
2070 step3[5] = vec_mergel(step2[2], step2[10]);
2071 step3[6] = vec_mergeh(step2[3], step2[11]);
2072 step3[7] = vec_mergel(step2[3], step2[11]);
2073 step3[8] = vec_mergeh(step2[4], step2[12]);
2074 step3[9] = vec_mergel(step2[4], step2[12]);
2075 step3[10] = vec_mergeh(step2[5], step2[13]);
2076 step3[11] = vec_mergel(step2[5], step2[13]);
2077 step3[12] = vec_mergeh(step2[6], step2[14]);
2078 step3[13] = vec_mergel(step2[6], step2[14]);
2079 step3[14] = vec_mergeh(step2[7], step2[15]);
2080 step3[15] = vec_mergel(step2[7], step2[15]);
2082 kernel.
packet[0] = vec_mergeh(step3[0], step3[8]);
2083 kernel.
packet[1] = vec_mergel(step3[0], step3[8]);
2084 kernel.
packet[2] = vec_mergeh(step3[1], step3[9]);
2085 kernel.
packet[3] = vec_mergel(step3[1], step3[9]);
2086 kernel.
packet[4] = vec_mergeh(step3[2], step3[10]);
2087 kernel.
packet[5] = vec_mergel(step3[2], step3[10]);
2088 kernel.
packet[6] = vec_mergeh(step3[3], step3[11]);
2089 kernel.
packet[7] = vec_mergel(step3[3], step3[11]);
2090 kernel.
packet[8] = vec_mergeh(step3[4], step3[12]);
2091 kernel.
packet[9] = vec_mergel(step3[4], step3[12]);
2092 kernel.
packet[10] = vec_mergeh(step3[5], step3[13]);
2093 kernel.
packet[11] = vec_mergel(step3[5], step3[13]);
2094 kernel.
packet[12] = vec_mergeh(step3[6], step3[14]);
2095 kernel.
packet[13] = vec_mergel(step3[6], step3[14]);
2096 kernel.
packet[14] = vec_mergeh(step3[7], step3[15]);
2097 kernel.
packet[15] = vec_mergel(step3[7], step3[15]);
2104 return vec_sel(elsePacket, thenPacket, mask);
2108 return pblend4<Packet4i>(ifPacket, thenPacket, elsePacket);
2112 return pblend4<Packet4f>(ifPacket, thenPacket, elsePacket);
2127 return vec_sel(elsePacket, thenPacket, mask);
2131 return pblend<Packet8us>(ifPacket, thenPacket, elsePacket);
2141 return vec_sel(elsePacket, thenPacket, mask);
2151 return vec_sel(elsePacket, thenPacket, mask);
2191 return vec_cts(
a,0);
2195 return vec_ctu(
a,0);
2199 return vec_ctf(
a,0);
2203 return vec_ctf(
a,0);
2217 if(vec_any_gt(int_even, p4ui_low_mask)){
2218 overflow_selector = vec_cmpgt(int_even, p4ui_low_mask);
2219 low_even = vec_sel(low_even, p4ui_low_mask, overflow_selector);
2221 if(vec_any_gt(int_odd, p4ui_low_mask)){
2222 overflow_selector = vec_cmpgt(int_odd, p4ui_low_mask);
2223 low_odd = vec_sel(low_even, p4ui_low_mask, overflow_selector);
2226 low_odd = plogical_shift_left<16>(low_odd);
2229 return reinterpret_cast<Packet8us>(int_final);
2237 Packet4ui int_odd = plogical_shift_right<16>(int_cast);
2240 return F32ToBf16(float_even, float_odd);
2257 typedef __vector
unsigned long long Packet2ul;
2258 typedef __vector
long long Packet2l;
2259 #if EIGEN_COMP_CLANG
2262 typedef __vector __bool
long Packet2bl;
2265 static Packet2l p2l_ONE = { 1, 1 };
2267 static Packet2ul p2ul_SIGN = { 0x8000000000000000ull, 0x8000000000000000ull };
2268 static Packet2ul p2ul_PREV0DOT5 = { 0x3FDFFFFFFFFFFFFFull, 0x3FDFFFFFFFFFFFFFull };
2271 static Packet2d p2d_MZERO = { numext::bit_cast<double>(0x8000000000000000ull),
2272 numext::bit_cast<double>(0x8000000000000000ull) };
2282 return vec_splat(
a, index);
2285 template<>
struct packet_traits<double> : default_packet_traits
2326 s << vt.n[0] <<
", " << vt.n[1];
2337 s << vt.n[0] <<
", " << vt.n[1];
2345 return vec_xl(0,
const_cast<double *
>(from));
2351 vec_xst(from, 0, to);
2360 Packet2l v = {
static_cast<long long>(from),
static_cast<long long>(from)};
2378 af[0] = from[0*stride];
2379 af[1] = from[1*stride];
2386 to[0*stride] = af[0];
2387 to[1*stride] = af[1];
2410 __asm__ (
"xvcmpgedp %x0,%x1,%x2\n\txxsel %x0,%x1,%x2,%x0" :
"=&wa" (
ret) :
"wa" (
a),
"wa" (
b));
2418 __asm__ (
"xvcmpgtdp %x0,%x2,%x1\n\txxsel %x0,%x1,%x2,%x0" :
"=&wa" (
ret) :
"wa" (
a),
"wa" (
b));
2427 return vec_nor(
c,
c);
2443 __asm__(
"xvrdpiz %x0, %x1\n\t"
2455 __asm__(
"xvrdpic %x0, %x1\n\t"
2465 return vec_xl(0,
const_cast<double*
>(from));
2473 return vec_splat_dbl<0>(
p);
2479 vec_xst(from, 0, to);
2500 #if EIGEN_GNUC_AT_LEAST(5, 4) || \
2501 (EIGEN_GNUC_AT(6, 1) && __GNUC_PATCHLEVEL__ >= 1)
2502 return vec_cts(
x, 0);
2505 memcpy(tmp, &
x,
sizeof(tmp));
2506 Packet2l l = {
static_cast<long long>(tmp[0]),
2507 static_cast<long long>(tmp[1]) };
2514 unsigned long long tmp[2];
2515 memcpy(tmp, &
x,
sizeof(tmp));
2516 Packet2d d = {
static_cast<double>(tmp[0]),
2517 static_cast<double>(tmp[1]) };
2528 #ifdef __POWER8_VECTOR__
2533 return vec_sl(
a, shift);
2539 return vec_sr(
a, shift);
2548 0x14, 0x15, 0x16, 0x17, 0x00, 0x01, 0x02, 0x03,
2549 0x1c, 0x1d, 0x1e, 0x1f, 0x08, 0x09, 0x0a, 0x0b };
2551 return vec_perm(p4i_ZERO,
a,
perm);
2553 return vec_perm(
a, p4i_ZERO,
perm);
2561 0x04, 0x05, 0x06, 0x07, 0x10, 0x11, 0x12, 0x13,
2562 0x0c, 0x0d, 0x0e, 0x0f, 0x18, 0x19, 0x1a, 0x1b };
2564 return vec_perm(p4i_ZERO,
a,
perm);
2566 return vec_perm(
a, p4i_ZERO,
perm);
2570 template<
int N,
typename EnableIf =
void>
2571 struct plogical_shift_left_impl;
2574 struct plogical_shift_left_impl<
N, typename enable_if<(
N < 32) && (
N >= 0)>::
type> {
2576 static const unsigned n =
static_cast<unsigned>(
N);
2579 static const unsigned m =
static_cast<unsigned>(32 -
N);
2581 const Packet4i out_hi = vec_sl(ai, shift);
2582 const Packet4i out_lo = shift_even_left(vec_sr(ai, shift_right));
2588 struct plogical_shift_left_impl<
N, typename enable_if<(
N >= 32)>::
type> {
2590 static const unsigned m =
static_cast<unsigned>(
N - 32);
2593 return reinterpret_cast<Packet2l>(shift_even_left(vec_sl(ai, shift)));
2602 template<
int N,
typename EnableIf =
void>
2603 struct plogical_shift_right_impl;
2606 struct plogical_shift_right_impl<
N, typename enable_if<(
N < 32) && (
N >= 0)>::
type> {
2608 static const unsigned n =
static_cast<unsigned>(
N);
2611 static const unsigned m =
static_cast<unsigned>(32 -
N);
2613 const Packet4i out_lo = vec_sr(ai, shift);
2620 struct plogical_shift_right_impl<
N, typename enable_if<(
N >= 32)>::
type> {
2622 static const unsigned m =
static_cast<unsigned>(
N - 32);
2625 return reinterpret_cast<Packet2l>(shift_odd_right(vec_sr(ai, shift)));
2646 c =
reinterpret_cast<Packet2d>(plogical_shift_left<52>(
b +
bias));
2656 return pcast<Packet2l, Packet2d>(plogical_shift_right<52>(
reinterpret_cast<Packet2l>(
pabs(
a))));
2691 ptranspose(PacketBlock<Packet2d,2>& kernel) {
2695 kernel.packet[0] = t0;
2696 kernel.packet[1] = t1;
2700 Packet2l select = { ifPacket.select[0], ifPacket.select[1] };
2701 Packet2bl mask =
reinterpret_cast<Packet2bl
>( vec_cmpeq(
reinterpret_cast<Packet2d>(select),
reinterpret_cast<Packet2d>(p2l_ONE)) );
2702 return vec_sel(elsePacket, thenPacket, mask);
2711 #endif // EIGEN_PACKET_MATH_ALTIVEC_H