#ifndef EIGEN_PACKET_MATH_HALF_CUDA_H
#define EIGEN_PACKET_MATH_HALF_CUDA_H

#if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDACC__) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300

template<> struct is_arithmetic<half2> { enum { value = true }; };
template<> struct packet_traits<Eigen::half> : default_packet_traits {
  typedef half2 type;
  typedef half2 half;
  // ...
};
template<> struct unpacket_traits<half2> {
  typedef Eigen::half type;
  enum { size = 2, alignment = Aligned16 };
  typedef half2 half;
};
template<> __device__ EIGEN_STRONG_INLINE half2 pset1<half2>(const Eigen::half& from) {
  return __half2half2(from);
}
template<> __device__ EIGEN_STRONG_INLINE half2 pload<half2>(const Eigen::half* from) {
  return *reinterpret_cast<const half2*>(from);
}
template<> __device__ EIGEN_STRONG_INLINE half2 ploadu<half2>(const Eigen::half* from) {
  return __halves2half2(from[0], from[1]);
}
template<> __device__ EIGEN_STRONG_INLINE half2 ploaddup<half2>(const Eigen::half* from) {
  return __halves2half2(from[0], from[0]);
}
template<> __device__ EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const half2& from) {
  *reinterpret_cast<half2*>(to) = from;
}
template<> __device__ EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const half2& from) {
  to[0] = __low2half(from);
  to[1] = __high2half(from);
}
template<> __device__ EIGEN_ALWAYS_INLINE half2 ploadt_ro<half2, Aligned>(const Eigen::half* from) {
#if __CUDA_ARCH__ >= 350
  return __ldg(reinterpret_cast<const half2*>(from));
#else
  return __halves2half2(*(from+0), *(from+1));
#endif
}
template<> __device__ EIGEN_ALWAYS_INLINE half2 ploadt_ro<half2, Unaligned>(const Eigen::half* from) {
#if __CUDA_ARCH__ >= 350
  return __halves2half2(__ldg(from+0), __ldg(from+1));
#else
  return __halves2half2(*(from+0), *(from+1));
#endif
}
template<> __device__ EIGEN_STRONG_INLINE half2 pgather<Eigen::half, half2>(const Eigen::half* from, Index stride) {
  return __halves2half2(from[0*stride], from[1*stride]);
}
template<> __device__ EIGEN_STRONG_INLINE void pscatter<Eigen::half, half2>(Eigen::half* to, const half2& from, Index stride) {
  to[stride*0] = __low2half(from);
  to[stride*1] = __high2half(from);
}
template<> __device__ EIGEN_STRONG_INLINE half2 pabs<half2>(const half2& a) {
  half2 result;
  result.x = a.x & 0x7FFF7FFF;
  return result;
}
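// The sign of an fp16 value lives in bit 15 of its 16-bit lane, so a single
// AND with 0x7FFF7FFF clears the sign of both packed lanes at once.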
__device__ EIGEN_STRONG_INLINE void ptranspose(PacketBlock<half2,2>& kernel) {
  __half a1 = __low2half(kernel.packet[0]);
  __half a2 = __high2half(kernel.packet[0]);
  __half b1 = __low2half(kernel.packet[1]);
  __half b2 = __high2half(kernel.packet[1]);
  kernel.packet[0] = __halves2half2(a1, b1);
  kernel.packet[1] = __halves2half2(a2, b2);
}
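// The 2x2 transpose swaps the off-diagonal halves: packets (a1, a2), (b1, b2)
// become (a1, b1), (a2, b2).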
template<> __device__ EIGEN_STRONG_INLINE half2 plset<half2>(const Eigen::half& a) {
#if __CUDA_ARCH__ >= 530
  return __halves2half2(a, __hadd(a, __float2half(1.0f)));
#else
  float f = __half2float(a) + 1.0f;
  return __halves2half2(a, __float2half(f));
#endif
}
template<> __device__ EIGEN_STRONG_INLINE half2 padd<half2>(const half2& a, const half2& b) {
#if __CUDA_ARCH__ >= 530
  return __hadd2(a, b);
#else
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  float b1 = __low2float(b);
  float b2 = __high2float(b);
  float r1 = a1 + b1;
  float r2 = a2 + b2;
  return __floats2half2_rn(r1, r2);
#endif
}
template<> __device__ EIGEN_STRONG_INLINE half2 psub<half2>(const half2& a, const half2& b) {
#if __CUDA_ARCH__ >= 530
  return __hsub2(a, b);
#else
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  float b1 = __low2float(b);
  float b2 = __high2float(b);
  float r1 = a1 - b1;
  float r2 = a2 - b2;
  return __floats2half2_rn(r1, r2);
#endif
}
template<> __device__ EIGEN_STRONG_INLINE half2 pnegate(const half2& a) {
#if __CUDA_ARCH__ >= 530
  return __hneg2(a);
#else
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  return __floats2half2_rn(-a1, -a2);
#endif
}
template<> __device__ EIGEN_STRONG_INLINE half2 pmul<half2>(const half2& a, const half2& b) {
#if __CUDA_ARCH__ >= 530
  return __hmul2(a, b);
#else
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  float b1 = __low2float(b);
  float b2 = __high2float(b);
  float r1 = a1 * b1;
  float r2 = a2 * b2;
  return __floats2half2_rn(r1, r2);
#endif
}
template<> __device__ EIGEN_STRONG_INLINE half2 pmadd<half2>(const half2& a, const half2& b, const half2& c) {
#if __CUDA_ARCH__ >= 530
  return __hfma2(a, b, c);
#else
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  float b1 = __low2float(b);
  float b2 = __high2float(b);
  float c1 = __low2float(c);
  float c2 = __high2float(c);
  float r1 = a1 * b1 + c1;
  float r2 = a2 * b2 + c2;
  return __floats2half2_rn(r1, r2);
#endif
}
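// Hypothetical usage sketch, not part of the original header: a device-side
// y = 2*x + y over half2-packed arrays. The function name, `n`, `x`, `y` and
// the assumption that n is even are illustrative only.
__device__ inline void axpy2_half2_sketch(int n, const half2* x, half2* y) {
  half2 two = __float2half2_rn(2.0f);  // broadcast 2.0f into both lanes
  for (int i = 0; i < n / 2; ++i) {
    // Single __hfma2 on sm_53+, float fallback otherwise (see pmadd above).
    y[i] = pmadd(two, x[i], y[i]);
  }
}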
template<> __device__ EIGEN_STRONG_INLINE half2 pdiv<half2>(const half2& a, const half2& b) {
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  float b1 = __low2float(b);
  float b2 = __high2float(b);
  float r1 = a1 / b1;
  float r2 = a2 / b2;
  return __floats2half2_rn(r1, r2);
}
template<> __device__ EIGEN_STRONG_INLINE half2 pmin<half2>(const half2& a, const half2& b) {
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  float b1 = __low2float(b);
  float b2 = __high2float(b);
  __half r1 = a1 < b1 ? __low2half(a) : __low2half(b);
  __half r2 = a2 < b2 ? __high2half(a) : __high2half(b);
  return __halves2half2(r1, r2);
}
template<> __device__ EIGEN_STRONG_INLINE half2 pmax<half2>(const half2& a, const half2& b) {
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  float b1 = __low2float(b);
  float b2 = __high2float(b);
  __half r1 = a1 > b1 ? __low2half(a) : __low2half(b);
  __half r2 = a2 > b2 ? __high2half(a) : __high2half(b);
  return __halves2half2(r1, r2);
}
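// pmin/pmax compare in fp32 but return the original half lanes, so the
// selected values are exact (no round-trip through float rounding).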
template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux<half2>(const half2& a) {
#if __CUDA_ARCH__ >= 530
  return __hadd(__low2half(a), __high2half(a));
#else
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  return Eigen::half(a1 + a2);
#endif
}
template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_max<half2>(const half2& a) {
#if __CUDA_ARCH__ >= 530
  __half first = __low2half(a);
  __half second = __high2half(a);
  return __hgt(first, second) ? first : second;
#else
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  return a1 > a2 ? __low2half(a) : __high2half(a);
#endif
}
template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_min<half2>(const half2& a) {
#if __CUDA_ARCH__ >= 530
  __half first = __low2half(a);
  __half second = __high2half(a);
  return __hlt(first, second) ? first : second;
#else
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  return a1 < a2 ? __low2half(a) : __high2half(a);
#endif
}
template<> __device__ EIGEN_STRONG_INLINE Eigen::half predux_mul<half2>(const half2& a) {
#if __CUDA_ARCH__ >= 530
  return __hmul(__low2half(a), __high2half(a));
#else
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  return Eigen::half(a1 * a2);
#endif
}
template<> __device__ EIGEN_STRONG_INLINE half2 plog1p<half2>(const half2& a) {
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  float r1 = log1pf(a1);
  float r2 = log1pf(a2);
  return __floats2half2_rn(r1, r2);
}
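// log1p has no packed fp16 intrinsic, and fp16 lacks the precision for
// log(1+x) near zero, so each lane is widened to float, log1pf is applied,
// and the pair is rounded back with __floats2half2_rn.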
#if defined __CUDACC_VER__ && __CUDACC_VER__ >= 80000 && defined __CUDA_ARCH__ && __CUDA_ARCH__ >= 530

template<> __device__ EIGEN_STRONG_INLINE half2 plog<half2>(const half2& a) {
  return h2log(a);
}

template<> __device__ EIGEN_STRONG_INLINE half2 pexp<half2>(const half2& a) {
  return h2exp(a);
}

template<> __device__ EIGEN_STRONG_INLINE half2 psqrt<half2>(const half2& a) {
  return h2sqrt(a);
}

template<> __device__ EIGEN_STRONG_INLINE half2 prsqrt<half2>(const half2& a) {
  return h2rsqrt(a);
}

#else
template<> __device__ EIGEN_STRONG_INLINE half2 plog<half2>(const half2& a) {
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  float r1 = logf(a1);
  float r2 = logf(a2);
  return __floats2half2_rn(r1, r2);
}

template<> __device__ EIGEN_STRONG_INLINE half2 pexp<half2>(const half2& a) {
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  float r1 = expf(a1);
  float r2 = expf(a2);
  return __floats2half2_rn(r1, r2);
}

template<> __device__ EIGEN_STRONG_INLINE half2 psqrt<half2>(const half2& a) {
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  float r1 = sqrtf(a1);
  float r2 = sqrtf(a2);
  return __floats2half2_rn(r1, r2);
}

template<> __device__ EIGEN_STRONG_INLINE half2 prsqrt<half2>(const half2& a) {
  float a1 = __low2float(a);
  float a2 = __high2float(a);
  float r1 = rsqrtf(a1);
  float r2 = rsqrtf(a2);
  return __floats2half2_rn(r1, r2);
}

#endif
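// Summary: with CUDA >= 8.0 on sm_53+ the transcendental ops map to the
// vectorized fp16 intrinsics (h2log, h2exp, h2sqrt, h2rsqrt); otherwise each
// lane is widened to float, evaluated, and rounded back to half.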
#elif defined EIGEN_VECTORIZE_AVX512

template<> struct is_arithmetic<Packet16h> { enum { value = true }; };
template<> struct packet_traits<half> : default_packet_traits {
  typedef Packet16h type;
  // There is no half-size packet for Packet16h.
  typedef Packet16h half;
  // ...
};
template<> struct unpacket_traits<Packet16h> {
  typedef Eigen::half type;
  enum { size = 16, alignment = Aligned32 };
  typedef Packet16h half;
};
template<> EIGEN_STRONG_INLINE Packet16h pset1<Packet16h>(const Eigen::half& from) {
  Packet16h result;
  result.x = _mm256_set1_epi16(from.x);
  return result;
}
template<> EIGEN_STRONG_INLINE Packet16h pload<Packet16h>(const Eigen::half* from) {
  Packet16h result;
  result.x = _mm256_load_si256(reinterpret_cast<const __m256i*>(from));
  return result;
}

template<> EIGEN_STRONG_INLINE Packet16h ploadu<Packet16h>(const Eigen::half* from) {
  Packet16h result;
  result.x = _mm256_loadu_si256(reinterpret_cast<const __m256i*>(from));
  return result;
}

template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet16h& from) {
  _mm256_store_si256(reinterpret_cast<__m256i*>(to), from.x);
}

template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet16h& from) {
  _mm256_storeu_si256(reinterpret_cast<__m256i*>(to), from.x);
}
template<> EIGEN_STRONG_INLINE Packet16h ploadquad<Packet16h>(const Eigen::half* from) {
  Packet16h result;
  unsigned short a = from[0].x;
  unsigned short b = from[1].x;
  unsigned short c = from[2].x;
  unsigned short d = from[3].x;
  result.x = _mm256_set_epi16(d, d, d, d, c, c, c, c, b, b, b, b, a, a, a, a);
  return result;
}
EIGEN_STRONG_INLINE Packet16f half2float(const Packet16h& a) {
#ifdef EIGEN_HAS_FP16_C
  return _mm512_cvtph_ps(a.x);
#else
  // ... scalar per-lane conversion producing floats f0..ff ...
  return _mm512_set_ps(
      ff, fe, fd, fc, fb, fa, f9, f8, f7, f6, f5, f4, f3, f2, f1, f0);
#endif
}
EIGEN_STRONG_INLINE Packet16h float2half(const Packet16f& a) {
#ifdef EIGEN_HAS_FP16_C
  Packet16h result;
  result.x = _mm512_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
  return result;
#else
  // ... scalar per-lane conversion producing Eigen::half values h0..hf ...
  Packet16h result;
  result.x = _mm256_set_epi16(
      hf.x, he.x, hd.x, hc.x, hb.x, ha.x, h9.x, h8.x,
      h7.x, h6.x, h5.x, h4.x, h3.x, h2.x, h1.x, h0.x);
  return result;
#endif
}
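// Illustrative sketch, not part of the original header: every Packet16h
// arithmetic op follows the same convert-up/operate/convert-down pattern,
// which a hypothetical generic helper makes explicit (`Op` is illustrative).
template<typename Op>
EIGEN_STRONG_INLINE Packet16h binary_via_float_sketch(const Packet16h& a, const Packet16h& b, Op op) {
  Packet16f af = half2float(a);      // widen both operands to fp32
  Packet16f bf = half2float(b);
  return float2half(op(af, bf));     // operate in fp32, round back to half
}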
template<> EIGEN_STRONG_INLINE Packet16h padd<Packet16h>(const Packet16h& a, const Packet16h& b) {
  Packet16f af = half2float(a);
  Packet16f bf = half2float(b);
  Packet16f rf = padd(af, bf);
  return float2half(rf);
}
template<> EIGEN_STRONG_INLINE Packet16h pmul<Packet16h>(const Packet16h& a, const Packet16h& b) {
  Packet16f af = half2float(a);
  Packet16f bf = half2float(b);
  Packet16f rf = pmul(af, bf);
  return float2half(rf);
}
template<> EIGEN_STRONG_INLINE half predux<Packet16h>(const Packet16h& from) {
  Packet16f from_float = half2float(from);
  return half(predux(from_float));
}
template<> EIGEN_STRONG_INLINE Packet16h pgather<Eigen::half, Packet16h>(const Eigen::half* from, Index stride) {
  Packet16h result;
  result.x = _mm256_set_epi16(
      from[15*stride].x, from[14*stride].x, from[13*stride].x, from[12*stride].x,
      from[11*stride].x, from[10*stride].x, from[9*stride].x, from[8*stride].x,
      from[7*stride].x, from[6*stride].x, from[5*stride].x, from[4*stride].x,
      from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
  return result;
}
template<> EIGEN_STRONG_INLINE void pscatter<half, Packet16h>(half* to, const Packet16h& from, Index stride) {
  EIGEN_ALIGN64 half aux[16];
  pstore(aux, from);
  to[stride*0].x = aux[0].x;
  to[stride*1].x = aux[1].x;
  to[stride*2].x = aux[2].x;
  to[stride*3].x = aux[3].x;
  to[stride*4].x = aux[4].x;
  to[stride*5].x = aux[5].x;
  to[stride*6].x = aux[6].x;
  to[stride*7].x = aux[7].x;
  to[stride*8].x = aux[8].x;
  to[stride*9].x = aux[9].x;
  to[stride*10].x = aux[10].x;
  to[stride*11].x = aux[11].x;
  to[stride*12].x = aux[12].x;
  to[stride*13].x = aux[13].x;
  to[stride*14].x = aux[14].x;
  to[stride*15].x = aux[15].x;
}
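// Hypothetical helper, not part of Eigen: pscatter with the same stride
// inverts pgather, so a strided copy of 16 halves can be phrased as:
EIGEN_STRONG_INLINE void strided_copy16_sketch(const Eigen::half* src, Eigen::half* dst, Index stride) {
  Packet16h p = pgather<Eigen::half, Packet16h>(src, stride);  // gather 16 strided lanes
  pscatter<Eigen::half, Packet16h>(dst, p, stride);            // scatter them back out
}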
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16h,16>& kernel) {
  __m256i a = kernel.packet[0].x;
  __m256i b = kernel.packet[1].x;
  __m256i c = kernel.packet[2].x;
  __m256i d = kernel.packet[3].x;
  __m256i e = kernel.packet[4].x;
  __m256i f = kernel.packet[5].x;
  __m256i g = kernel.packet[6].x;
  __m256i h = kernel.packet[7].x;
  __m256i i = kernel.packet[8].x;
  __m256i j = kernel.packet[9].x;
  __m256i k = kernel.packet[10].x;
  __m256i l = kernel.packet[11].x;
  __m256i m = kernel.packet[12].x;
  __m256i n = kernel.packet[13].x;
  __m256i o = kernel.packet[14].x;
  __m256i p = kernel.packet[15].x;
  __m256i ab_07 = _mm256_unpacklo_epi16(a, b);
  __m256i cd_07 = _mm256_unpacklo_epi16(c, d);
  __m256i ef_07 = _mm256_unpacklo_epi16(e, f);
  __m256i gh_07 = _mm256_unpacklo_epi16(g, h);
  __m256i ij_07 = _mm256_unpacklo_epi16(i, j);
  __m256i kl_07 = _mm256_unpacklo_epi16(k, l);
  __m256i mn_07 = _mm256_unpacklo_epi16(m, n);
  __m256i op_07 = _mm256_unpacklo_epi16(o, p);

  __m256i ab_8f = _mm256_unpackhi_epi16(a, b);
  __m256i cd_8f = _mm256_unpackhi_epi16(c, d);
  __m256i ef_8f = _mm256_unpackhi_epi16(e, f);
  __m256i gh_8f = _mm256_unpackhi_epi16(g, h);
  __m256i ij_8f = _mm256_unpackhi_epi16(i, j);
  __m256i kl_8f = _mm256_unpackhi_epi16(k, l);
  __m256i mn_8f = _mm256_unpackhi_epi16(m, n);
  __m256i op_8f = _mm256_unpackhi_epi16(o, p);

  __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
  __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
  __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07);
  __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07);
  __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07);
  __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07);
  __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07);
  __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07);

  __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
  __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
  __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f);
  __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f);
  __m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f);
  __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f);
  __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f);
  __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f);

  __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03);
  __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03);
  __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03);
  __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03);
  __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47);
  __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47);
  __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47);
  __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47);
  __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b);
  __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b);
  __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b);
  __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b);
  __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf);
  __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf);
  __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf);
  __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf);

  __m256i a_p_0 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20);
  __m256i a_p_1 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);
  __m256i a_p_2 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);
  __m256i a_p_3 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);
  __m256i a_p_4 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);
  __m256i a_p_5 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);
  __m256i a_p_6 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);
  __m256i a_p_7 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);
  __m256i a_p_8 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);
  __m256i a_p_9 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);
  __m256i a_p_a = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);
  __m256i a_p_b = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);
  __m256i a_p_c = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);
  __m256i a_p_d = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);
  __m256i a_p_e = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);
  __m256i a_p_f = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31);

  kernel.packet[0].x = a_p_0;
  kernel.packet[1].x = a_p_1;
  kernel.packet[2].x = a_p_2;
  kernel.packet[3].x = a_p_3;
  kernel.packet[4].x = a_p_4;
  kernel.packet[5].x = a_p_5;
  kernel.packet[6].x = a_p_6;
  kernel.packet[7].x = a_p_7;
  kernel.packet[8].x = a_p_8;
  kernel.packet[9].x = a_p_9;
  kernel.packet[10].x = a_p_a;
  kernel.packet[11].x = a_p_b;
  kernel.packet[12].x = a_p_c;
  kernel.packet[13].x = a_p_d;
  kernel.packet[14].x = a_p_e;
  kernel.packet[15].x = a_p_f;
}
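// The 16x16 transpose is the classic unpack ladder: epi16 unpacks interleave
// row pairs, epi32 unpacks merge pairs into quads, epi64 unpacks build
// octets, and _mm256_permute2x128_si256 finally exchanges the 128-bit lanes
// that the in-lane unpacks cannot cross.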
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16h,8>& kernel) {
  EIGEN_ALIGN64 half in[8][16];
  pstore<half>(in[0], kernel.packet[0]);
  pstore<half>(in[1], kernel.packet[1]);
  pstore<half>(in[2], kernel.packet[2]);
  pstore<half>(in[3], kernel.packet[3]);
  pstore<half>(in[4], kernel.packet[4]);
  pstore<half>(in[5], kernel.packet[5]);
  pstore<half>(in[6], kernel.packet[6]);
  pstore<half>(in[7], kernel.packet[7]);
  EIGEN_ALIGN64 half out[8][16];

  for (int i = 0; i < 8; ++i) {
    for (int j = 0; j < 8; ++j) {
      out[i][j] = in[j][2*i];
    }
    for (int j = 0; j < 8; ++j) {
      out[i][j+8] = in[j][2*i+1];
    }
  }
  kernel.packet[0] = pload<Packet16h>(out[0]);
  kernel.packet[1] = pload<Packet16h>(out[1]);
  kernel.packet[2] = pload<Packet16h>(out[2]);
  kernel.packet[3] = pload<Packet16h>(out[3]);
  kernel.packet[4] = pload<Packet16h>(out[4]);
  kernel.packet[5] = pload<Packet16h>(out[5]);
  kernel.packet[6] = pload<Packet16h>(out[6]);
  kernel.packet[7] = pload<Packet16h>(out[7]);
}
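// Unlike the 16x16 case, the 8- and 4-packet transposes go through memory:
// store the packets, permute with scalar loops, reload. Simpler, at the cost
// of a round-trip through the stack buffers.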
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet16h,4>& kernel) {
  EIGEN_ALIGN64 half in[4][16];
  pstore<half>(in[0], kernel.packet[0]);
  pstore<half>(in[1], kernel.packet[1]);
  pstore<half>(in[2], kernel.packet[2]);
  pstore<half>(in[3], kernel.packet[3]);
  EIGEN_ALIGN64 half out[4][16];

  for (int i = 0; i < 4; ++i) {
    for (int j = 0; j < 4; ++j) {
      out[i][j] = in[j][4*i];
    }
    for (int j = 0; j < 4; ++j) {
      out[i][j+4] = in[j][4*i+1];
    }
    for (int j = 0; j < 4; ++j) {
      out[i][j+8] = in[j][4*i+2];
    }
    for (int j = 0; j < 4; ++j) {
      out[i][j+12] = in[j][4*i+3];
    }
  }
  kernel.packet[0] = pload<Packet16h>(out[0]);
  kernel.packet[1] = pload<Packet16h>(out[1]);
  kernel.packet[2] = pload<Packet16h>(out[2]);
  kernel.packet[3] = pload<Packet16h>(out[3]);
}
#elif defined EIGEN_VECTORIZE_AVX

template<> struct is_arithmetic<Packet8h> { enum { value = true }; };
template<> struct packet_traits<Eigen::half> : default_packet_traits {
  typedef Packet8h type;
  // There is no half-size packet for Packet8h.
  typedef Packet8h half;
  // ...
};
template<> struct unpacket_traits<Packet8h> {
  typedef Eigen::half type;
  enum { size = 8, alignment = Aligned16 };
  typedef Packet8h half;
};
template<> EIGEN_STRONG_INLINE Packet8h pset1<Packet8h>(const Eigen::half& from) {
  Packet8h result;
  result.x = _mm_set1_epi16(from.x);
  return result;
}

template<> EIGEN_STRONG_INLINE Packet8h pload<Packet8h>(const Eigen::half* from) {
  Packet8h result;
  result.x = _mm_load_si128(reinterpret_cast<const __m128i*>(from));
  return result;
}

template<> EIGEN_STRONG_INLINE Packet8h ploadu<Packet8h>(const Eigen::half* from) {
  Packet8h result;
  result.x = _mm_loadu_si128(reinterpret_cast<const __m128i*>(from));
  return result;
}

template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet8h& from) {
  _mm_store_si128(reinterpret_cast<__m128i*>(to), from.x);
}

template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet8h& from) {
  _mm_storeu_si128(reinterpret_cast<__m128i*>(to), from.x);
}
template<> EIGEN_STRONG_INLINE Packet8h ploadquad<Packet8h>(const Eigen::half* from) {
  Packet8h result;
  unsigned short a = from[0].x;
  unsigned short b = from[1].x;
  result.x = _mm_set_epi16(b, b, b, b, a, a, a, a);
  return result;
}
EIGEN_STRONG_INLINE Packet8f half2float(const Packet8h& a) {
#ifdef EIGEN_HAS_FP16_C
  return _mm256_cvtph_ps(a.x);
#else
  // ... scalar per-lane conversion producing floats f0..f7 ...
  return _mm256_set_ps(f7, f6, f5, f4, f3, f2, f1, f0);
#endif
}
EIGEN_STRONG_INLINE Packet8h float2half(const Packet8f& a) {
#ifdef EIGEN_HAS_FP16_C
  Packet8h result;
  result.x = _mm256_cvtps_ph(a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
  return result;
#else
  // ... scalar per-lane conversion producing Eigen::half values h0..h7 ...
  Packet8h result;
  result.x = _mm_set_epi16(h7.x, h6.x, h5.x, h4.x, h3.x, h2.x, h1.x, h0.x);
  return result;
#endif
}
template<> EIGEN_STRONG_INLINE Packet8h padd<Packet8h>(const Packet8h& a, const Packet8h& b) {
  Packet8f af = half2float(a);
  Packet8f bf = half2float(b);
  Packet8f rf = padd(af, bf);
  return float2half(rf);
}

template<> EIGEN_STRONG_INLINE Packet8h pmul<Packet8h>(const Packet8h& a, const Packet8h& b) {
  Packet8f af = half2float(a);
  Packet8f bf = half2float(b);
  Packet8f rf = pmul(af, bf);
  return float2half(rf);
}
template<> EIGEN_STRONG_INLINE Packet8h pgather<Eigen::half, Packet8h>(const Eigen::half* from, Index stride) {
  Packet8h result;
  result.x = _mm_set_epi16(from[7*stride].x, from[6*stride].x, from[5*stride].x, from[4*stride].x,
                           from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
  return result;
}
template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet8h>(Eigen::half* to, const Packet8h& from, Index stride) {
  EIGEN_ALIGN32 Eigen::half aux[8];
  pstore(aux, from);
  to[stride*0].x = aux[0].x;
  to[stride*1].x = aux[1].x;
  to[stride*2].x = aux[2].x;
  to[stride*3].x = aux[3].x;
  to[stride*4].x = aux[4].x;
  to[stride*5].x = aux[5].x;
  to[stride*6].x = aux[6].x;
  to[stride*7].x = aux[7].x;
}
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8h,8>& kernel) {
  __m128i a = kernel.packet[0].x;
  __m128i b = kernel.packet[1].x;
  __m128i c = kernel.packet[2].x;
  __m128i d = kernel.packet[3].x;
  __m128i e = kernel.packet[4].x;
  __m128i f = kernel.packet[5].x;
  __m128i g = kernel.packet[6].x;
  __m128i h = kernel.packet[7].x;
  __m128i a03b03 = _mm_unpacklo_epi16(a, b);
  __m128i c03d03 = _mm_unpacklo_epi16(c, d);
  __m128i e03f03 = _mm_unpacklo_epi16(e, f);
  __m128i g03h03 = _mm_unpacklo_epi16(g, h);
  __m128i a47b47 = _mm_unpackhi_epi16(a, b);
  __m128i c47d47 = _mm_unpackhi_epi16(c, d);
  __m128i e47f47 = _mm_unpackhi_epi16(e, f);
  __m128i g47h47 = _mm_unpackhi_epi16(g, h);

  __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
  __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
  __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
  __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
  __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
  __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
  __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
  __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);

  __m128i a0b0c0d0e0f0g0h0 = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
  __m128i a1b1c1d1e1f1g1h1 = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
  __m128i a2b2c2d2e2f2g2h2 = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
  __m128i a3b3c3d3e3f3g3h3 = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
  __m128i a4b4c4d4e4f4g4h4 = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
  __m128i a5b5c5d5e5f5g5h5 = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
  __m128i a6b6c6d6e6f6g6h6 = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
  __m128i a7b7c7d7e7f7g7h7 = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
  kernel.packet[0].x = a0b0c0d0e0f0g0h0;
  kernel.packet[1].x = a1b1c1d1e1f1g1h1;
  kernel.packet[2].x = a2b2c2d2e2f2g2h2;
  kernel.packet[3].x = a3b3c3d3e3f3g3h3;
  kernel.packet[4].x = a4b4c4d4e4f4g4h4;
  kernel.packet[5].x = a5b5c5d5e5f5g5h5;
  kernel.packet[6].x = a6b6c6d6e6f6g6h6;
  kernel.packet[7].x = a7b7c7d7e7f7g7h7;
}
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet8h,4>& kernel) {
  EIGEN_ALIGN32 Eigen::half in[4][8];
  pstore<Eigen::half>(in[0], kernel.packet[0]);
  pstore<Eigen::half>(in[1], kernel.packet[1]);
  pstore<Eigen::half>(in[2], kernel.packet[2]);
  pstore<Eigen::half>(in[3], kernel.packet[3]);
  EIGEN_ALIGN32 Eigen::half out[4][8];

  for (int i = 0; i < 4; ++i) {
    for (int j = 0; j < 4; ++j) {
      out[i][j] = in[j][2*i];
    }
    for (int j = 0; j < 4; ++j) {
      out[i][j+4] = in[j][2*i+1];
    }
  }
  kernel.packet[0] = pload<Packet8h>(out[0]);
  kernel.packet[1] = pload<Packet8h>(out[1]);
  kernel.packet[2] = pload<Packet8h>(out[2]);
  kernel.packet[3] = pload<Packet8h>(out[3]);
}
template<> struct is_arithmetic<Packet4h> { enum { value = true }; };
template<> struct packet_traits<Eigen::half> : default_packet_traits {
  typedef Packet4h type;
  // There is no half-size packet for Packet4h.
  typedef Packet4h half;
  // ...
};
template<> struct unpacket_traits<Packet4h> {
  typedef Eigen::half type;
  enum { size = 4, alignment = Aligned16 };
  typedef Packet4h half;
};
template<> EIGEN_STRONG_INLINE Packet4h pset1<Packet4h>(const Eigen::half& from) {
  Packet4h result;
  result.x = _mm_set1_pi16(from.x);
  return result;
}
template<> EIGEN_STRONG_INLINE Packet4h padd<Packet4h>(const Packet4h& a, const Packet4h& b) {
  __int64_t a64 = _mm_cvtm64_si64(a.x);
  __int64_t b64 = _mm_cvtm64_si64(b.x);
  Eigen::half h[4];
  // ... per-lane half additions filling h[0..3] ...
  Packet4h result;
  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
  return result;
}
template<> EIGEN_STRONG_INLINE Packet4h pmul<Packet4h>(const Packet4h& a, const Packet4h& b) {
  __int64_t a64 = _mm_cvtm64_si64(a.x);
  __int64_t b64 = _mm_cvtm64_si64(b.x);
  Eigen::half h[4];
  // ... per-lane half multiplications filling h[0..3] ...
  Packet4h result;
  result.x = _mm_set_pi16(h[3].x, h[2].x, h[1].x, h[0].x);
  return result;
}
template<> EIGEN_STRONG_INLINE Packet4h pload<Packet4h>(const Eigen::half* from) {
  Packet4h result;
  result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
  return result;
}

template<> EIGEN_STRONG_INLINE Packet4h ploadu<Packet4h>(const Eigen::half* from) {
  Packet4h result;
  result.x = _mm_cvtsi64_m64(*reinterpret_cast<const __int64_t*>(from));
  return result;
}
template<> EIGEN_STRONG_INLINE void pstore<Eigen::half>(Eigen::half* to, const Packet4h& from) {
  __int64_t r = _mm_cvtm64_si64(from.x);
  *(reinterpret_cast<__int64_t*>(to)) = r;
}

template<> EIGEN_STRONG_INLINE void pstoreu<Eigen::half>(Eigen::half* to, const Packet4h& from) {
  __int64_t r = _mm_cvtm64_si64(from.x);
  *(reinterpret_cast<__int64_t*>(to)) = r;
}
template<> EIGEN_STRONG_INLINE Packet4h ploadquad<Packet4h>(const Eigen::half* from) {
  return pset1<Packet4h>(*from);
}
template<> EIGEN_STRONG_INLINE Packet4h pgather<Eigen::half, Packet4h>(const Eigen::half* from, Index stride) {
  Packet4h result;
  result.x = _mm_set_pi16(from[3*stride].x, from[2*stride].x, from[1*stride].x, from[0*stride].x);
  return result;
}
template<> EIGEN_STRONG_INLINE void pscatter<Eigen::half, Packet4h>(Eigen::half* to, const Packet4h& from, Index stride) {
  __int64_t a = _mm_cvtm64_si64(from.x);
  to[stride*0].x = static_cast<unsigned short>(a);
  to[stride*1].x = static_cast<unsigned short>(a >> 16);
  to[stride*2].x = static_cast<unsigned short>(a >> 32);
  to[stride*3].x = static_cast<unsigned short>(a >> 48);
}
EIGEN_STRONG_INLINE void ptranspose(PacketBlock<Packet4h,4>& kernel) {
  __m64 T0 = _mm_unpacklo_pi16(kernel.packet[0].x, kernel.packet[1].x);
  __m64 T1 = _mm_unpacklo_pi16(kernel.packet[2].x, kernel.packet[3].x);
  __m64 T2 = _mm_unpackhi_pi16(kernel.packet[0].x, kernel.packet[1].x);
  __m64 T3 = _mm_unpackhi_pi16(kernel.packet[2].x, kernel.packet[3].x);

  kernel.packet[0].x = _mm_unpacklo_pi32(T0, T1);
  kernel.packet[1].x = _mm_unpackhi_pi32(T0, T1);
  kernel.packet[2].x = _mm_unpacklo_pi32(T2, T3);
  kernel.packet[3].x = _mm_unpackhi_pi32(T2, T3);
}
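// Hypothetical usage sketch, not part of the original header: transposing a
// 4x4 block of halves stored row-major (`rows`, `cols` are illustrative).
EIGEN_STRONG_INLINE void transpose4x4_sketch(const Eigen::half* rows, Eigen::half* cols) {
  PacketBlock<Packet4h,4> block;
  for (int r = 0; r < 4; ++r) block.packet[r] = ploadu<Packet4h>(rows + 4*r);
  ptranspose(block);  // in-register 4x4 transpose defined above
  for (int r = 0; r < 4; ++r) pstoreu(cols + 4*r, block.packet[r]);
}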
#endif

#endif // EIGEN_PACKET_MATH_HALF_CUDA_H