10 #ifndef EIGEN_PACKET_MATH_HALF_CUDA_H
11 #define EIGEN_PACKET_MATH_HALF_CUDA_H
18 #if defined(EIGEN_HAS_CUDA_FP16) && defined(__CUDACC__) && defined(__CUDA_ARCH__) && __CUDA_ARCH__ >= 300
20 template<>
struct is_arithmetic<half2> {
enum {
value =
true }; };
// NOTE(review): the remainder of this chunk is elided — function signatures,
// #else/#endif lines and closing braces are missing, and original line
// numbers are fused into the text. Code below is left byte-identical;
// identifications are inferred from Eigen packet-math conventions — confirm
// against the full file before relying on them.
22 template<>
// Fragment of packet_traits<Eigen::half> for the CUDA half2 path (body elided).
struct packet_traits<
Eigen::half> : default_packet_traits
// Presumably pset1<half2>: broadcast one half into both lanes.
45 return __half2half2(from);
// Presumably pload<half2>: aligned load via pointer reinterpretation.
49 return *
reinterpret_cast<const half2*
>(from);
// Presumably ploadu<half2>: unaligned load, element-wise pack.
53 return __halves2half2(from[0], from[1]);
// Presumably ploaddup<half2>: duplicate the first element into both lanes.
57 return __halves2half2(from[0], from[0]);
// Presumably pstore<half>: aligned store via pointer reinterpretation.
61 *
reinterpret_cast<half2*
>(to) = from;
// Presumably pstoreu<half>: element-wise unaligned store.
65 to[0] = __low2half(from);
66 to[1] = __high2half(from);
// Presumably ploadt_ro aligned variant: __ldg read-only-cache load on SM35+.
71 #if __CUDA_ARCH__ >= 350
72 return __ldg((
const half2*)from);
// Pre-SM35 fallback (the #else between these lines is elided).
74 return __halves2half2(*(from+0), *(from+1));
// Presumably ploadt_ro unaligned variant: per-element __ldg on SM35+.
80 #if __CUDA_ARCH__ >= 350
81 return __halves2half2(__ldg(from+0), __ldg(from+1));
83 return __halves2half2(*(from+0), *(from+1));
// Presumably pgather<half, half2>: strided gather of two elements.
88 return __halves2half2(from[0*stride], from[1*stride]);
// Presumably pscatter<half, half2>: strided scatter of the two lanes.
92 to[stride*0] = __low2half(from);
93 to[stride*1] = __high2half(from);
// Presumably pabs<half2>: clear both sign bits (0x7FFF per 16-bit lane)
// by type-punning the half2 through an unsigned int.
102 unsigned temp = *(
reinterpret_cast<const unsigned*
>(&(
a)));
103 *(
reinterpret_cast<unsigned*
>(&(result))) = temp & 0x7FFF7FFF;
// Presumably ptranspose(PacketBlock<half2,2>&): 2x2 in-register transpose.
110 __half a1 = __low2half(kernel.packet[0]);
111 __half a2 = __high2half(kernel.packet[0]);
112 __half b1 = __low2half(kernel.packet[1]);
113 __half b2 = __high2half(kernel.packet[1]);
114 kernel.packet[0] = __halves2half2(a1, b1);
115 kernel.packet[1] = __halves2half2(a2, b2);
// NOTE(review): elided fragments of the half2 arithmetic ops. Each pair of
// branches below is an SM53+ native-fp16 fast path followed by a
// float-roundtrip fallback; the #else/#endif lines between them are missing
// from this chunk. Identifications inferred from Eigen conventions — confirm.
// Presumably plset<half2>: {a, a+1}. Native __hadd on SM53+.
119 #if __CUDA_ARCH__ >= 530
120 return __halves2half2(
a, __hadd(
a, __float2half(1.0f)));
// Fallback: compute a+1 in float, round back to half.
122 float f = __half2float(
a) + 1.0f;
123 return __halves2half2(
a, __float2half(f));
// Presumably padd<half2>: native paired add on SM53+.
128 #if __CUDA_ARCH__ >= 530
129 return __hadd2(
a,
b);
// Fallback: widen both lanes to float (sums r1/r2 computed on elided lines).
131 float a1 = __low2float(
a);
132 float a2 = __high2float(
a);
133 float b1 = __low2float(
b);
134 float b2 = __high2float(
b);
137 return __floats2half2_rn(r1, r2);
// Presumably psub<half2>: native paired subtract on SM53+.
142 #if __CUDA_ARCH__ >= 530
143 return __hsub2(
a,
b);
145 float a1 = __low2float(
a);
146 float a2 = __high2float(
a);
147 float b1 = __low2float(
b);
148 float b2 = __high2float(
b);
151 return __floats2half2_rn(r1, r2);
// Presumably pnegate<half2> (SM53+ native branch elided); float fallback:
156 #if __CUDA_ARCH__ >= 530
159 float a1 = __low2float(
a);
160 float a2 = __high2float(
a);
161 return __floats2half2_rn(-a1, -a2);
// Presumably pmul<half2>: native paired multiply on SM53+.
168 #if __CUDA_ARCH__ >= 530
169 return __hmul2(
a,
b);
171 float a1 = __low2float(
a);
172 float a2 = __high2float(
a);
173 float b1 = __low2float(
b);
174 float b2 = __high2float(
b);
177 return __floats2half2_rn(r1, r2);
// Fused multiply-add for half2: a*b + c per lane.
// SM53+ uses the native __hfma2 instruction; older architectures widen each
// lane to float, use the float FMA-shaped expression, and round back to half.
// NOTE(review): the #else, #endif and closing brace of this function fall on
// elided lines; the code below is an incomplete fragment left byte-identical.
181 template<> __device__
EIGEN_STRONG_INLINE half2 pmadd<half2>(
const half2&
a,
const half2&
b,
const half2&
c) {
182 #if __CUDA_ARCH__ >= 530
// Native vectorized half-precision FMA (requires SM53+).
183 return __hfma2(
a,
b,
c);
// Fallback: per-lane float arithmetic with round-to-nearest repack.
185 float a1 = __low2float(
a);
186 float a2 = __high2float(
a);
187 float b1 = __low2float(
b);
188 float b2 = __high2float(
b);
189 float c1 = __low2float(
c);
190 float c2 = __high2float(
c);
191 float r1 = a1 * b1 + c1;
192 float r2 = a2 * b2 + c2;
193 return __floats2half2_rn(r1, r2);
// NOTE(review): elided fragments — signatures and the division/result lines
// are missing. Identifications inferred from Eigen conventions — confirm.
// Presumably pdiv<half2>: per-lane float division fallback (r1/r2 computed
// on elided lines 202-203).
198 float a1 = __low2float(
a);
199 float a2 = __high2float(
a);
200 float b1 = __low2float(
b);
201 float b2 = __high2float(
b);
204 return __floats2half2_rn(r1, r2);
// Presumably pmin<half2>: compare lanes in float, select the original half
// values (avoids a round-trip through float on the selected result).
208 float a1 = __low2float(
a);
209 float a2 = __high2float(
a);
210 float b1 = __low2float(
b);
211 float b2 = __high2float(
b);
212 __half r1 = a1 < b1 ? __low2half(
a) : __low2half(
b);
213 __half r2 = a2 < b2 ? __high2half(
a) : __high2half(
b);
214 return __halves2half2(r1, r2);
// Presumably pmax<half2>: same structure with the comparison inverted.
218 float a1 = __low2float(
a);
219 float a2 = __high2float(
a);
220 float b1 = __low2float(
b);
221 float b2 = __high2float(
b);
222 __half r1 = a1 > b1 ? __low2half(
a) : __low2half(
b);
223 __half r2 = a2 > b2 ? __high2half(
a) : __high2half(
b);
224 return __halves2half2(r1, r2);
// NOTE(review): elided fragments of the half2 horizontal reductions and
// plog1p. SM53+ branches use native half intrinsics; fallbacks widen to
// float. #else/#endif and some result lines are missing — confirm against
// the full file.
// Presumably predux<half2> (sum of the two lanes).
228 #if __CUDA_ARCH__ >= 530
229 return __hadd(__low2half(
a), __high2half(
a));
// Fallback (the float sum and conversion back to half are on elided lines).
231 float a1 = __low2float(
a);
232 float a2 = __high2float(
a);
// Presumably predux_max<half2>: native half comparison on SM53+.
238 #if __CUDA_ARCH__ >= 530
239 __half first = __low2half(
a);
240 __half second = __high2half(
a);
241 return __hgt(first, second) ? first : second;
// Fallback: compare in float, return the original half lane.
243 float a1 = __low2float(
a);
244 float a2 = __high2float(
a);
245 return a1 > a2 ? __low2half(
a) : __high2half(
a);
// Presumably predux_min<half2>: same structure using __hlt.
250 #if __CUDA_ARCH__ >= 530
251 __half first = __low2half(
a);
252 __half second = __high2half(
a);
253 return __hlt(first, second) ? first : second;
255 float a1 = __low2float(
a);
256 float a2 = __high2float(
a);
257 return a1 < a2 ? __low2half(
a) : __high2half(
a);
// Presumably predux_mul<half2>: product of the two lanes.
262 #if __CUDA_ARCH__ >= 530
263 return __hmul(__low2half(
a), __high2half(
a));
265 float a1 = __low2float(
a);
266 float a2 = __high2float(
a);
// Presumably plog1p<half2>: per-lane log1pf in float, repacked to half2.
272 float a1 = __low2float(
a);
273 float a2 = __high2float(
a);
274 float r1 = log1pf(a1);
275 float r2 = log1pf(a2);
276 return __floats2half2_rn(r1, r2);
// With CUDA 8.0+ and SM53+, plog/pexp/psqrt/prsqrt get native-half fast
// paths (bodies elided); otherwise the generic float fallbacks below apply.
279 #if EIGEN_CUDACC_VER >= 80000 && defined EIGEN_CUDA_ARCH && EIGEN_CUDA_ARCH >= 530
// Fast-path specialization headers (bodies on elided lines).
282 half2 plog<half2>(
const half2&
a) {
287 half2 pexp<half2>(
const half2&
a) {
292 half2 psqrt<half2>(
const half2&
a) {
297 half2 prsqrt<half2>(
const half2&
a) {
// Presumably the generic plog fallback (logf calls on elided lines 306-307).
304 float a1 = __low2float(
a);
305 float a2 = __high2float(
a);
308 return __floats2half2_rn(r1, r2);
// Presumably the generic pexp fallback (expf calls on elided lines 314-315).
312 float a1 = __low2float(
a);
313 float a2 = __high2float(
a);
316 return __floats2half2_rn(r1, r2);
// Generic psqrt fallback: per-lane sqrtf.
320 float a1 = __low2float(
a);
321 float a2 = __high2float(
a);
322 float r1 = sqrtf(a1);
323 float r2 = sqrtf(a2);
324 return __floats2half2_rn(r1, r2);
// Generic prsqrt fallback: per-lane rsqrtf (reciprocal square root).
328 float a1 = __low2float(
a);
329 float a2 = __high2float(
a);
330 float r1 = rsqrtf(a1);
331 float r2 = rsqrtf(a2);
332 return __floats2half2_rn(r1, r2);
337 #elif defined EIGEN_VECTORIZE_AVX512
344 template<>
struct is_arithmetic<Packet16h> {
enum {
value =
true }; };
// NOTE(review): AVX512 Packet16h section — elided fragments; signatures and
// many body lines are missing. Identifications inferred from Eigen
// conventions — confirm against the full file.
// Fragment of packet_traits<half> selecting Packet16h.
347 struct packet_traits<half> : default_packet_traits {
348 typedef Packet16h
type;
350 typedef Packet16h
half;
// Presumably pset1<Packet16h>: broadcast the raw 16-bit payload.
380 result.x = _mm256_set1_epi16(from.
x);
// Presumably pload<Packet16h>: aligned 256-bit load.
390 result.x = _mm256_load_si256(
reinterpret_cast<const __m256i*
>(from));
// Presumably ploadu<Packet16h>: unaligned 256-bit load.
396 result.x = _mm256_loadu_si256(
reinterpret_cast<const __m256i*
>(from));
// Presumably pstore / pstoreu of a Packet16h.
401 _mm256_store_si256((__m256i*)to, from.x);
405 _mm256_storeu_si256((__m256i*)to, from.x);
// Presumably ploadquad<Packet16h>: each input element repeated 4x.
// _mm256_set_epi16 takes arguments highest-lane-first, hence d..a order.
411 unsigned short a = from[0].
x;
412 unsigned short b = from[1].
x;
413 unsigned short c = from[2].
x;
414 unsigned short d = from[3].
x;
415 result.x = _mm256_set_epi16(d, d, d, d,
c,
c,
c,
c,
b,
b,
b,
b,
a,
a,
a,
a);
// half2float: hardware F16C conversion when available...
420 #ifdef EIGEN_HAS_FP16_C
421 return _mm512_cvtph_ps(
a.x);
// ...otherwise a scalar per-element conversion (f0..ff on elided lines).
442 return _mm512_set_ps(
443 ff, fe, fd, fc, fb, fa, f9, f8, f7, f6, f5, f4, f3, f2, f1, f0);
// float2half: hardware conversion with round-to-nearest-even.
448 #ifdef EIGEN_HAS_FP16_C
450 result.x = _mm512_cvtps_ph(
a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
// Scalar fallback repack (h0..hf converted on elided lines).
473 result.x = _mm256_set_epi16(
474 hf.x, he.x, hd.x, hc.x, hb.x, ha.x, h9.x, h8.x,
475 h7.x, h6.x, h5.x, h4.x, h3.x, h2.x, h1.x, h0.x);
// Presumably padd/pmul<Packet16h>: computed in float, converted back.
484 return float2half(rf);
491 return float2half(rf);
// Presumably predux<Packet16h>: reduce in float precision.
496 return half(
predux(from_float));
// Presumably pgather<half, Packet16h>: strided gather of 16 elements
// (set_epi16 arguments are highest-lane-first).
502 result.x = _mm256_set_epi16(
503 from[15*stride].
x, from[14*stride].
x, from[13*stride].
x, from[12*stride].
x,
504 from[11*stride].
x, from[10*stride].
x, from[9*stride].
x, from[8*stride].
x,
505 from[7*stride].
x, from[6*stride].
x, from[5*stride].
x, from[4*stride].
x,
506 from[3*stride].
x, from[2*stride].
x, from[1*stride].
x, from[0*stride].
x);
// Presumably pscatter<half, Packet16h> via a stack buffer `aux`
// (declaration/store on elided lines).
514 to[stride*0].x = aux[0].x;
515 to[stride*1].x = aux[1].x;
516 to[stride*2].x = aux[2].x;
517 to[stride*3].x = aux[3].x;
518 to[stride*4].x = aux[4].x;
519 to[stride*5].x = aux[5].x;
520 to[stride*6].x = aux[6].x;
521 to[stride*7].x = aux[7].x;
522 to[stride*8].x = aux[8].x;
523 to[stride*9].x = aux[9].x;
524 to[stride*10].x = aux[10].x;
525 to[stride*11].x = aux[11].x;
526 to[stride*12].x = aux[12].x;
527 to[stride*13].x = aux[13].x;
528 to[stride*14].x = aux[14].x;
529 to[stride*15].x = aux[15].x;
// 16x16 transpose of Packet16h registers, done entirely in integer AVX2
// registers (halves are opaque 16-bit payloads here, so integer shuffles
// suffice). Classic three-stage butterfly: unpack 16-bit, then 32-bit, then
// 64-bit pairs, finishing with 128-bit lane permutes.
// NOTE(review): the qualifier line before `ptranspose` and the closing brace
// are on elided lines; body left byte-identical.
533 ptranspose(PacketBlock<Packet16h,16>& kernel) {
534 __m256i
a = kernel.packet[0].x;
535 __m256i
b = kernel.packet[1].x;
536 __m256i
c = kernel.packet[2].x;
537 __m256i d = kernel.packet[3].x;
538 __m256i e = kernel.packet[4].x;
539 __m256i f = kernel.packet[5].x;
540 __m256i g = kernel.packet[6].x;
541 __m256i h = kernel.packet[7].x;
542 __m256i i = kernel.packet[8].x;
543 __m256i j = kernel.packet[9].x;
544 __m256i k = kernel.packet[10].x;
545 __m256i l = kernel.packet[11].x;
546 __m256i m = kernel.packet[12].x;
547 __m256i
n = kernel.packet[13].x;
548 __m256i o = kernel.packet[14].x;
549 __m256i p = kernel.packet[15].x;
// Stage 1: interleave 16-bit elements of row pairs (low and high halves).
551 __m256i ab_07 = _mm256_unpacklo_epi16(
a,
b);
552 __m256i cd_07 = _mm256_unpacklo_epi16(
c, d);
553 __m256i ef_07 = _mm256_unpacklo_epi16(e, f);
554 __m256i gh_07 = _mm256_unpacklo_epi16(g, h);
555 __m256i ij_07 = _mm256_unpacklo_epi16(i, j);
556 __m256i kl_07 = _mm256_unpacklo_epi16(k, l);
557 __m256i mn_07 = _mm256_unpacklo_epi16(m,
n);
558 __m256i op_07 = _mm256_unpacklo_epi16(o, p);
560 __m256i ab_8f = _mm256_unpackhi_epi16(
a,
b);
561 __m256i cd_8f = _mm256_unpackhi_epi16(
c, d);
562 __m256i ef_8f = _mm256_unpackhi_epi16(e, f);
563 __m256i gh_8f = _mm256_unpackhi_epi16(g, h);
564 __m256i ij_8f = _mm256_unpackhi_epi16(i, j);
565 __m256i kl_8f = _mm256_unpackhi_epi16(k, l);
566 __m256i mn_8f = _mm256_unpackhi_epi16(m,
n);
567 __m256i op_8f = _mm256_unpackhi_epi16(o, p);
// Stage 2: interleave 32-bit pairs of the stage-1 results.
569 __m256i abcd_03 = _mm256_unpacklo_epi32(ab_07, cd_07);
570 __m256i abcd_47 = _mm256_unpackhi_epi32(ab_07, cd_07);
571 __m256i efgh_03 = _mm256_unpacklo_epi32(ef_07, gh_07);
572 __m256i efgh_47 = _mm256_unpackhi_epi32(ef_07, gh_07);
573 __m256i ijkl_03 = _mm256_unpacklo_epi32(ij_07, kl_07);
574 __m256i ijkl_47 = _mm256_unpackhi_epi32(ij_07, kl_07);
575 __m256i mnop_03 = _mm256_unpacklo_epi32(mn_07, op_07);
576 __m256i mnop_47 = _mm256_unpackhi_epi32(mn_07, op_07);
578 __m256i abcd_8b = _mm256_unpacklo_epi32(ab_8f, cd_8f);
579 __m256i abcd_cf = _mm256_unpackhi_epi32(ab_8f, cd_8f);
580 __m256i efgh_8b = _mm256_unpacklo_epi32(ef_8f, gh_8f);
581 __m256i efgh_cf = _mm256_unpackhi_epi32(ef_8f, gh_8f);
582 __m256i ijkl_8b = _mm256_unpacklo_epi32(ij_8f, kl_8f);
583 __m256i ijkl_cf = _mm256_unpackhi_epi32(ij_8f, kl_8f);
584 __m256i mnop_8b = _mm256_unpacklo_epi32(mn_8f, op_8f);
585 __m256i mnop_cf = _mm256_unpackhi_epi32(mn_8f, op_8f);
// Stage 3: interleave 64-bit quads.
587 __m256i abcdefgh_01 = _mm256_unpacklo_epi64(abcd_03, efgh_03);
588 __m256i abcdefgh_23 = _mm256_unpackhi_epi64(abcd_03, efgh_03);
589 __m256i ijklmnop_01 = _mm256_unpacklo_epi64(ijkl_03, mnop_03);
590 __m256i ijklmnop_23 = _mm256_unpackhi_epi64(ijkl_03, mnop_03);
591 __m256i abcdefgh_45 = _mm256_unpacklo_epi64(abcd_47, efgh_47);
592 __m256i abcdefgh_67 = _mm256_unpackhi_epi64(abcd_47, efgh_47);
593 __m256i ijklmnop_45 = _mm256_unpacklo_epi64(ijkl_47, mnop_47);
594 __m256i ijklmnop_67 = _mm256_unpackhi_epi64(ijkl_47, mnop_47);
595 __m256i abcdefgh_89 = _mm256_unpacklo_epi64(abcd_8b, efgh_8b);
596 __m256i abcdefgh_ab = _mm256_unpackhi_epi64(abcd_8b, efgh_8b);
597 __m256i ijklmnop_89 = _mm256_unpacklo_epi64(ijkl_8b, mnop_8b);
598 __m256i ijklmnop_ab = _mm256_unpackhi_epi64(ijkl_8b, mnop_8b);
599 __m256i abcdefgh_cd = _mm256_unpacklo_epi64(abcd_cf, efgh_cf);
600 __m256i abcdefgh_ef = _mm256_unpackhi_epi64(abcd_cf, efgh_cf);
601 __m256i ijklmnop_cd = _mm256_unpacklo_epi64(ijkl_cf, mnop_cf);
602 __m256i ijklmnop_ef = _mm256_unpackhi_epi64(ijkl_cf, mnop_cf);
// Stage 4: stitch the 128-bit lanes together (0x20 = low halves,
// 0x31 = high halves) to produce the 16 transposed rows.
605 __m256i a_p_0 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x20);
606 __m256i a_p_1 = _mm256_permute2x128_si256(abcdefgh_01, ijklmnop_01, 0x31);
607 __m256i a_p_2 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x20);
608 __m256i a_p_3 = _mm256_permute2x128_si256(abcdefgh_23, ijklmnop_23, 0x31);
609 __m256i a_p_4 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x20);
610 __m256i a_p_5 = _mm256_permute2x128_si256(abcdefgh_45, ijklmnop_45, 0x31);
611 __m256i a_p_6 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x20);
612 __m256i a_p_7 = _mm256_permute2x128_si256(abcdefgh_67, ijklmnop_67, 0x31);
613 __m256i a_p_8 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x20);
614 __m256i a_p_9 = _mm256_permute2x128_si256(abcdefgh_89, ijklmnop_89, 0x31);
615 __m256i a_p_a = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x20);
616 __m256i a_p_b = _mm256_permute2x128_si256(abcdefgh_ab, ijklmnop_ab, 0x31);
617 __m256i a_p_c = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x20);
618 __m256i a_p_d = _mm256_permute2x128_si256(abcdefgh_cd, ijklmnop_cd, 0x31);
619 __m256i a_p_e = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x20);
620 __m256i a_p_f = _mm256_permute2x128_si256(abcdefgh_ef, ijklmnop_ef, 0x31);
622 kernel.packet[0].x = a_p_0;
623 kernel.packet[1].x = a_p_1;
624 kernel.packet[2].x = a_p_2;
625 kernel.packet[3].x = a_p_3;
626 kernel.packet[4].x = a_p_4;
627 kernel.packet[5].x = a_p_5;
628 kernel.packet[6].x = a_p_6;
629 kernel.packet[7].x = a_p_7;
630 kernel.packet[8].x = a_p_8;
631 kernel.packet[9].x = a_p_9;
632 kernel.packet[10].x = a_p_a;
633 kernel.packet[11].x = a_p_b;
634 kernel.packet[12].x = a_p_c;
635 kernel.packet[13].x = a_p_d;
636 kernel.packet[14].x = a_p_e;
637 kernel.packet[15].x = a_p_f;
// 8- and 4-packet Packet16h transposes, done the slow-but-simple way:
// spill all packets to stack arrays of halves, permute element-wise, and
// reload. NOTE(review): qualifier lines, array declarations, loop-closing
// braces and function braces fall on elided lines; identifications of `in`
// and `out` as half[N][16] buffers are inferred — confirm.
641 ptranspose(PacketBlock<Packet16h,8>& kernel) {
// Spill the 8 packets to memory.
643 pstore<half>(in[0], kernel.packet[0]);
644 pstore<half>(in[1], kernel.packet[1]);
645 pstore<half>(in[2], kernel.packet[2]);
646 pstore<half>(in[3], kernel.packet[3]);
647 pstore<half>(in[4], kernel.packet[4]);
648 pstore<half>(in[5], kernel.packet[5]);
649 pstore<half>(in[6], kernel.packet[6]);
650 pstore<half>(in[7], kernel.packet[7]);
// Element-wise 8x16 permutation: row i collects elements 2i and 2i+1 of
// every source row.
654 for (
int i = 0; i < 8; ++i) {
655 for (
int j = 0; j < 8; ++j) {
656 out[i][j] = in[j][2*i];
658 for (
int j = 0; j < 8; ++j) {
659 out[i][j+8] = in[j][2*i+1];
// Reload the transposed rows.
663 kernel.packet[0] = pload<Packet16h>(out[0]);
664 kernel.packet[1] = pload<Packet16h>(out[1]);
665 kernel.packet[2] = pload<Packet16h>(out[2]);
666 kernel.packet[3] = pload<Packet16h>(out[3]);
667 kernel.packet[4] = pload<Packet16h>(out[4]);
668 kernel.packet[5] = pload<Packet16h>(out[5]);
669 kernel.packet[6] = pload<Packet16h>(out[6]);
670 kernel.packet[7] = pload<Packet16h>(out[7]);
// Same strategy for the 4-packet variant: row i collects elements
// 4i..4i+3 of every source row.
674 ptranspose(PacketBlock<Packet16h,4>& kernel) {
676 pstore<half>(in[0], kernel.packet[0]);
677 pstore<half>(in[1], kernel.packet[1]);
678 pstore<half>(in[2], kernel.packet[2]);
679 pstore<half>(in[3], kernel.packet[3]);
683 for (
int i = 0; i < 4; ++i) {
684 for (
int j = 0; j < 4; ++j) {
685 out[i][j] = in[j][4*i];
687 for (
int j = 0; j < 4; ++j) {
688 out[i][j+4] = in[j][4*i+1];
690 for (
int j = 0; j < 4; ++j) {
691 out[i][j+8] = in[j][4*i+2];
693 for (
int j = 0; j < 4; ++j) {
694 out[i][j+12] = in[j][4*i+3];
698 kernel.packet[0] = pload<Packet16h>(out[0]);
699 kernel.packet[1] = pload<Packet16h>(out[1]);
700 kernel.packet[2] = pload<Packet16h>(out[2]);
701 kernel.packet[3] = pload<Packet16h>(out[3]);
705 #elif defined EIGEN_VECTORIZE_AVX
712 template<>
struct is_arithmetic<Packet8h> {
enum {
value =
true }; };
// NOTE(review): AVX Packet8h section — elided fragments; signatures and many
// body lines are missing. Identifications inferred from Eigen conventions —
// confirm against the full file.
// Fragment of packet_traits<Eigen::half> selecting Packet8h.
715 struct packet_traits<
Eigen::half> : default_packet_traits {
716 typedef Packet8h
type;
718 typedef Packet8h
half;
// Presumably pset1<Packet8h>: broadcast the raw 16-bit payload.
748 result.x = _mm_set1_epi16(from.
x);
// Presumably pload<Packet8h>: aligned 128-bit load.
758 result.x = _mm_load_si128(
reinterpret_cast<const __m128i*
>(from));
// Presumably ploadu<Packet8h>: unaligned 128-bit load.
764 result.x = _mm_loadu_si128(
reinterpret_cast<const __m128i*
>(from));
// Presumably pstore<Eigen::half>: aligned 128-bit store.
769 _mm_store_si128(
reinterpret_cast<__m128i*
>(to), from.x);
// Presumably pstoreu<Eigen::half>: unaligned 128-bit store.
773 _mm_storeu_si128(
reinterpret_cast<__m128i*
>(to), from.x);
// Presumably ploadquad<Packet8h>: each input element repeated 4x
// (set_epi16 arguments are highest-lane-first, hence b..a order).
779 unsigned short a = from[0].
x;
780 unsigned short b = from[1].
x;
781 result.x = _mm_set_epi16(
b,
b,
b,
b,
a,
a,
a,
a);
// half2float: hardware F16C conversion when available...
786 #ifdef EIGEN_HAS_FP16_C
787 return _mm256_cvtph_ps(
a.x);
// ...otherwise scalar per-element conversion (f0..f7 on elided lines).
800 return _mm256_set_ps(f7, f6, f5, f4, f3, f2, f1, f0);
// float2half: hardware conversion with round-to-nearest-even.
805 #ifdef EIGEN_HAS_FP16_C
807 result.x = _mm256_cvtps_ph(
a, _MM_FROUND_TO_NEAREST_INT|_MM_FROUND_NO_EXC);
// Scalar fallback repack (h0..h7 converted on elided lines).
822 result.x = _mm_set_epi16(h7.x, h6.x, h5.x, h4.x, h3.x, h2.x, h1.x, h0.x);
// Presumably padd/pmul<Packet8h>: computed in float, converted back.
833 return float2half(rf);
840 return float2half(rf);
// Presumably pgather<Eigen::half, Packet8h>: strided gather of 8 elements.
846 result.x = _mm_set_epi16(from[7*stride].
x, from[6*stride].
x, from[5*stride].
x, from[4*stride].
x, from[3*stride].
x, from[2*stride].
x, from[1*stride].
x, from[0*stride].
x);
// Presumably pscatter<Eigen::half, Packet8h> via a stack buffer `aux`
// (declaration/store on elided lines).
854 to[stride*0].
x = aux[0].x;
855 to[stride*1].
x = aux[1].x;
856 to[stride*2].
x = aux[2].x;
857 to[stride*3].
x = aux[3].x;
858 to[stride*4].
x = aux[4].x;
859 to[stride*5].
x = aux[5].x;
860 to[stride*6].
x = aux[6].x;
861 to[stride*7].
x = aux[7].x;
// 8x8 transpose of Packet8h registers via integer SSE2 unpacks (halves are
// opaque 16-bit payloads). Three-stage butterfly: 16-bit, 32-bit, then
// 64-bit interleaves. NOTE(review): the function signature and closing brace
// fall on elided lines; body left byte-identical.
890 __m128i
a = kernel.packet[0].x;
891 __m128i
b = kernel.packet[1].x;
892 __m128i
c = kernel.packet[2].x;
893 __m128i d = kernel.packet[3].x;
894 __m128i e = kernel.packet[4].x;
895 __m128i f = kernel.packet[5].x;
896 __m128i g = kernel.packet[6].x;
897 __m128i h = kernel.packet[7].x;
// Stage 1: interleave 16-bit elements of row pairs.
899 __m128i a03b03 = _mm_unpacklo_epi16(
a,
b);
900 __m128i c03d03 = _mm_unpacklo_epi16(
c, d);
901 __m128i e03f03 = _mm_unpacklo_epi16(e, f);
902 __m128i g03h03 = _mm_unpacklo_epi16(g, h);
903 __m128i a47b47 = _mm_unpackhi_epi16(
a,
b);
904 __m128i c47d47 = _mm_unpackhi_epi16(
c, d);
905 __m128i e47f47 = _mm_unpackhi_epi16(e, f);
906 __m128i g47h47 = _mm_unpackhi_epi16(g, h);
// Stage 2: interleave 32-bit pairs.
908 __m128i a01b01c01d01 = _mm_unpacklo_epi32(a03b03, c03d03);
909 __m128i a23b23c23d23 = _mm_unpackhi_epi32(a03b03, c03d03);
910 __m128i e01f01g01h01 = _mm_unpacklo_epi32(e03f03, g03h03);
911 __m128i e23f23g23h23 = _mm_unpackhi_epi32(e03f03, g03h03);
912 __m128i a45b45c45d45 = _mm_unpacklo_epi32(a47b47, c47d47);
913 __m128i a67b67c67d67 = _mm_unpackhi_epi32(a47b47, c47d47);
914 __m128i e45f45g45h45 = _mm_unpacklo_epi32(e47f47, g47h47);
915 __m128i e67f67g67h67 = _mm_unpackhi_epi32(e47f47, g47h47);
// Stage 3: interleave 64-bit quads to produce the 8 transposed rows.
917 __m128i a0b0c0d0e0f0g0h0 = _mm_unpacklo_epi64(a01b01c01d01, e01f01g01h01);
918 __m128i a1b1c1d1e1f1g1h1 = _mm_unpackhi_epi64(a01b01c01d01, e01f01g01h01);
919 __m128i a2b2c2d2e2f2g2h2 = _mm_unpacklo_epi64(a23b23c23d23, e23f23g23h23);
920 __m128i a3b3c3d3e3f3g3h3 = _mm_unpackhi_epi64(a23b23c23d23, e23f23g23h23);
921 __m128i a4b4c4d4e4f4g4h4 = _mm_unpacklo_epi64(a45b45c45d45, e45f45g45h45);
922 __m128i a5b5c5d5e5f5g5h5 = _mm_unpackhi_epi64(a45b45c45d45, e45f45g45h45);
923 __m128i a6b6c6d6e6f6g6h6 = _mm_unpacklo_epi64(a67b67c67d67, e67f67g67h67);
924 __m128i a7b7c7d7e7f7g7h7 = _mm_unpackhi_epi64(a67b67c67d67, e67f67g67h67);
926 kernel.packet[0].x = a0b0c0d0e0f0g0h0;
927 kernel.packet[1].x = a1b1c1d1e1f1g1h1;
928 kernel.packet[2].x = a2b2c2d2e2f2g2h2;
929 kernel.packet[3].x = a3b3c3d3e3f3g3h3;
930 kernel.packet[4].x = a4b4c4d4e4f4g4h4;
931 kernel.packet[5].x = a5b5c5d5e5f5g5h5;
932 kernel.packet[6].x = a6b6c6d6e6f6g6h6;
933 kernel.packet[7].x = a7b7c7d7e7f7g7h7;
// Presumably ptranspose(PacketBlock<Packet8h,4>&): spill to stack buffers,
// permute element-wise, reload (signature/declarations on elided lines).
939 pstore<Eigen::half>(in[0], kernel.packet[0]);
940 pstore<Eigen::half>(in[1], kernel.packet[1]);
941 pstore<Eigen::half>(in[2], kernel.packet[2]);
942 pstore<Eigen::half>(in[3], kernel.packet[3]);
946 for (
int i = 0; i < 4; ++i) {
947 for (
int j = 0; j < 4; ++j) {
948 out[i][j] = in[j][2*i];
950 for (
int j = 0; j < 4; ++j) {
951 out[i][j+4] = in[j][2*i+1];
955 kernel.packet[0] = pload<Packet8h>(out[0]);
956 kernel.packet[1] = pload<Packet8h>(out[1]);
957 kernel.packet[2] = pload<Packet8h>(out[2]);
958 kernel.packet[3] = pload<Packet8h>(out[3]);
971 template<>
struct is_arithmetic<Packet4h> {
enum {
value =
true }; };
// NOTE(review): MMX Packet4h section — elided fragments; signatures and many
// body lines are missing. Identifications inferred from Eigen conventions —
// confirm against the full file.
// Fragment of packet_traits<Eigen::half> selecting Packet4h.
974 struct packet_traits<
Eigen::half> : default_packet_traits {
975 typedef Packet4h
type;
977 typedef Packet4h
half;
// Presumably pset1<Packet4h>: broadcast the raw 16-bit payload.
1007 result.x = _mm_set1_pi16(from.
x);
// Presumably padd<Packet4h>: extract both __m64 payloads into 64-bit ints
// so the four 16-bit lanes can be processed as scalars (lane math elided).
1018 __int64_t a64 = _mm_cvtm64_si64(
a.x);
1019 __int64_t b64 = _mm_cvtm64_si64(
b.x);
// Repack four per-lane results h[0..3] (set_pi16 is highest-lane-first).
1036 result.
x = _mm_set_pi16(h[3].
x, h[2].
x, h[1].
x, h[0].
x);
// Presumably pmul<Packet4h>: same extract/compute/repack structure.
1041 __int64_t a64 = _mm_cvtm64_si64(
a.x);
1042 __int64_t b64 = _mm_cvtm64_si64(
b.x);
1059 result.
x = _mm_set_pi16(h[3].
x, h[2].
x, h[1].
x, h[0].
x);
// Presumably pload<Packet4h>: 64-bit load via integer round-trip.
1065 result.x = _mm_cvtsi64_m64(*
reinterpret_cast<const __int64_t*
>(from));
// Presumably ploadu<Packet4h>: same pattern for the unaligned variant.
1071 result.x = _mm_cvtsi64_m64(*
reinterpret_cast<const __int64_t*
>(from));
// Presumably pstore<Eigen::half>: 64-bit store via integer round-trip.
1076 __int64_t r = _mm_cvtm64_si64(from.x);
1077 *(
reinterpret_cast<__int64_t*
>(to)) = r;
// Presumably pstoreu<Eigen::half>: identical body for the unaligned variant.
1081 __int64_t r = _mm_cvtm64_si64(from.x);
1082 *(
reinterpret_cast<__int64_t*
>(to)) = r;
// Presumably ploaddup/first-element broadcast delegating to pset1.
1087 return pset1<Packet4h>(*from);
// Presumably pgather<Eigen::half, Packet4h>: strided gather of 4 elements.
1093 result.x = _mm_set_pi16(from[3*stride].
x, from[2*stride].
x, from[1*stride].
x, from[0*stride].
x);
// Presumably pscatter<Eigen::half, Packet4h>: unpack the __m64 into a
// 64-bit int and peel off one 16-bit lane per destination.
1099 __int64_t
a = _mm_cvtm64_si64(from.x);
1100 to[stride*0].
x =
static_cast<unsigned short>(
a);
1101 to[stride*1].
x =
static_cast<unsigned short>(
a >> 16);
1102 to[stride*2].
x =
static_cast<unsigned short>(
a >> 32);
1103 to[stride*3].
x =
static_cast<unsigned short>(
a >> 48);
// 4x4 transpose of Packet4h via MMX unpacks: 16-bit interleave then 32-bit
// interleave. NOTE(review): qualifier line and closing brace are elided.
1107 ptranspose(PacketBlock<Packet4h,4>& kernel) {
1108 __m64 T0 = _mm_unpacklo_pi16(kernel.packet[0].x, kernel.packet[1].x);
1109 __m64 T1 = _mm_unpacklo_pi16(kernel.packet[2].x, kernel.packet[3].x);
1110 __m64 T2 = _mm_unpackhi_pi16(kernel.packet[0].x, kernel.packet[1].x);
1111 __m64 T3 = _mm_unpackhi_pi16(kernel.packet[2].x, kernel.packet[3].x);
1113 kernel.packet[0].x = _mm_unpacklo_pi32(T0, T1);
1114 kernel.packet[1].x = _mm_unpackhi_pi32(T0, T1);
1115 kernel.packet[2].x = _mm_unpacklo_pi32(T2, T3);
1116 kernel.packet[3].x = _mm_unpackhi_pi32(T2, T3);
1124 #endif // EIGEN_PACKET_MATH_HALF_CUDA_H