#ifndef EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H
#define EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H

namespace Eigen {
namespace internal {

namespace {
  // Note: the result is undefined if val == 0.
  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
  typename internal::enable_if<sizeof(T) == 4, int>::type count_leading_zeros(const T val) {
#ifdef EIGEN_GPU_COMPILE_PHASE
    return __clz(val);
#elif defined(SYCL_DEVICE_ONLY)
    return cl::sycl::clz(val);
#elif EIGEN_COMP_MSVC
    unsigned long index;
    _BitScanReverse(&index, val);
    return 31 - index;
#else
    return __builtin_clz(static_cast<uint32_t>(val));
#endif
  }
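  // For a 32-bit type, count_leading_zeros(1) == 31 and
  // count_leading_zeros(0x80000000) == 0.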
  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
  typename internal::enable_if<sizeof(T) == 8, int>::type count_leading_zeros(const T val) {
#ifdef EIGEN_GPU_COMPILE_PHASE
    return __clzll(val);
#elif defined(SYCL_DEVICE_ONLY)
    return static_cast<int>(cl::sycl::clz(val));
#elif EIGEN_COMP_MSVC && EIGEN_ARCH_x86_64
    unsigned long index;
    _BitScanReverse64(&index, val);
    return 63 - index;
#elif EIGEN_COMP_MSVC
    // MSVC's _BitScanReverse64 is not available in 32-bit builds: scan the
    // two 32-bit halves instead.
    unsigned int lo = (unsigned int)(val & 0xffffffff);
    unsigned int hi = (unsigned int)((val >> 32) & 0xffffffff);
    int n;
    if (hi == 0)
      n = 32 + count_leading_zeros<unsigned int>(lo);
    else
      n = count_leading_zeros<unsigned int>(hi);
    return n;
#else
    return __builtin_clzll(static_cast<uint64_t>(val));
#endif
  }
  template <typename T>
  struct UnsignedTraits {
    typedef typename conditional<sizeof(T) == 8, uint64_t, uint32_t>::type type;
  };
  template <typename T>
  struct DividerTraits {
    typedef typename UnsignedTraits<T>::type type;
    static const int N = sizeof(T) * 8;
  };
  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t muluh(const uint32_t a, const T b) {
#if defined(EIGEN_GPU_COMPILE_PHASE)
    return __umulhi(a, b);
#elif defined(SYCL_DEVICE_ONLY)
    return cl::sycl::mul_hi(a, static_cast<uint32_t>(b));
#else
    return (static_cast<uint64_t>(a) * b) >> 32;
#endif
  }
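  // muluh returns the high word of the full 64-bit product, i.e.
  // (a * b) >> 32; for example, muluh(0x80000000u, 4) == 2.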
  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t muluh(const uint64_t a, const T b) {
#if defined(EIGEN_GPU_COMPILE_PHASE)
    return __umul64hi(a, b);
#elif defined(SYCL_DEVICE_ONLY)
    return cl::sycl::mul_hi(a, static_cast<uint64_t>(b));
#elif EIGEN_HAS_BUILTIN_INT128
    __uint128_t v = static_cast<__uint128_t>(a) * static_cast<__uint128_t>(b);
    return static_cast<uint64_t>(v >> 64);
#else
    return (TensorUInt128<static_val<0>, uint64_t>(a) * TensorUInt128<static_val<0>, uint64_t>(b)).upper();
#endif
  }
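  // The last branch emulates the 128-bit product with TensorUInt128 on
  // targets without __uint128_t support; upper() is the high 64 bits.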
  template <int N, typename T>
  struct DividerHelper {
    static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t computeMultiplier(const int log_div, const T divider) {
      EIGEN_STATIC_ASSERT(N == 32, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return static_cast<uint32_t>((static_cast<uint64_t>(1) << (N + log_div)) / divider
                                   - (static_cast<uint64_t>(1) << N) + 1);
    }
  };
  template <typename T>
  struct DividerHelper<64, T> {
    static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t computeMultiplier(const int log_div, const T divider) {
#if EIGEN_HAS_BUILTIN_INT128 && !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(SYCL_DEVICE_ONLY)
      return static_cast<uint64_t>((static_cast<__uint128_t>(1) << (64 + log_div)) / static_cast<__uint128_t>(divider)
                                   - (static_cast<__uint128_t>(1) << 64) + 1);
#else
      const uint64_t shift = 1ULL << log_div;
      TensorUInt128<uint64_t, uint64_t> result =
          TensorUInt128<uint64_t, static_val<0> >(shift, 0) / TensorUInt128<static_val<0>, uint64_t>(divider)
          - TensorUInt128<static_val<1>, static_val<0> >(1, 0)
          + TensorUInt128<static_val<0>, static_val<1> >(1);
      return static_cast<uint64_t>(result);
#endif
    }
  };
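  // Both branches evaluate the same quantity,
  //   multiplier = 2^(N + log_div) / divider - 2^N + 1,
  // the magic constant of the classic multiply-and-shift scheme for division
  // by an invariant integer: muluh(multiplier, n) approximates
  // n * 2^log_div / divider, and the shifts in TensorIntDivisor::divide()
  // below turn that approximation into the exact quotient n / divider.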
}  // end anonymous namespace

// Fast division by a positive run-time constant, using the multiply-and-shift
// technique for division by invariant integers.
template <typename T, bool div_gt_one = false>
struct TensorIntDivisor {
 public:
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE TensorIntDivisor() : multiplier(0), shift1(0), shift2(0) {}

  // Must have 0 < divider < 2^31. This is relaxed to 0 < divider < 2^63 when
  // using 64-bit indices on platforms that support the __uint128_t type.
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE TensorIntDivisor(const T divider) {
    const int N = DividerTraits<T>::N;
    eigen_assert(static_cast<UnsignedType>(divider) < NumTraits<UnsignedType>::highest() / 2);
    eigen_assert(divider > 0);

    // fast log2 of the divider, rounded up
    const int leading_zeros = count_leading_zeros(static_cast<UnsignedType>(divider));
    int log_div = N - leading_zeros;
    // if the divider is a power of two, log_div is one more than it should be
    if ((static_cast<UnsignedType>(1) << (log_div - 1)) == static_cast<UnsignedType>(divider))
      log_div--;

    multiplier = DividerHelper<N, T>::computeMultiplier(log_div, divider);
    shift1 = log_div > 1 ? 1 : log_div;
    shift2 = log_div > 1 ? log_div - 1 : 0;
  }

  // Must have 0 <= numerator. On platforms that don't support the __uint128_t
  // type, numerator should also be less than 2^32 - 1.
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T divide(const T numerator) const {
    eigen_assert(static_cast<UnsignedType>(numerator) < NumTraits<UnsignedType>::highest() / 2);

    UnsignedType t1 = muluh(multiplier, numerator);
    UnsignedType t = (static_cast<UnsignedType>(numerator) - t1) >> shift1;
    return (t1 + t) >> shift2;
  }

 private:
  typedef typename DividerTraits<T>::type UnsignedType;
  UnsignedType multiplier;
  int32_t shift1;
  int32_t shift2;
};
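// Worked example (illustrative, not part of the original source): for
// T = int32_t and divider = 3, N = 32 and log_div = 2, so
// multiplier = 2^34/3 - 2^32 + 1 = 0x55555556, shift1 = 1, shift2 = 1.
// divide(7) then computes t1 = muluh(0x55555556, 7) = 2,
// t = (7 - 2) >> 1 = 2, and returns (2 + 2) >> 1 = 2 == 7/3.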
// Optimized version for signed 32-bit integers, derived from Hacker's
// Delight. Only works for divisors strictly greater than one.
template <>
class TensorIntDivisor<int32_t, true> {
 public:
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE TensorIntDivisor() : magic(0), shift(0) {}

  // Must have 2 <= divider.
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE TensorIntDivisor(int32_t divider) {
    eigen_assert(divider >= 2);
    calcMagic(divider);
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int divide(const int32_t n) const {
#ifdef EIGEN_GPU_COMPILE_PHASE
    return (__umulhi(magic, n) >> shift);
#elif defined(SYCL_DEVICE_ONLY)
    return (cl::sycl::mul_hi(magic, static_cast<uint32_t>(n)) >> shift);
#else
    uint64_t v = static_cast<uint64_t>(magic) * static_cast<uint64_t>(n);
    return (static_cast<uint32_t>(v >> 32) >> shift);
#endif
  }
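  // Illustrative check (not from the original source): calcMagic(3) should
  // produce magic == 0x55555556 and shift == 0, so divide(7) computes
  // (0x55555556 * 7) >> 32 == 2 == 7/3.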
 private:
  // Compute the magic numbers; see Hacker's Delight section 10 for an
  // in-depth explanation.
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE void calcMagic(int32_t d) {
    const unsigned two31 = 0x80000000;  // 2**31
    unsigned ad = d;
    unsigned t = two31 + (ad >> 31);
    unsigned anc = t - 1 - t % ad;   // absolute value of nc
    int p = 31;                      // init. p
    unsigned q1 = two31 / anc;       // init. q1 = 2**p / |nc|
    unsigned r1 = two31 - q1 * anc;  // init. r1 = rem(2**p, |nc|)
    unsigned q2 = two31 / ad;        // init. q2 = 2**p / |d|
    unsigned r2 = two31 - q2 * ad;   // init. r2 = rem(2**p, |d|)
    unsigned delta = 0;
    do {
      p = p + 1;
      q1 = 2 * q1;      // update q1 = 2**p / |nc|
      r1 = 2 * r1;      // update r1 = rem(2**p, |nc|)
      if (r1 >= anc) {  // must be an unsigned comparison
        q1 = q1 + 1;
        r1 = r1 - anc;
      }
      q2 = 2 * q2;      // update q2 = 2**p / |d|
      r2 = 2 * r2;      // update r2 = rem(2**p, |d|)
      if (r2 >= ad) {   // must be an unsigned comparison
        q2 = q2 + 1;
        r2 = r2 - ad;
      }
      delta = ad - r2;
    } while (q1 < delta || (q1 == delta && r1 == 0));

    magic = (unsigned)(q2 + 1);
    shift = p - 32;
  }

  uint32_t magic;
  int32_t shift;
};
template <typename T, bool div_gt_one>
static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE T operator/(const T& numerator, const TensorIntDivisor<T, div_gt_one>& divisor) {
  return divisor.divide(numerator);
}

}  // end namespace internal
}  // end namespace Eigen
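// Example usage (illustrative, not part of the original header; stride and n
// are placeholder variables): the divisor is computed once, then reused for
// many divisions.
//
//   Eigen::internal::TensorIntDivisor<int64_t> fast_div(stride);
//   for (int64_t i = 0; i < n; ++i) {
//     const int64_t q = i / fast_div;  // same result as i / stride
//   }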
#endif  // EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H