10 #ifndef EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H
11 #define EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H
// count_leading_zeros, 4-byte overload: number of leading zero bits in val.
// NOTE(review): this chunk is elided — the template/EIGEN_DEVICE_FUNC prefix
// and the #if/#else structure selecting the MSVC vs GCC/Clang path are not
// visible here; only one line of each branch survives.
36 typename internal::enable_if<
sizeof(T)==4,
int>::type count_leading_zeros(
const T val)
// MSVC path: _BitScanReverse writes the index of the highest set bit;
// the conversion to a leading-zero count (31 - index) is elided here.
42 _BitScanReverse(&index, val);
// GCC/Clang path: __builtin_clz on the value widened to uint32_t.
// NOTE(review): __builtin_clz is undefined for val == 0 — presumably the
// divider is always nonzero here; confirm against callers.
46 return __builtin_clz(
static_cast<uint32_t
>(val));
// count_leading_zeros, 8-byte overload: number of leading zero bits in val.
// NOTE(review): elided chunk — signature prefix and parts of the
// preprocessor branching are missing from this view.
52 typename internal::enable_if<
sizeof(T)==8,
int>::type count_leading_zeros(
const T val)
// 64-bit MSVC on x86-64: use the native 64-bit bit-scan intrinsic.
56 #elif EIGEN_COMP_MSVC && EIGEN_ARCH_x86_64
58 _BitScanReverse64(&index, val);
// 32-bit MSVC fallback (no _BitScanReverse64): split the 64-bit value into
// two 32-bit halves and delegate to the 4-byte overload.
62 unsigned int lo = (
unsigned int)(val&0xffffffff);
63 unsigned int hi = (
unsigned int)((val>>32)&0xffffffff);
// If the high half is zero, all leading zeros of the low half count,
// offset by the 32 zero bits of the high half. (The if/else lines
// themselves are elided in this view.)
66 n = 32 + count_leading_zeros<unsigned int>(lo);
68 n = count_leading_zeros<unsigned int>(hi);
// GCC/Clang path: __builtin_clzll on the value widened to uint64_t.
// NOTE(review): undefined for val == 0 — confirm callers never pass 0.
72 return __builtin_clzll(
static_cast<uint64_t
>(val));
// Maps an integer type T to the unsigned type of the same width:
// uint64_t for 8-byte T, uint32_t otherwise. (Template header and closing
// brace are elided in this view.)
77 struct UnsignedTraits {
78 typedef typename conditional<
sizeof(T) == 8, uint64_t, uint32_t>::type type;
// Traits bundle for the divisor machinery: the unsigned counterpart of T
// and its width in bits (N = sizeof(T) * 8). (Template header and closing
// brace are elided in this view.)
82 struct DividerTraits {
83 typedef typename UnsignedTraits<T>::type type;
84 static const int N =
sizeof(T) * 8;
// muluh (32-bit): returns the high 32 bits of the 32x32 -> 64-bit product
// a * b. (Function signature is elided in this view.)
// On CUDA devices, use the hardware multiply-high intrinsic.
89 #if defined(__CUDA_ARCH__)
90 return __umulhi(
a,
b);
// Host fallback: widen one operand to 64 bits, multiply, and take the
// upper half of the product.
92 return (
static_cast<uint64_t
>(
a) *
b) >> 32;
// muluh (64-bit): returns the high 64 bits of the 64x64 -> 128-bit product
// a * b. (Function signature is elided in this view.)
// CUDA devices: hardware 64-bit multiply-high intrinsic.
98 #if defined(__CUDA_ARCH__)
99 return __umul64hi(
a,
b);
// Compilers with native 128-bit integers: widen, multiply, shift.
100 #elif defined(__SIZEOF_INT128__)
101 __uint128_t v =
static_cast<__uint128_t
>(
a) *
static_cast<__uint128_t
>(
b);
102 return static_cast<uint64_t
>(v >> 64);
// Portable fallback: emulate the 128-bit product with Eigen's
// TensorUInt128 and return its upper 64-bit word.
104 return (TensorUInt128<static_val<0>, uint64_t>(
a) * TensorUInt128<static_val<0>, uint64_t>(
b)).upper();
// Primary DividerHelper: computes the magic multiplier for fast division
// by a runtime-constant divisor, for widths N < 64 where the arithmetic
// fits in 64 bits. The multiplier is
//   (2^(N+log_div)) / divider - 2^N + 1
// i.e. the multiply-high reciprocal used by the TensorIntDivisor below.
// (Closing braces of the method and struct are elided in this view.)
108 template <
int N,
typename T>
109 struct DividerHelper {
110 static EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE uint32_t computeMultiplier(
const int log_div,
const T divider) {
112 return static_cast<uint32_t
>((
static_cast<uint64_t
>(1) << (
N+log_div)) / divider - (
static_cast<uint64_t
>(1) <<
N) + 1);
// Specialization for N == 64: the intermediate 2^(64+log_div) does not fit
// in 64 bits, so the computation needs 128-bit arithmetic. Same formula as
// the primary template: 2^(64+log_div) / divider - 2^64 + 1.
// (Closing braces and the #else/#endif closers are elided in this view.)
116 template <
typename T>
117 struct DividerHelper<64, T> {
118 static EIGEN_DEVICE_FUNC
EIGEN_ALWAYS_INLINE uint64_t computeMultiplier(
const int log_div,
const T divider) {
// Native 128-bit path (host compilers that provide __uint128_t).
119 #if defined(__SIZEOF_INT128__) && !defined(__CUDA_ARCH__)
120 return static_cast<uint64_t
>((
static_cast<__uint128_t
>(1) << (64+log_div)) /
static_cast<__uint128_t
>(divider) - (
static_cast<__uint128_t
>(1) << 64) + 1);
// Portable/CUDA path: the same computation carried out with Eigen's
// emulated 128-bit type. 2^(64+log_div) is expressed as TensorUInt128
// with upper word 'shift' (== 2^log_div), and 2^64 as upper word 1.
122 const uint64_t shift = 1ULL << log_div;
123 TensorUInt128<uint64_t, uint64_t> result = TensorUInt128<uint64_t, static_val<0> >(shift, 0) / TensorUInt128<static_val<0>, uint64_t>(divider)
124 - TensorUInt128<static_val<1>, static_val<0> >(1, 0)
125 + TensorUInt128<static_val<0>, static_val<1> >(1);
126 return static_cast<uint64_t
>(result);
// Generic TensorIntDivisor<T>: precomputes a multiplier and two shifts so
// that later divisions by the same value reduce to a multiply-high plus
// shifts. Only constructor and divide() fragments are visible in this view;
// the class declaration, member declarations, and most statements are
// elided.
133 template <
typename T,
bool div_gt_one = false>
// Constructor fragment: derive log_div (position of the divisor's highest
// set bit relative to the type width) from the leading-zero count.
// NOTE(review): an adjustment of log_div between these lines appears to be
// elided — do not assume log_div == N - leading_zeros exactly.
151 const int leading_zeros = count_leading_zeros(
static_cast<UnsignedType>(divider));
152 int log_div =
N - leading_zeros;
// Precompute the magic multiplier and split the total shift into
// shift1 (applied before the correction add) and shift2 (applied after).
157 multiplier = DividerHelper<N, T>::computeMultiplier(log_div, divider);
158 shift1 = log_div > 1 ? 1 : log_div;
159 shift2 = log_div > 1 ? log_div-1 : 0;
// divide() fragment: final step of the multiply-high division
// ((numerator - t1) >> shift1 computations are elided above this line).
170 return (t1 + t) >>
shift2;
// Specialized TensorIntDivisor<int32_t, true> fragments (divisor known to
// be > 1). Only parts of divide() and of the magic-number constructor are
// visible; the class declaration and several statements are elided.
// divide(), CUDA path: multiply-high by the precomputed magic constant,
// then shift.
199 return (__umulhi(magic,
n) >> shift);
// divide(), host path: 32x32 -> 64-bit product, take the upper 32 bits,
// then shift.
201 uint64_t v =
static_cast<uint64_t
>(magic) *
static_cast<uint64_t
>(
n);
202 return (
static_cast<uint32_t
>(v >> 32) >> shift);
// Constructor fragment: appears to follow the signed-division magic-number
// algorithm from Hacker's Delight (sec. 10-1): anc is the absolute value of
// nc (largest value safe from overflow), q1/r1 track 2^31 / |nc|, and
// q2/r2 track 2^31 / |d|, refined in the (elided) do-loop below until the
// quotient stabilizes.
210 const unsigned two31 = 0x80000000;
212 unsigned t = two31 + (ad >> 31);
213 unsigned anc = t - 1 - t%ad;
215 unsigned q1 = two31/anc;
216 unsigned r1 = two31 - q1*anc;
217 unsigned q2 = two31/ad;
218 unsigned r2 = two31 - q2*ad;
// End of the refinement loop (its 'do {' opener and body are elided).
233 }
while (q1 < delta || (q1 == delta && r1 == 0));
// Final magic constant is the refined quotient plus one.
235 magic = (unsigned)(q2 + 1);
// Convenience operator: numerator / divisor forwards to
// TensorIntDivisor::divide(). (Signature line is elided in this view.)
244 template <
typename T,
bool div_gt_one>
246 return divisor.
divide(numerator);
253 #endif // EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H