TensorIntDiv.h
// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2014 Benoit Steiner <benoit.steiner.goog@gmail.com>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H
#define EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H


namespace Eigen {

namespace internal {

namespace {

  // Note: result is undefined if val == 0
  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
  typename internal::enable_if<sizeof(T)==4,int>::type count_leading_zeros(const T val)
  {
#ifdef EIGEN_GPU_COMPILE_PHASE
    return __clz(val);
#elif defined(SYCL_DEVICE_ONLY)
    return cl::sycl::clz(val);
#elif EIGEN_COMP_MSVC
    unsigned long index;
    _BitScanReverse(&index, val);
    return 31 - index;
#else
    EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE);
    return __builtin_clz(static_cast<uint32_t>(val));
#endif
  }

  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
  typename internal::enable_if<sizeof(T)==8,int>::type count_leading_zeros(const T val)
  {
#ifdef EIGEN_GPU_COMPILE_PHASE
    return __clzll(val);
#elif defined(SYCL_DEVICE_ONLY)
    return static_cast<int>(cl::sycl::clz(val));
#elif EIGEN_COMP_MSVC && EIGEN_ARCH_x86_64
    unsigned long index;
    _BitScanReverse64(&index, val);
    return 63 - index;
#elif EIGEN_COMP_MSVC
    // MSVC's _BitScanReverse64 is not available for 32bits builds.
    unsigned int lo = (unsigned int)(val&0xffffffff);
    unsigned int hi = (unsigned int)((val>>32)&0xffffffff);
    int n;
    if(hi==0)
      n = 32 + count_leading_zeros<unsigned int>(lo);
    else
      n = count_leading_zeros<unsigned int>(hi);
    return n;
#else
    EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE);
    return __builtin_clzll(static_cast<uint64_t>(val));
#endif
  }
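
  // N - count_leading_zeros(d) is the number of significant bits of d, which
  // the TensorIntDivisor constructor below uses as ceil(log2(d)). For
  // instance, count_leading_zeros(uint32_t(5)) == 29, so log_div = 32 - 29 = 3;
  // for an exact power of two such as 8 (clz == 28, 32 - 28 = 4) the
  // constructor decrements the result once more, giving log2(8) == 3.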

  template <typename T>
  struct UnsignedTraits {
    typedef typename conditional<sizeof(T) == 8, uint64_t, uint32_t>::type type;
  };

  template <typename T>
  struct DividerTraits {
    typedef typename UnsignedTraits<T>::type type;
    static const int N = sizeof(T) * 8;
  };

  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t muluh(const uint32_t a, const T b) {
#if defined(EIGEN_GPU_COMPILE_PHASE)
    return __umulhi(a, b);
#elif defined(SYCL_DEVICE_ONLY)
    return cl::sycl::mul_hi(a, static_cast<uint32_t>(b));
#else
    return (static_cast<uint64_t>(a) * b) >> 32;
#endif
  }

  template <typename T>
  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t muluh(const uint64_t a, const T b) {
#if defined(EIGEN_GPU_COMPILE_PHASE)
    return __umul64hi(a, b);
#elif defined(SYCL_DEVICE_ONLY)
    return cl::sycl::mul_hi(a, static_cast<uint64_t>(b));
#elif EIGEN_HAS_BUILTIN_INT128
    __uint128_t v = static_cast<__uint128_t>(a) * static_cast<__uint128_t>(b);
    return static_cast<uint64_t>(v >> 64);
#else
    return (TensorUInt128<static_val<0>, uint64_t>(a) * TensorUInt128<static_val<0>, uint64_t>(b)).upper();
#endif
  }

  template <int N, typename T>
  struct DividerHelper {
    static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t computeMultiplier(const int log_div, const T divider) {
      EIGEN_STATIC_ASSERT(N == 32, YOU_MADE_A_PROGRAMMING_MISTAKE);
      return static_cast<uint32_t>((static_cast<uint64_t>(1) << (N+log_div)) / divider - (static_cast<uint64_t>(1) << N) + 1);
    }
  };

  template <typename T>
  struct DividerHelper<64, T> {
    static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t computeMultiplier(const int log_div, const T divider) {
#if EIGEN_HAS_BUILTIN_INT128 && !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(SYCL_DEVICE_ONLY)
      return static_cast<uint64_t>((static_cast<__uint128_t>(1) << (64+log_div)) / static_cast<__uint128_t>(divider) - (static_cast<__uint128_t>(1) << 64) + 1);
#else
      const uint64_t shift = 1ULL << log_div;
      TensorUInt128<uint64_t, uint64_t> result = TensorUInt128<uint64_t, static_val<0> >(shift, 0) / TensorUInt128<static_val<0>, uint64_t>(divider)
                                                 - TensorUInt128<static_val<1>, static_val<0> >(1, 0)
                                                 + TensorUInt128<static_val<0>, static_val<1> >(1);
      return static_cast<uint64_t>(result);
#endif
    }
  };
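
  // Both specializations return floor(2^(N + log_div) / divider) - 2^N + 1,
  // i.e. the (N+1)-bit "round-up" multiplier of Granlund & Montgomery with its
  // implicit top bit (2^N) dropped so that it fits in N bits. The divide()
  // routine below adds the effect of that top bit back in through its
  // shift1/shift2 two-step, so no (N+1)-bit arithmetic is needed per division.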
}


template <typename T, bool div_gt_one = false>
struct TensorIntDivisor {
 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() {
    multiplier = 0;
    shift1 = 0;
    shift2 = 0;
  }

  // Must have 0 < divider < 2^31. This is relaxed to
  // 0 < divider < 2^63 when using 64-bit indices on platforms that support
  // the __uint128_t type.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor(const T divider) {
    const int N = DividerTraits<T>::N;
    eigen_assert(static_cast<typename UnsignedTraits<T>::type>(divider) < NumTraits<UnsignedType>::highest()/2);
    eigen_assert(divider > 0);

    // fast ln2
    const int leading_zeros = count_leading_zeros(static_cast<UnsignedType>(divider));
    int log_div = N - leading_zeros;
    // if divider is a power of two then log_div is 1 more than it should be.
    if ((static_cast<typename UnsignedTraits<T>::type>(1) << (log_div-1)) == static_cast<typename UnsignedTraits<T>::type>(divider))
      log_div--;

    multiplier = DividerHelper<N, T>::computeMultiplier(log_div, divider);
    shift1 = log_div > 1 ? 1 : log_div;
    shift2 = log_div > 1 ? log_div-1 : 0;
  }

  // Must have 0 <= numerator. On platforms that don't support the __uint128_t
  // type numerator should also be less than 2^32-1.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T divide(const T numerator) const {
    eigen_assert(static_cast<typename UnsignedTraits<T>::type>(numerator) < NumTraits<UnsignedType>::highest()/2);
    //eigen_assert(numerator >= 0); // this is implicitly asserted by the line above

    UnsignedType t1 = muluh(multiplier, numerator);
    UnsignedType t = (static_cast<UnsignedType>(numerator) - t1) >> shift1;
    return (t1 + t) >> shift2;
  }
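
  // The three lines above compute, in effect,
  //   t1 = (multiplier * numerator) >> N,   q = (numerator + t1) >> log_div,
  // which equals numerator / divider for the values allowed by the asserts.
  // Forming t1 + ((numerator - t1) >> 1) before the final shift keeps the
  // intermediate sum within N bits. For example, with T = int32_t and
  // divider = 10: log_div = 4, multiplier = 0x9999999A, and divide(137)
  // gives t1 = 82, t = 27, (82 + 27) >> 3 == 13.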

 private:
  typedef typename DividerTraits<T>::type UnsignedType;
  UnsignedType multiplier;
  int32_t shift1;
  int32_t shift2;
};


// Optimized version for signed 32 bit integers.
// Derived from Hacker's Delight.
// Only works for divisors strictly greater than one
template <>
class TensorIntDivisor<int32_t, true> {
 public:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() {
    magic = 0;
    shift = 0;
  }
  // Must have 2 <= divider
  EIGEN_DEVICE_FUNC TensorIntDivisor(int32_t divider) {
    eigen_assert(divider >= 2);
    calcMagic(divider);
  }

  EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int divide(const int32_t n) const {
#ifdef EIGEN_GPU_COMPILE_PHASE
    return (__umulhi(magic, n) >> shift);
#elif defined(SYCL_DEVICE_ONLY)
    return (cl::sycl::mul_hi(magic, static_cast<uint32_t>(n)) >> shift);
#else
    uint64_t v = static_cast<uint64_t>(magic) * static_cast<uint64_t>(n);
    return (static_cast<uint32_t>(v >> 32) >> shift);
#endif
  }

 private:
  // Compute the magic numbers. See Hacker's Delight section 10 for an in
  // depth explanation.
  EIGEN_DEVICE_FUNC void calcMagic(int32_t d) {
    const unsigned two31 = 0x80000000;     // 2**31.
    unsigned ad = d;
    unsigned t = two31 + (ad >> 31);
    unsigned anc = t - 1 - t%ad;   // Absolute value of nc.
    int p = 31;                    // Init. p.
    unsigned q1 = two31/anc;       // Init. q1 = 2**p/|nc|.
    unsigned r1 = two31 - q1*anc;  // Init. r1 = rem(2**p, |nc|).
    unsigned q2 = two31/ad;        // Init. q2 = 2**p/|d|.
    unsigned r2 = two31 - q2*ad;   // Init. r2 = rem(2**p, |d|).
    unsigned delta = 0;
    do {
      p = p + 1;
      q1 = 2*q1;           // Update q1 = 2**p/|nc|.
      r1 = 2*r1;           // Update r1 = rem(2**p, |nc|).
      if (r1 >= anc) {     // (Must be an unsigned
        q1 = q1 + 1;       // comparison here).
        r1 = r1 - anc;}
      q2 = 2*q2;           // Update q2 = 2**p/|d|.
      r2 = 2*r2;           // Update r2 = rem(2**p, |d|).
      if (r2 >= ad) {      // (Must be an unsigned
        q2 = q2 + 1;       // comparison here).
        r2 = r2 - ad;}
      delta = ad - r2;
    } while (q1 < delta || (q1 == delta && r1 == 0));

    magic = (unsigned)(q2 + 1);
    shift = p - 32;
  }
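
  // For example, calcMagic(7) converges to magic == 0x92492493 and shift == 2
  // (the constants tabulated in Hacker's Delight), so divide(n) evaluates
  // (uint32_t)((0x92492493ULL * n) >> 32) >> 2 on the CPU path;
  // divide(100) == 57 >> 2 == 14.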

  uint32_t magic;
  int32_t shift;
};


template <typename T, bool div_gt_one>
static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator / (const T& numerator, const TensorIntDivisor<T, div_gt_one>& divisor) {
  return divisor.divide(numerator);
}

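// Usage sketch (illustrative only; the variable names are hypothetical):
// construct the divisor once outside a hot loop, then reuse it for many
// divisions, e.g. when unflattening a linear tensor index:
//
//   const Eigen::internal::TensorIntDivisor<Eigen::Index> fast_div(inner_dim);
//   const Eigen::Index outer = flat_index / fast_div;  // uses operator/ above
//   const Eigen::Index inner = flat_index - outer * inner_dim;
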
} // end namespace internal
} // end namespace Eigen

#endif // EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H