10 #ifndef EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H 11 #define EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H 35 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
36 typename internal::enable_if<sizeof(T)==4,int>::type count_leading_zeros(
const T val)
38 #ifdef EIGEN_GPU_COMPILE_PHASE 40 #elif defined(SYCL_DEVICE_ONLY) 41 return cl::sycl::clz(val);
44 _BitScanReverse(&index, val);
47 EIGEN_STATIC_ASSERT(
sizeof(
unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE);
48 return __builtin_clz(static_cast<uint32_t>(val));
53 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
54 typename internal::enable_if<sizeof(T)==8,int>::type count_leading_zeros(
const T val)
56 #ifdef EIGEN_GPU_COMPILE_PHASE 58 #elif defined(SYCL_DEVICE_ONLY) 59 return static_cast<int>(cl::sycl::clz(val));
60 #elif EIGEN_COMP_MSVC && EIGEN_ARCH_x86_64 62 _BitScanReverse64(&index, val);
66 unsigned int lo = (
unsigned int)(val&0xffffffff);
67 unsigned int hi = (
unsigned int)((val>>32)&0xffffffff);
70 n = 32 + count_leading_zeros<unsigned int>(lo);
72 n = count_leading_zeros<unsigned int>(hi);
75 EIGEN_STATIC_ASSERT(
sizeof(
unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE);
76 return __builtin_clzll(static_cast<uint64_t>(val));
81 struct UnsignedTraits {
82 typedef typename conditional<sizeof(T) == 8, uint64_t, uint32_t>::type type;
86 struct DividerTraits {
87 typedef typename UnsignedTraits<T>::type type;
88 static const int N =
sizeof(T) * 8;
92 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t muluh(
const uint32_t a,
const T b) {
93 #if defined(EIGEN_GPU_COMPILE_PHASE) 94 return __umulhi(a, b);
95 #elif defined(SYCL_DEVICE_ONLY) 96 return cl::sycl::mul_hi(a, static_cast<uint32_t>(b));
98 return (static_cast<uint64_t>(a) * b) >> 32;
102 template <
typename T>
103 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t muluh(
const uint64_t a,
const T b) {
104 #if defined(EIGEN_GPU_COMPILE_PHASE) 105 return __umul64hi(a, b);
106 #elif defined(SYCL_DEVICE_ONLY) 107 return cl::sycl::mul_hi(a, static_cast<uint64_t>(b));
108 #elif EIGEN_HAS_BUILTIN_INT128 109 __uint128_t v =
static_cast<__uint128_t
>(a) * static_cast<__uint128_t>(b);
110 return static_cast<uint64_t
>(v >> 64);
112 return (TensorUInt128<static_val<0>, uint64_t>(a) * TensorUInt128<static_val<0>, uint64_t>(b)).upper();
116 template <
int N,
typename T>
117 struct DividerHelper {
118 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t computeMultiplier(
const int log_div,
const T divider) {
119 EIGEN_STATIC_ASSERT(N == 32, YOU_MADE_A_PROGRAMMING_MISTAKE);
120 return static_cast<uint32_t
>((
static_cast<uint64_t
>(1) << (N+log_div)) / divider - (static_cast<uint64_t>(1) << N) + 1);
124 template <
typename T>
125 struct DividerHelper<64, T> {
126 static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t computeMultiplier(
const int log_div,
const T divider) {
127 #if EIGEN_HAS_BUILTIN_INT128 && !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(SYCL_DEVICE_ONLY) 128 return static_cast<uint64_t
>((
static_cast<__uint128_t
>(1) << (64+log_div)) / static_cast<__uint128_t>(divider) - (
static_cast<__uint128_t
>(1) << 64) + 1);
130 const uint64_t shift = 1ULL << log_div;
131 TensorUInt128<uint64_t, uint64_t> result = TensorUInt128<uint64_t, static_val<0> >(shift, 0) / TensorUInt128<static_val<0>, uint64_t>(divider)
132 - TensorUInt128<static_val<1>, static_val<0> >(1, 0)
133 + TensorUInt128<static_val<0>, static_val<1> >(1);
134 return static_cast<uint64_t
>(result);
141 template <
typename T,
bool div_gt_one = false>
142 struct TensorIntDivisor {
144 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() {
153 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor(
const T divider) {
154 const int N = DividerTraits<T>::N;
155 eigen_assert(
static_cast<typename UnsignedTraits<T>::type
>(divider) < NumTraits<UnsignedType>::highest()/2);
156 eigen_assert(divider > 0);
159 const int leading_zeros = count_leading_zeros(static_cast<UnsignedType>(divider));
160 int log_div = N - leading_zeros;
162 if ((
static_cast<typename UnsignedTraits<T>::type
>(1) << (log_div-1)) ==
static_cast<typename UnsignedTraits<T>::type
>(divider))
165 multiplier = DividerHelper<N, T>::computeMultiplier(log_div, divider);
166 shift1 = log_div > 1 ? 1 : log_div;
167 shift2 = log_div > 1 ? log_div-1 : 0;
172 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T divide(
const T numerator)
const {
173 eigen_assert(
static_cast<typename UnsignedTraits<T>::type
>(numerator) < NumTraits<UnsignedType>::highest()/2);
176 UnsignedType t1 = muluh(multiplier, numerator);
177 UnsignedType t = (
static_cast<UnsignedType
>(numerator) - t1) >> shift1;
178 return (t1 + t) >> shift2;
182 typedef typename DividerTraits<T>::type UnsignedType;
183 UnsignedType multiplier;
193 class TensorIntDivisor<int32_t, true> {
195 EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() {
200 EIGEN_DEVICE_FUNC TensorIntDivisor(int32_t divider) {
201 eigen_assert(divider >= 2);
205 EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE
int divide(
const int32_t n)
const {
206 #ifdef EIGEN_GPU_COMPILE_PHASE 207 return (__umulhi(magic, n) >> shift);
208 #elif defined(SYCL_DEVICE_ONLY) 209 return (cl::sycl::mul_hi(magic, static_cast<uint32_t>(n)) >> shift);
211 uint64_t v =
static_cast<uint64_t
>(magic) * static_cast<uint64_t>(n);
212 return (static_cast<uint32_t>(v >> 32) >> shift);
219 EIGEN_DEVICE_FUNC
void calcMagic(int32_t d) {
220 const unsigned two31 = 0x80000000;
222 unsigned t = two31 + (ad >> 31);
223 unsigned anc = t - 1 - t%ad;
225 unsigned q1 = two31/anc;
226 unsigned r1 = two31 - q1*anc;
227 unsigned q2 = two31/ad;
228 unsigned r2 = two31 - q2*ad;
243 }
while (q1 < delta || (q1 == delta && r1 == 0));
245 magic = (unsigned)(q2 + 1);
254 template <
typename T,
bool div_gt_one>
255 static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator / (
const T& numerator,
const TensorIntDivisor<T, div_gt_one>& divisor) {
256 return divisor.divide(numerator);
#endif // EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H