//===-- llvm/Support/MathExtras.h - Useful math functions -------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains some functions that are useful for math stuff.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_SUPPORT_MATHEXTRAS_H
#define LLVM_SUPPORT_MATHEXTRAS_H

#include "llvm/Support/Compiler.h"
#include "llvm/Support/SwapByteOrder.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstring>
#include <limits>
#include <type_traits>

#ifdef _MSC_VER
#include <intrin.h>
#endif

#ifdef __ANDROID_NDK__
#include <android/api-level.h>
#endif

namespace llvm {
/// \brief The behavior an operation has on an input of 0.
enum ZeroBehavior {
  /// \brief The returned value is undefined.
  ZB_Undefined,
  /// \brief The returned value is numeric_limits<T>::max()
  ZB_Max,
  /// \brief The returned value is numeric_limits<T>::digits
  ZB_Width
};

namespace detail {
/// Portable fallback implementation of count-trailing-zeros, used for widths
/// of T that have no intrinsic-backed specialization below.
template <typename T, std::size_t SizeOfT> struct TrailingZerosCounter {
  static std::size_t count(T Val, ZeroBehavior) {
    if (!Val)
      return std::numeric_limits<T>::digits;
    if (Val & 0x1)
      return 0;

    // Bisection method: repeatedly test the low half of the remaining bits;
    // when it is all zero, discard it and record its width. The widths are
    // powers of two, so OR-ing them into ZeroBits accumulates the exact count.
    std::size_t ZeroBits = 0;
    T Shift = std::numeric_limits<T>::digits >> 1;
    T Mask = std::numeric_limits<T>::max() >> Shift;
    while (Shift) {
      if ((Val & Mask) == 0) {
        Val >>= Shift;
        ZeroBits |= Shift;
      }
      Shift >>= 1;
      Mask >>= Shift;
    }
    return ZeroBits;
  }
};

// Prefer single-instruction compiler intrinsics when they are available.
#if __GNUC__ >= 4 || defined(_MSC_VER)
template <typename T> struct TrailingZerosCounter<T, 4> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    // The intrinsics below are undefined on 0, so handle it here unless the
    // caller explicitly promised a nonzero input via ZB_Undefined.
    if (ZB != ZB_Undefined && Val == 0)
      return 32;

#if __has_builtin(__builtin_ctz) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_ctz(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanForward(&Index, Val);
    return Index;
#endif
  }
};

// 32-bit MSVC has no _BitScanForward64; there the 8-byte case falls back to
// the generic bisection implementation above.
#if !defined(_MSC_VER) || defined(_M_X64)
template <typename T> struct TrailingZerosCounter<T, 8> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 64;

#if __has_builtin(__builtin_ctzll) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_ctzll(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanForward64(&Index, Val);
    return Index;
#endif
  }
};
#endif
#endif
} // namespace detail

/// \brief Count number of 0's from the least significant bit to the most
/// stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
/// valid arguments.
template <typename T>
std::size_t countTrailingZeros(T Val, ZeroBehavior ZB = ZB_Width) {
  static_assert(std::numeric_limits<T>::is_integer &&
                    !std::numeric_limits<T>::is_signed,
                "Only unsigned integral types are allowed.");
  // Dispatch on sizeof(T) so 4- and 8-byte inputs use intrinsics where
  // available and everything else uses the portable fallback.
  return llvm::detail::TrailingZerosCounter<T, sizeof(T)>::count(Val, ZB);
}

namespace detail {
/// Portable fallback implementation of count-leading-zeros, used for widths
/// of T that have no intrinsic-backed specialization below.
template <typename T, std::size_t SizeOfT> struct LeadingZerosCounter {
  static std::size_t count(T Val, ZeroBehavior) {
    if (!Val)
      return std::numeric_limits<T>::digits;

    // Bisection method: if the high half of the remaining bits is nonzero,
    // keep it; otherwise that whole half was zeros, so record its width. The
    // widths are powers of two, so OR-ing them accumulates the exact count.
    std::size_t ZeroBits = 0;
    for (T Shift = std::numeric_limits<T>::digits >> 1; Shift; Shift >>= 1) {
      T Tmp = Val >> Shift;
      if (Tmp)
        Val = Tmp;
      else
        ZeroBits |= Shift;
    }
    return ZeroBits;
  }
};

// Prefer single-instruction compiler intrinsics when they are available.
#if __GNUC__ >= 4 || defined(_MSC_VER)
template <typename T> struct LeadingZerosCounter<T, 4> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    // The intrinsics below are undefined on 0, so handle it here unless the
    // caller explicitly promised a nonzero input via ZB_Undefined.
    if (ZB != ZB_Undefined && Val == 0)
      return 32;

#if __has_builtin(__builtin_clz) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_clz(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    // _BitScanReverse yields the index of the highest set bit; XOR with 31
    // converts that index into a leading-zero count.
    _BitScanReverse(&Index, Val);
    return Index ^ 31;
#endif
  }
};

// 32-bit MSVC has no _BitScanReverse64; there the 8-byte case falls back to
// the generic bisection implementation above.
#if !defined(_MSC_VER) || defined(_M_X64)
template <typename T> struct LeadingZerosCounter<T, 8> {
  static std::size_t count(T Val, ZeroBehavior ZB) {
    if (ZB != ZB_Undefined && Val == 0)
      return 64;

#if __has_builtin(__builtin_clzll) || LLVM_GNUC_PREREQ(4, 0, 0)
    return __builtin_clzll(Val);
#elif defined(_MSC_VER)
    unsigned long Index;
    _BitScanReverse64(&Index, Val);
    return Index ^ 63;
#endif
  }
};
#endif
#endif
} // namespace detail

/// \brief Count number of 0's from the most significant bit to the least
/// stopping at the first 1.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Width and ZB_Undefined are
/// valid arguments.
Only ZB_Width and ZB_Undefined are 179 /// valid arguments. 180 template <typename T> 181 std::size_t countLeadingZeros(T Val, ZeroBehavior ZB = ZB_Width) { 182 static_assert(std::numeric_limits<T>::is_integer && 183 !std::numeric_limits<T>::is_signed, 184 "Only unsigned integral types are allowed."); 185 return llvm::detail::LeadingZerosCounter<T, sizeof(T)>::count(Val, ZB); 186 } 187 188 /// \brief Get the index of the first set bit starting from the least 189 /// significant bit. 190 /// 191 /// Only unsigned integral types are allowed. 192 /// 193 /// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are 194 /// valid arguments. 195 template <typename T> T findFirstSet(T Val, ZeroBehavior ZB = ZB_Max) { 196 if (ZB == ZB_Max && Val == 0) 197 return std::numeric_limits<T>::max(); 198 199 return countTrailingZeros(Val, ZB_Undefined); 200 } 201 202 /// \brief Create a bitmask with the N right-most bits set to 1, and all other 203 /// bits set to 0. Only unsigned types are allowed. 204 template <typename T> T maskTrailingOnes(unsigned N) { 205 static_assert(std::is_unsigned<T>::value, "Invalid type!"); 206 const unsigned Bits = CHAR_BIT * sizeof(T); 207 assert(N <= Bits && "Invalid bit index"); 208 return N == 0 ? 0 : (T(-1) >> (Bits - N)); 209 } 210 211 /// \brief Create a bitmask with the N left-most bits set to 1, and all other 212 /// bits set to 0. Only unsigned types are allowed. 213 template <typename T> T maskLeadingOnes(unsigned N) { 214 return ~maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N); 215 } 216 217 /// \brief Create a bitmask with the N right-most bits set to 0, and all other 218 /// bits set to 1. Only unsigned types are allowed. 219 template <typename T> T maskTrailingZeros(unsigned N) { 220 return maskLeadingOnes<T>(CHAR_BIT * sizeof(T) - N); 221 } 222 223 /// \brief Create a bitmask with the N left-most bits set to 0, and all other 224 /// bits set to 1. Only unsigned types are allowed. 
template <typename T> T maskLeadingZeros(unsigned N) {
  // The N leading bits clear are exactly the (Bits - N) trailing bits set.
  return maskTrailingOnes<T>(CHAR_BIT * sizeof(T) - N);
}

/// \brief Get the index of the last set bit starting from the least
/// significant bit.
///
/// Only unsigned integral types are allowed.
///
/// \param ZB the behavior on an input of 0. Only ZB_Max and ZB_Undefined are
/// valid arguments.
template <typename T> T findLastSet(T Val, ZeroBehavior ZB = ZB_Max) {
  if (ZB == ZB_Max && Val == 0)
    return std::numeric_limits<T>::max();

  // Use ^ instead of - because both gcc and llvm can remove the associated ^
  // in the __builtin_clz intrinsic on x86.
  return countLeadingZeros(Val, ZB_Undefined) ^
         (std::numeric_limits<T>::digits - 1);
}

/// \brief Macro compressed bit reversal table for 256 bits.
///
/// http://graphics.stanford.edu/~seander/bithacks.html#BitReverseTable
static const unsigned char BitReverseTable256[256] = {
// Each R* macro expands its argument for the four possible values of the next
// two (reversed) bits, so R6 applied to 0..3 enumerates all 256 bytes in
// bit-reversed order.
#define R2(n) n, n + 2 * 64, n + 1 * 64, n + 3 * 64
#define R4(n) R2(n), R2(n + 2 * 16), R2(n + 1 * 16), R2(n + 3 * 16)
#define R6(n) R4(n), R4(n + 2 * 4), R4(n + 1 * 4), R4(n + 3 * 4)
  R6(0), R6(2), R6(1), R6(3)
#undef R2
#undef R4
#undef R6
};

/// \brief Reverse the bits in \p Val.
template <typename T>
T reverseBits(T Val) {
  unsigned char in[sizeof(Val)];
  unsigned char out[sizeof(Val)];
  std::memcpy(in, &Val, sizeof(Val));
  // Reverse the bits within each byte via the table while simultaneously
  // reversing the byte order.
  for (unsigned i = 0; i < sizeof(Val); ++i)
    out[(sizeof(Val) - i) - 1] = BitReverseTable256[in[i]];
  std::memcpy(&Val, out, sizeof(Val));
  return Val;
}

// NOTE: The following support functions use the _32/_64 extensions instead of
// type overloading so that signed and unsigned integers can be used without
// ambiguity.

/// Return the high 32 bits of a 64 bit value.
276 constexpr inline uint32_t Hi_32(uint64_t Value) { 277 return static_cast<uint32_t>(Value >> 32); 278 } 279 280 /// Return the low 32 bits of a 64 bit value. 281 constexpr inline uint32_t Lo_32(uint64_t Value) { 282 return static_cast<uint32_t>(Value); 283 } 284 285 /// Make a 64-bit integer from a high / low pair of 32-bit integers. 286 constexpr inline uint64_t Make_64(uint32_t High, uint32_t Low) { 287 return ((uint64_t)High << 32) | (uint64_t)Low; 288 } 289 290 /// Checks if an integer fits into the given bit width. 291 template <unsigned N> constexpr inline bool isInt(int64_t x) { 292 return N >= 64 || (-(INT64_C(1)<<(N-1)) <= x && x < (INT64_C(1)<<(N-1))); 293 } 294 // Template specializations to get better code for common cases. 295 template <> constexpr inline bool isInt<8>(int64_t x) { 296 return static_cast<int8_t>(x) == x; 297 } 298 template <> constexpr inline bool isInt<16>(int64_t x) { 299 return static_cast<int16_t>(x) == x; 300 } 301 template <> constexpr inline bool isInt<32>(int64_t x) { 302 return static_cast<int32_t>(x) == x; 303 } 304 305 /// Checks if a signed integer is an N bit number shifted left by S. 306 template <unsigned N, unsigned S> 307 constexpr inline bool isShiftedInt(int64_t x) { 308 static_assert( 309 N > 0, "isShiftedInt<0> doesn't make sense (refers to a 0-bit number."); 310 static_assert(N + S <= 64, "isShiftedInt<N, S> with N + S > 64 is too wide."); 311 return isInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0); 312 } 313 314 /// Checks if an unsigned integer fits into the given bit width. 315 /// 316 /// This is written as two functions rather than as simply 317 /// 318 /// return N >= 64 || X < (UINT64_C(1) << N); 319 /// 320 /// to keep MSVC from (incorrectly) warning on isUInt<64> that we're shifting 321 /// left too many places. 
322 template <unsigned N> 323 constexpr inline typename std::enable_if<(N < 64), bool>::type 324 isUInt(uint64_t X) { 325 static_assert(N > 0, "isUInt<0> doesn't make sense"); 326 return X < (UINT64_C(1) << (N)); 327 } 328 template <unsigned N> 329 constexpr inline typename std::enable_if<N >= 64, bool>::type 330 isUInt(uint64_t X) { 331 return true; 332 } 333 334 // Template specializations to get better code for common cases. 335 template <> constexpr inline bool isUInt<8>(uint64_t x) { 336 return static_cast<uint8_t>(x) == x; 337 } 338 template <> constexpr inline bool isUInt<16>(uint64_t x) { 339 return static_cast<uint16_t>(x) == x; 340 } 341 template <> constexpr inline bool isUInt<32>(uint64_t x) { 342 return static_cast<uint32_t>(x) == x; 343 } 344 345 /// Checks if a unsigned integer is an N bit number shifted left by S. 346 template <unsigned N, unsigned S> 347 constexpr inline bool isShiftedUInt(uint64_t x) { 348 static_assert( 349 N > 0, "isShiftedUInt<0> doesn't make sense (refers to a 0-bit number)"); 350 static_assert(N + S <= 64, 351 "isShiftedUInt<N, S> with N + S > 64 is too wide."); 352 // Per the two static_asserts above, S must be strictly less than 64. So 353 // 1 << S is not undefined behavior. 354 return isUInt<N + S>(x) && (x % (UINT64_C(1) << S) == 0); 355 } 356 357 /// Gets the maximum value for a N-bit unsigned integer. 358 inline uint64_t maxUIntN(uint64_t N) { 359 assert(N > 0 && N <= 64 && "integer width out of range"); 360 361 // uint64_t(1) << 64 is undefined behavior, so we can't do 362 // (uint64_t(1) << N) - 1 363 // without checking first that N != 64. But this works and doesn't have a 364 // branch. 365 return UINT64_MAX >> (64 - N); 366 } 367 368 /// Gets the minimum value for a N-bit signed integer. 369 inline int64_t minIntN(int64_t N) { 370 assert(N > 0 && N <= 64 && "integer width out of range"); 371 372 return -(UINT64_C(1)<<(N-1)); 373 } 374 375 /// Gets the maximum value for a N-bit signed integer. 
376 inline int64_t maxIntN(int64_t N) { 377 assert(N > 0 && N <= 64 && "integer width out of range"); 378 379 // This relies on two's complement wraparound when N == 64, so we convert to 380 // int64_t only at the very end to avoid UB. 381 return (UINT64_C(1) << (N - 1)) - 1; 382 } 383 384 /// Checks if an unsigned integer fits into the given (dynamic) bit width. 385 inline bool isUIntN(unsigned N, uint64_t x) { 386 return N >= 64 || x <= maxUIntN(N); 387 } 388 389 /// Checks if an signed integer fits into the given (dynamic) bit width. 390 inline bool isIntN(unsigned N, int64_t x) { 391 return N >= 64 || (minIntN(N) <= x && x <= maxIntN(N)); 392 } 393 394 /// Return true if the argument is a non-empty sequence of ones starting at the 395 /// least significant bit with the remainder zero (32 bit version). 396 /// Ex. isMask_32(0x0000FFFFU) == true. 397 constexpr inline bool isMask_32(uint32_t Value) { 398 return Value && ((Value + 1) & Value) == 0; 399 } 400 401 /// Return true if the argument is a non-empty sequence of ones starting at the 402 /// least significant bit with the remainder zero (64 bit version). 403 constexpr inline bool isMask_64(uint64_t Value) { 404 return Value && ((Value + 1) & Value) == 0; 405 } 406 407 /// Return true if the argument contains a non-empty sequence of ones with the 408 /// remainder zero (32 bit version.) Ex. isShiftedMask_32(0x0000FF00U) == true. 409 constexpr inline bool isShiftedMask_32(uint32_t Value) { 410 return Value && isMask_32((Value - 1) | Value); 411 } 412 413 /// Return true if the argument contains a non-empty sequence of ones with the 414 /// remainder zero (64 bit version.) 415 constexpr inline bool isShiftedMask_64(uint64_t Value) { 416 return Value && isMask_64((Value - 1) | Value); 417 } 418 419 /// Return true if the argument is a power of two > 0. 420 /// Ex. isPowerOf2_32(0x00100000U) == true (32 bit edition.) 
421 constexpr inline bool isPowerOf2_32(uint32_t Value) { 422 return Value && !(Value & (Value - 1)); 423 } 424 425 /// Return true if the argument is a power of two > 0 (64 bit edition.) 426 constexpr inline bool isPowerOf2_64(uint64_t Value) { 427 return Value && !(Value & (Value - 1)); 428 } 429 430 /// Return a byte-swapped representation of the 16-bit argument. 431 inline uint16_t ByteSwap_16(uint16_t Value) { 432 return sys::SwapByteOrder_16(Value); 433 } 434 435 /// Return a byte-swapped representation of the 32-bit argument. 436 inline uint32_t ByteSwap_32(uint32_t Value) { 437 return sys::SwapByteOrder_32(Value); 438 } 439 440 /// Return a byte-swapped representation of the 64-bit argument. 441 inline uint64_t ByteSwap_64(uint64_t Value) { 442 return sys::SwapByteOrder_64(Value); 443 } 444 445 /// \brief Count the number of ones from the most significant bit to the first 446 /// zero bit. 447 /// 448 /// Ex. countLeadingOnes(0xFF0FFF00) == 8. 449 /// Only unsigned integral types are allowed. 450 /// 451 /// \param ZB the behavior on an input of all ones. Only ZB_Width and 452 /// ZB_Undefined are valid arguments. 453 template <typename T> 454 std::size_t countLeadingOnes(T Value, ZeroBehavior ZB = ZB_Width) { 455 static_assert(std::numeric_limits<T>::is_integer && 456 !std::numeric_limits<T>::is_signed, 457 "Only unsigned integral types are allowed."); 458 return countLeadingZeros(~Value, ZB); 459 } 460 461 /// \brief Count the number of ones from the least significant bit to the first 462 /// zero bit. 463 /// 464 /// Ex. countTrailingOnes(0x00FF00FF) == 8. 465 /// Only unsigned integral types are allowed. 466 /// 467 /// \param ZB the behavior on an input of all ones. Only ZB_Width and 468 /// ZB_Undefined are valid arguments. 
469 template <typename T> 470 std::size_t countTrailingOnes(T Value, ZeroBehavior ZB = ZB_Width) { 471 static_assert(std::numeric_limits<T>::is_integer && 472 !std::numeric_limits<T>::is_signed, 473 "Only unsigned integral types are allowed."); 474 return countTrailingZeros(~Value, ZB); 475 } 476 477 namespace detail { 478 template <typename T, std::size_t SizeOfT> struct PopulationCounter { 479 static unsigned count(T Value) { 480 // Generic version, forward to 32 bits. 481 static_assert(SizeOfT <= 4, "Not implemented!"); 482 #if __GNUC__ >= 4 483 return __builtin_popcount(Value); 484 #else 485 uint32_t v = Value; 486 v = v - ((v >> 1) & 0x55555555); 487 v = (v & 0x33333333) + ((v >> 2) & 0x33333333); 488 return ((v + (v >> 4) & 0xF0F0F0F) * 0x1010101) >> 24; 489 #endif 490 } 491 }; 492 493 template <typename T> struct PopulationCounter<T, 8> { 494 static unsigned count(T Value) { 495 #if __GNUC__ >= 4 496 return __builtin_popcountll(Value); 497 #else 498 uint64_t v = Value; 499 v = v - ((v >> 1) & 0x5555555555555555ULL); 500 v = (v & 0x3333333333333333ULL) + ((v >> 2) & 0x3333333333333333ULL); 501 v = (v + (v >> 4)) & 0x0F0F0F0F0F0F0F0FULL; 502 return unsigned((uint64_t)(v * 0x0101010101010101ULL) >> 56); 503 #endif 504 } 505 }; 506 } // namespace detail 507 508 /// \brief Count the number of set bits in a value. 509 /// Ex. countPopulation(0xF000F000) = 8 510 /// Returns 0 if the word is zero. 511 template <typename T> 512 inline unsigned countPopulation(T Value) { 513 static_assert(std::numeric_limits<T>::is_integer && 514 !std::numeric_limits<T>::is_signed, 515 "Only unsigned integral types are allowed."); 516 return detail::PopulationCounter<T, sizeof(T)>::count(Value); 517 } 518 519 /// Return the log base 2 of the specified value. 
inline double Log2(double Value) {
#if defined(__ANDROID_API__) && __ANDROID_API__ < 18
  // Old Android NDKs lack log2(); synthesize it from the natural log.
  return __builtin_log(Value) / __builtin_log(2.0);
#else
  return log2(Value);
#endif
}

/// Return the floor log base 2 of the specified value, -1 if the value is zero.
/// (32 bit edition.)
/// Ex. Log2_32(32) == 5, Log2_32(1) == 0, Log2_32(0) == -1, Log2_32(6) == 2
inline unsigned Log2_32(uint32_t Value) {
  // For Value == 0 this wraps to (unsigned)-1, matching the documented -1.
  return 31 - countLeadingZeros(Value);
}

/// Return the floor log base 2 of the specified value, -1 if the value is zero.
/// (64 bit edition.)
inline unsigned Log2_64(uint64_t Value) {
  return 63 - countLeadingZeros(Value);
}

/// Return the ceil log base 2 of the specified value, 32 if the value is zero.
/// (32 bit edition).
/// Ex. Log2_32_Ceil(32) == 5, Log2_32_Ceil(1) == 0, Log2_32_Ceil(6) == 3
inline unsigned Log2_32_Ceil(uint32_t Value) {
  // Subtracting one first makes exact powers of two round down instead of up.
  return 32 - countLeadingZeros(Value - 1);
}

/// Return the ceil log base 2 of the specified value, 64 if the value is zero.
/// (64 bit edition.)
inline unsigned Log2_64_Ceil(uint64_t Value) {
  return 64 - countLeadingZeros(Value - 1);
}

/// Return the greatest common divisor of the values using Euclid's algorithm.
inline uint64_t GreatestCommonDivisor64(uint64_t A, uint64_t B) {
  // Classic iterative Euclid: replace (A, B) with (B, A mod B) until B == 0.
  while (B) {
    uint64_t T = B;
    B = A % B;
    A = T;
  }
  return A;
}

/// This function takes a 64-bit integer and returns the bit equivalent double.
inline double BitsToDouble(uint64_t Bits) {
  double D;
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  // memcpy rather than a cast/union avoids strict-aliasing undefined behavior.
  memcpy(&D, &Bits, sizeof(Bits));
  return D;
}

/// This function takes a 32-bit integer and returns the bit equivalent float.
inline float BitsToFloat(uint32_t Bits) {
  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  float F;
  // memcpy rather than a cast/union avoids strict-aliasing undefined behavior.
  std::memcpy(&F, &Bits, sizeof(F));
  return F;
}

/// This function takes a double and returns the bit equivalent 64-bit integer.
/// Note that copying doubles around changes the bits of NaNs on some hosts,
/// notably x86, so this routine cannot be used if these bits are needed.
inline uint64_t DoubleToBits(double Double) {
  static_assert(sizeof(uint64_t) == sizeof(double), "Unexpected type sizes");
  uint64_t Bits;
  std::memcpy(&Bits, &Double, sizeof(Bits));
  return Bits;
}

/// This function takes a float and returns the bit equivalent 32-bit integer.
/// Note that copying floats around changes the bits of NaNs on some hosts,
/// notably x86, so this routine cannot be used if these bits are needed.
inline uint32_t FloatToBits(float Float) {
  static_assert(sizeof(uint32_t) == sizeof(float), "Unexpected type sizes");
  uint32_t Bits;
  std::memcpy(&Bits, &Float, sizeof(Bits));
  return Bits;
}

/// A and B are either alignments or offsets. Return the minimum alignment that
/// may be assumed after adding the two together.
constexpr inline uint64_t MinAlign(uint64_t A, uint64_t B) {
  // Isolate the lowest set bit of (A | B): that power of two divides both
  // inputs. "1 + ~x" spells two's-complement negation without the unary
  // minus, which would trigger MSVC warning C4146 on an unsigned operand:
  //   return (A | B) & -(A | B);
  return (A | B) & (1 + ~(A | B));
}

/// \brief Aligns \c Addr to \c Alignment bytes, rounding up.
///
/// Alignment should be a power of two. This method rounds up, so
/// alignAddr(7, 4) == 8 and alignAddr(8, 4) == 8.
615 inline uintptr_t alignAddr(const void *Addr, size_t Alignment) { 616 assert(Alignment && isPowerOf2_64((uint64_t)Alignment) && 617 "Alignment is not a power of two!"); 618 619 assert((uintptr_t)Addr + Alignment - 1 >= (uintptr_t)Addr); 620 621 return (((uintptr_t)Addr + Alignment - 1) & ~(uintptr_t)(Alignment - 1)); 622 } 623 624 /// \brief Returns the necessary adjustment for aligning \c Ptr to \c Alignment 625 /// bytes, rounding up. 626 inline size_t alignmentAdjustment(const void *Ptr, size_t Alignment) { 627 return alignAddr(Ptr, Alignment) - (uintptr_t)Ptr; 628 } 629 630 /// Returns the next power of two (in 64-bits) that is strictly greater than A. 631 /// Returns zero on overflow. 632 inline uint64_t NextPowerOf2(uint64_t A) { 633 A |= (A >> 1); 634 A |= (A >> 2); 635 A |= (A >> 4); 636 A |= (A >> 8); 637 A |= (A >> 16); 638 A |= (A >> 32); 639 return A + 1; 640 } 641 642 /// Returns the power of two which is less than or equal to the given value. 643 /// Essentially, it is a floor operation across the domain of powers of two. 644 inline uint64_t PowerOf2Floor(uint64_t A) { 645 if (!A) return 0; 646 return 1ull << (63 - countLeadingZeros(A, ZB_Undefined)); 647 } 648 649 /// Returns the power of two which is greater than or equal to the given value. 650 /// Essentially, it is a ceil operation across the domain of powers of two. 651 inline uint64_t PowerOf2Ceil(uint64_t A) { 652 if (!A) 653 return 0; 654 return NextPowerOf2(A - 1); 655 } 656 657 /// Returns the next integer (mod 2**64) that is greater than or equal to 658 /// \p Value and is a multiple of \p Align. \p Align must be non-zero. 659 /// 660 /// If non-zero \p Skew is specified, the return value will be a minimal 661 /// integer that is greater than or equal to \p Value and equal to 662 /// \p Align * N + \p Skew for some integer N. If \p Skew is larger than 663 /// \p Align, its value is adjusted to '\p Skew mod \p Align'. 
664 /// 665 /// Examples: 666 /// \code 667 /// alignTo(5, 8) = 8 668 /// alignTo(17, 8) = 24 669 /// alignTo(~0LL, 8) = 0 670 /// alignTo(321, 255) = 510 671 /// 672 /// alignTo(5, 8, 7) = 7 673 /// alignTo(17, 8, 1) = 17 674 /// alignTo(~0LL, 8, 3) = 3 675 /// alignTo(321, 255, 42) = 552 676 /// \endcode 677 inline uint64_t alignTo(uint64_t Value, uint64_t Align, uint64_t Skew = 0) { 678 assert(Align != 0u && "Align can't be 0."); 679 Skew %= Align; 680 return (Value + Align - 1 - Skew) / Align * Align + Skew; 681 } 682 683 /// Returns the next integer (mod 2**64) that is greater than or equal to 684 /// \p Value and is a multiple of \c Align. \c Align must be non-zero. 685 template <uint64_t Align> constexpr inline uint64_t alignTo(uint64_t Value) { 686 static_assert(Align != 0u, "Align must be non-zero"); 687 return (Value + Align - 1) / Align * Align; 688 } 689 690 /// Returns the integer ceil(Numerator / Denominator). 691 inline uint64_t divideCeil(uint64_t Numerator, uint64_t Denominator) { 692 return alignTo(Numerator, Denominator) / Denominator; 693 } 694 695 /// \c alignTo for contexts where a constant expression is required. 696 /// \sa alignTo 697 /// 698 /// \todo FIXME: remove when \c constexpr becomes really \c constexpr 699 template <uint64_t Align> 700 struct AlignTo { 701 static_assert(Align != 0u, "Align must be non-zero"); 702 template <uint64_t Value> 703 struct from_value { 704 static const uint64_t value = (Value + Align - 1) / Align * Align; 705 }; 706 }; 707 708 /// Returns the largest uint64_t less than or equal to \p Value and is 709 /// \p Skew mod \p Align. \p Align must be non-zero 710 inline uint64_t alignDown(uint64_t Value, uint64_t Align, uint64_t Skew = 0) { 711 assert(Align != 0u && "Align can't be 0."); 712 Skew %= Align; 713 return (Value - Skew) / Align * Align + Skew; 714 } 715 716 /// Returns the offset to the next integer (mod 2**64) that is greater than 717 /// or equal to \p Value and is a multiple of \p Align. 
\p Align must be 718 /// non-zero. 719 inline uint64_t OffsetToAlignment(uint64_t Value, uint64_t Align) { 720 return alignTo(Value, Align) - Value; 721 } 722 723 /// Sign-extend the number in the bottom B bits of X to a 32-bit integer. 724 /// Requires 0 < B <= 32. 725 template <unsigned B> constexpr inline int32_t SignExtend32(uint32_t X) { 726 static_assert(B > 0, "Bit width can't be 0."); 727 static_assert(B <= 32, "Bit width out of range."); 728 return int32_t(X << (32 - B)) >> (32 - B); 729 } 730 731 /// Sign-extend the number in the bottom B bits of X to a 32-bit integer. 732 /// Requires 0 < B < 32. 733 inline int32_t SignExtend32(uint32_t X, unsigned B) { 734 assert(B > 0 && "Bit width can't be 0."); 735 assert(B <= 32 && "Bit width out of range."); 736 return int32_t(X << (32 - B)) >> (32 - B); 737 } 738 739 /// Sign-extend the number in the bottom B bits of X to a 64-bit integer. 740 /// Requires 0 < B < 64. 741 template <unsigned B> constexpr inline int64_t SignExtend64(uint64_t x) { 742 static_assert(B > 0, "Bit width can't be 0."); 743 static_assert(B <= 64, "Bit width out of range."); 744 return int64_t(x << (64 - B)) >> (64 - B); 745 } 746 747 /// Sign-extend the number in the bottom B bits of X to a 64-bit integer. 748 /// Requires 0 < B < 64. 749 inline int64_t SignExtend64(uint64_t X, unsigned B) { 750 assert(B > 0 && "Bit width can't be 0."); 751 assert(B <= 64 && "Bit width out of range."); 752 return int64_t(X << (64 - B)) >> (64 - B); 753 } 754 755 /// Subtract two unsigned integers, X and Y, of type T and return the absolute 756 /// value of the result. 757 template <typename T> 758 typename std::enable_if<std::is_unsigned<T>::value, T>::type 759 AbsoluteDifference(T X, T Y) { 760 return std::max(X, Y) - std::min(X, Y); 761 } 762 763 /// Add two unsigned integers, X and Y, of type T. Clamp the result to the 764 /// maximum representable value of T on overflow. 
/// ResultOverflowed indicates if the result is larger than the maximum
/// representable value of type T.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
SaturatingAdd(T X, T Y, bool *ResultOverflowed = nullptr) {
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;
  // Hacker's Delight, p. 29
  T Z = X + Y;
  // Unsigned addition wraps, so the sum is smaller than an operand exactly
  // when overflow occurred.
  Overflowed = (Z < X || Z < Y);
  if (Overflowed)
    return std::numeric_limits<T>::max();
  else
    return Z;
}

/// Multiply two unsigned integers, X and Y, of type T. Clamp the result to the
/// maximum representable value of T on overflow. ResultOverflowed indicates if
/// the result is larger than the maximum representable value of type T.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
SaturatingMultiply(T X, T Y, bool *ResultOverflowed = nullptr) {
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;

  // Hacker's Delight, p. 30 has a different algorithm, but we don't use that
  // because it fails for uint16_t (where multiplication can have undefined
  // behavior due to promotion to int), and requires a division in addition
  // to the multiplication.

  Overflowed = false;

  // Log2(Z) would be either Log2Z or Log2Z + 1.
  // Special case: if X or Y is 0, Log2_64 gives -1, and Log2Z
  // will necessarily be less than Log2Max as desired.
  int Log2Z = Log2_64(X) + Log2_64(Y);
  const T Max = std::numeric_limits<T>::max();
  int Log2Max = Log2_64(Max);
  if (Log2Z < Log2Max) {
    // The product's log2 is strictly below the type's, so it provably fits.
    return X * Y;
  }
  if (Log2Z > Log2Max) {
    // The product provably overflows.
    Overflowed = true;
    return Max;
  }

  // We're going to use the top bit, and maybe overflow one
  // bit past it. Multiply all but the bottom bit then add
  // that on at the end.
  T Z = (X >> 1) * Y;
  if (Z & ~(Max >> 1)) {
    // Doubling the partial product would already overflow.
    Overflowed = true;
    return Max;
  }
  Z <<= 1;
  if (X & 1)
    // Restore the contribution of X's dropped low bit, saturating if needed.
    return SaturatingAdd(Z, Y, ResultOverflowed);

  return Z;
}

/// Multiply two unsigned integers, X and Y, and add the unsigned integer, A to
/// the product. Clamp the result to the maximum representable value of T on
/// overflow. ResultOverflowed indicates if the result is larger than the
/// maximum representable value of type T.
template <typename T>
typename std::enable_if<std::is_unsigned<T>::value, T>::type
SaturatingMultiplyAdd(T X, T Y, T A, bool *ResultOverflowed = nullptr) {
  bool Dummy;
  bool &Overflowed = ResultOverflowed ? *ResultOverflowed : Dummy;

  T Product = SaturatingMultiply(X, Y, &Overflowed);
  // If the multiply already saturated, adding A cannot improve the result.
  if (Overflowed)
    return Product;

  return SaturatingAdd(A, Product, &Overflowed);
}

/// Use this rather than HUGE_VALF; the latter causes warnings on MSVC.
extern const float huge_valf;
} // End llvm namespace

#endif