// Copyright 2013, ARM Limited
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
//   * Redistributions of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistributions in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * Neither the name of ARM Limited nor the names of its contributors may be
//     used to endorse or promote products derived from this software without
//     specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.

#ifndef VIXL_UTILS_H
#define VIXL_UTILS_H

#include <cmath>
#include <string.h>
#include "globals-vixl.h"

namespace vixl {

// Check number width.
inline bool is_intn(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  int64_t limit = INT64_C(1) << (n - 1);
  return (-limit <= x) && (x < limit);
}

inline bool is_uintn(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  return !(x >> n);
}

inline unsigned truncate_to_intn(unsigned n, int64_t x) {
  VIXL_ASSERT((0 < n) && (n < 64));
  return (x & ((INT64_C(1) << n) - 1));
}

#define INT_1_TO_63_LIST(V)                         \
  V(1)  V(2)  V(3)  V(4)  V(5)  V(6)  V(7)  V(8)    \
  V(9)  V(10) V(11) V(12) V(13) V(14) V(15) V(16)   \
  V(17) V(18) V(19) V(20) V(21) V(22) V(23) V(24)   \
  V(25) V(26) V(27) V(28) V(29) V(30) V(31) V(32)   \
  V(33) V(34) V(35) V(36) V(37) V(38) V(39) V(40)   \
  V(41) V(42) V(43) V(44) V(45) V(46) V(47) V(48)   \
  V(49) V(50) V(51) V(52) V(53) V(54) V(55) V(56)   \
  V(57) V(58) V(59) V(60) V(61) V(62) V(63)

#define DECLARE_IS_INT_N(N)                                                    \
  inline bool is_int##N(int64_t x) { return is_intn(N, x); }
#define DECLARE_IS_UINT_N(N)                                                   \
  inline bool is_uint##N(int64_t x) { return is_uintn(N, x); }
#define DECLARE_TRUNCATE_TO_INT_N(N)                                           \
  inline int truncate_to_int##N(int x) { return truncate_to_intn(N, x); }
INT_1_TO_63_LIST(DECLARE_IS_INT_N)
INT_1_TO_63_LIST(DECLARE_IS_UINT_N)
INT_1_TO_63_LIST(DECLARE_TRUNCATE_TO_INT_N)
#undef DECLARE_IS_INT_N
#undef DECLARE_IS_UINT_N
#undef DECLARE_TRUNCATE_TO_INT_N
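// Example usage (illustrative only; the value and the chosen widths are
// arbitrary): the generated helpers are typically used to check whether a
// value fits in an instruction's immediate field before encoding it.
//
//   int64_t offset = ...;  // value to encode
//   if (is_int9(offset)) {
//     // Fits in a 9-bit signed immediate.
//   } else if (is_uint12(offset)) {
//     // Fits in a 12-bit unsigned immediate.
//   }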
// Bit field extraction.
inline uint32_t unsigned_bitextract_32(int msb, int lsb, uint32_t x) {
  return (x >> lsb) & ((1 << (1 + msb - lsb)) - 1);
}

inline uint64_t unsigned_bitextract_64(int msb, int lsb, uint64_t x) {
  return (x >> lsb) & ((static_cast<uint64_t>(1) << (1 + msb - lsb)) - 1);
}

inline int32_t signed_bitextract_32(int msb, int lsb, int32_t x) {
  return (x << (31 - msb)) >> (lsb + 31 - msb);
}

inline int64_t signed_bitextract_64(int msb, int lsb, int64_t x) {
  return (x << (63 - msb)) >> (lsb + 63 - msb);
}

// Floating point representation.
uint32_t float_to_rawbits(float value);
uint64_t double_to_rawbits(double value);
float rawbits_to_float(uint32_t bits);
double rawbits_to_double(uint64_t bits);


// NaN tests.
inline bool IsSignallingNaN(double num) {
  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
  uint64_t raw = double_to_rawbits(num);
  if (std::isnan(num) && ((raw & kFP64QuietNaNMask) == 0)) {
    return true;
  }
  return false;
}


inline bool IsSignallingNaN(float num) {
  const uint32_t kFP32QuietNaNMask = 0x00400000;
  uint32_t raw = float_to_rawbits(num);
  if (std::isnan(num) && ((raw & kFP32QuietNaNMask) == 0)) {
    return true;
  }
  return false;
}


template <typename T>
inline bool IsQuietNaN(T num) {
  return std::isnan(num) && !IsSignallingNaN(num);
}


// Convert the NaN in 'num' to a quiet NaN.
inline double ToQuietNaN(double num) {
  const uint64_t kFP64QuietNaNMask = UINT64_C(0x0008000000000000);
  VIXL_ASSERT(std::isnan(num));
  return rawbits_to_double(double_to_rawbits(num) | kFP64QuietNaNMask);
}


inline float ToQuietNaN(float num) {
  const uint32_t kFP32QuietNaNMask = 0x00400000;
  VIXL_ASSERT(std::isnan(num));
  return rawbits_to_float(float_to_rawbits(num) | kFP32QuietNaNMask);
}


// Fused multiply-add.
inline double FusedMultiplyAdd(double op1, double op2, double a) {
  return fma(op1, op2, a);
}


inline float FusedMultiplyAdd(float op1, float op2, float a) {
  return fmaf(op1, op2, a);
}


// Bit counting.
int CountLeadingZeros(uint64_t value, int width);
int CountLeadingSignBits(int64_t value, int width);
int CountTrailingZeros(uint64_t value, int width);
int CountSetBits(uint64_t value, int width);

// Pointer alignment
// TODO: rename/refactor to make it specific to instructions.
template<typename T>
bool IsWordAligned(T pointer) {
  VIXL_ASSERT(sizeof(pointer) == sizeof(intptr_t));  // NOLINT(runtime/sizeof)
  return (reinterpret_cast<intptr_t>(pointer) & 3) == 0;
}

// Increment a pointer until it has the specified alignment.
template<class T>
T AlignUp(T pointer, size_t alignment) {
  VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(uintptr_t));
  uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
  size_t align_step = (alignment - pointer_raw) % alignment;
  VIXL_ASSERT((pointer_raw + align_step) % alignment == 0);
  return reinterpret_cast<T>(pointer_raw + align_step);
}
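// Example usage (illustrative only; the buffer below is hypothetical): AlignUp
// can round a code buffer cursor up to a 4-byte boundary, the size of an A64
// instruction, before emitting further instructions.
//
//   uint8_t* cursor = ...;  // current write position in a code buffer
//   uint8_t* aligned = AlignUp(cursor, 4);
//   VIXL_ASSERT((reinterpret_cast<uintptr_t>(aligned) & 3) == 0);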
// Decrement a pointer until it has the specified alignment.
template<class T>
T AlignDown(T pointer, size_t alignment) {
  VIXL_STATIC_ASSERT(sizeof(pointer) == sizeof(uintptr_t));
  uintptr_t pointer_raw = reinterpret_cast<uintptr_t>(pointer);
  size_t align_step = pointer_raw % alignment;
  VIXL_ASSERT((pointer_raw - align_step) % alignment == 0);
  return reinterpret_cast<T>(pointer_raw - align_step);
}


}  // namespace vixl

#endif  // VIXL_UTILS_H