/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */


#ifndef SkMath_DEFINED
#define SkMath_DEFINED

#include "SkTypes.h"

// 64bit -> 32bit utilities

/**
 *  Return true iff the 64bit value can be exactly represented in signed 32bits
 */
static inline bool sk_64_isS32(int64_t value) {
    return (int32_t)value == value;
}

/**
 *  Return the 64bit argument as signed 32bits, asserting in debug that the arg
 *  exactly fits in signed 32bits. In the release build, no checks are performed
 *  and the return value is undefined if the arg does not fit.
 */
static inline int32_t sk_64_asS32(int64_t value) {
    SkASSERT(sk_64_isS32(value));
    return (int32_t)value;
}
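
// Example (illustrative; 'width' and 'height' are hypothetical caller values):
// check sk_64_isS32() before narrowing when the value might not fit, rather than
// relying on the debug-only assert inside sk_64_asS32().
//
//     int64_t area = sk_64_mul(width, height);
//     if (sk_64_isS32(area)) {
//         int32_t area32 = sk_64_asS32(area);  // safe: checked above
//     }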

// Handy util that can be passed two ints, and will automatically promote to
// 64bits before the multiply, so the caller doesn't have to remember to cast
// e.g. (int64_t)a * b;
static inline int64_t sk_64_mul(int64_t a, int64_t b) {
    return a * b;
}

///////////////////////////////////////////////////////////////////////////////

/**
 *  Computes numer1 * numer2 / denom in full 64-bit intermediate precision.
 *  It is an error for denom to be 0. There is no special handling if
 *  the result overflows 32bits.
 */
static inline int32_t SkMulDiv(int32_t numer1, int32_t numer2, int32_t denom) {
    SkASSERT(denom);

    int64_t tmp = sk_64_mul(numer1, numer2) / denom;
    return sk_64_asS32(tmp);
}
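
// Example (illustrative; the names are hypothetical): scale a coordinate by a
// ratio whose 32-bit product could overflow an int.
//
//     int32_t newX = SkMulDiv(oldX, newWidth, oldWidth);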

/**
 *  Computes (numer << shift) / denom in full 64-bit intermediate precision.
 *  It is an error for denom to be 0. There is no special handling if
 *  the result overflows 32bits.
 */
int32_t SkDivBits(int32_t numer, int32_t denom, int shift);
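
// Usage sketch (an assumption about intended use, not documented here): with
// shift == 16 the quotient comes back in 16.16 fixed-point form.
//
//     int32_t ratio = SkDivBits(numer, denom, 16);  // ~ (numer / denom) as 16.16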

/**
 *  Return the integer square root of value, with a bias of bitBias
 */
int32_t SkSqrtBits(int32_t value, int bitBias);

/** Return the integer square root of n, treated as a SkFixed (16.16)
 */
#define SkSqrt32(n)         SkSqrtBits(n, 15)

//! Returns the number of leading zero bits (0...32)
int SkCLZ_portable(uint32_t);

#ifndef SkCLZ
    #if defined(_MSC_VER) && _MSC_VER >= 1400
        #include <intrin.h>

        static inline int SkCLZ(uint32_t mask) {
            if (mask) {
                DWORD index;
                _BitScanReverse(&index, mask);
                // _BitScanReverse stores the index of the highest set bit, so for
                // index in [0, 31], index ^ 0x1F (== 31 - index) is the count of
                // leading zeros.
                // Suppress this bogus /analyze warning. The check for non-zero
                // guarantees that _BitScanReverse will succeed.
#pragma warning(suppress : 6102) // Using 'index' from failed function call
                return index ^ 0x1F;
            } else {
                return 32;
            }
        }
    #elif defined(SK_CPU_ARM32) || defined(__GNUC__) || defined(__clang__)
        static inline int SkCLZ(uint32_t mask) {
            // __builtin_clz(0) is undefined, so we have to detect that case.
            return mask ? __builtin_clz(mask) : 32;
        }
    #else
        #define SkCLZ(x)    SkCLZ_portable(x)
    #endif
#endif
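
// Example values (illustrative):
//
//     SkCLZ(1)          == 31
//     SkCLZ(0x80000000) == 0
//     SkCLZ(0)          == 32   // handled explicitly; __builtin_clz(0) is undefined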

/**
 *  Returns (value < 0 ? 0 : value) efficiently (i.e. no compares or branches)
 */
static inline int SkClampPos(int value) {
    // value >> 31 is all ones for a negative value (arithmetic shift) and all
    // zeros otherwise, so the mask zeroes out negatives and passes others through.
    return value & ~(value >> 31);
}

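// Example values (illustrative):
//
//     SkClampPos(-5) == 0
//     SkClampPos(7)  == 7
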
/** Given an integer and a positive (max) integer, return the value
 *  pinned against 0 and max, inclusive.
 *  @param value    The value we want returned pinned between [0...max]
 *  @param max      The positive max value
 *  @return 0 if value < 0, max if value > max, else value
 */
static inline int SkClampMax(int value, int max) {
    // ensure that max is positive
    SkASSERT(max >= 0);
    if (value < 0) {
        value = 0;
    }
    if (value > max) {
        value = max;
    }
    return value;
}

/**
 *  Returns the smallest power-of-2 that is >= the specified value. If value
 *  is already a power of 2, then it is returned unchanged. It is undefined
 *  if value is <= 0.
 */
static inline int SkNextPow2(int value) {
    SkASSERT(value > 0);
    return 1 << (32 - SkCLZ(value - 1));
}

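// Example values (illustrative):
//
//     SkNextPow2(1) == 1
//     SkNextPow2(3) == 4
//     SkNextPow2(4) == 4
//     SkNextPow2(5) == 8
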
/**
 *  Returns the log2 of the specified value, as if the value were first rounded
 *  up to the next power of 2. It is undefined to pass 0. Examples:
 *  SkNextLog2(1) -> 0
 *  SkNextLog2(2) -> 1
 *  SkNextLog2(3) -> 2
 *  SkNextLog2(4) -> 2
 *  SkNextLog2(5) -> 3
 */
static inline int SkNextLog2(uint32_t value) {
    SkASSERT(value != 0);
    return 32 - SkCLZ(value - 1);
}

/**
 *  Returns true if value is a power of 2. Does not explicitly check for
 *  value <= 0.
 */
static inline bool SkIsPow2(int value) {
    // A power of 2 has exactly one bit set, so clearing the lowest set bit
    // (value & (value - 1)) leaves 0. Note that 0 also passes this test.
    return (value & (value - 1)) == 0;
}

///////////////////////////////////////////////////////////////////////////////

/**
 *  SkMulS16(a, b) multiplies a * b, but requires that a and b are both int16_t.
 *  With this requirement, we can generate faster instructions on some
 *  architectures.
 */
#ifdef SK_ARM_HAS_EDSP
    static inline int32_t SkMulS16(S16CPU x, S16CPU y) {
        SkASSERT((int16_t)x == x);
        SkASSERT((int16_t)y == y);
        int32_t product;
        asm("smulbb %0, %1, %2 \n"
            : "=r"(product)
            : "r"(x), "r"(y)
            );
        return product;
    }
#else
    #ifdef SK_DEBUG
        static inline int32_t SkMulS16(S16CPU x, S16CPU y) {
            SkASSERT((int16_t)x == x);
            SkASSERT((int16_t)y == y);
            return x * y;
        }
    #else
        #define SkMulS16(x, y)  ((x) * (y))
    #endif
#endif

/**
 *  Return a*b/((1 << shift) - 1), rounding any fractional bits.
 *  Only valid if a and b are unsigned and <= 32767 and shift is > 0 and <= 8
 */
static inline unsigned SkMul16ShiftRound(U16CPU a, U16CPU b, int shift) {
    SkASSERT(a <= 32767);
    SkASSERT(b <= 32767);
    SkASSERT(shift > 0 && shift <= 8);
    // Adding (1 << (shift - 1)) rounds the result to nearest.
    unsigned prod = SkMulS16(a, b) + (1 << (shift - 1));
    // (prod + (prod >> shift)) >> shift approximates prod / ((1 << shift) - 1).
    return (prod + (prod >> shift)) >> shift;
}

/**
 *  Return a*b/255, rounding any fractional bits.
 *  Only valid if a and b are unsigned and <= 32767.
 */
static inline U8CPU SkMulDiv255Round(U16CPU a, U16CPU b) {
    SkASSERT(a <= 32767);
    SkASSERT(b <= 32767);
    unsigned prod = SkMulS16(a, b) + 128;
    return (prod + (prod >> 8)) >> 8;
}

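// Example values (illustrative); this is the shift == 8 case of the trick above:
//
//     SkMulDiv255Round(255, 255) == 255
//     SkMulDiv255Round(128, 255) == 128
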
/**
 * Stores numer/denom and numer%denom into div and mod respectively.
 */
template <typename In, typename Out>
inline void SkTDivMod(In numer, In denom, Out* div, Out* mod) {
#ifdef SK_CPU_ARM32
    // If we wrote this as in the else branch, GCC won't fuse the two into one
    // divmod call, but rather a div call followed by a divmod.  Silly!  This
    // version is just as fast as calling __aeabi_[u]idivmod manually, but with
    // prettier code.
    //
    // This benches as around 2x faster than the code in the else branch.
    const In d = numer/denom;
    *div = static_cast<Out>(d);
    *mod = static_cast<Out>(numer-d*denom);
#else
    // On x86 this will just be a single idiv.
    *div = static_cast<Out>(numer/denom);
    *mod = static_cast<Out>(numer%denom);
#endif
}

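// Usage sketch (illustrative; a hypothetical caller):
//
//     int q, r;
//     SkTDivMod(13, 4, &q, &r);  // q == 3, r == 1
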
#endif