/*
 * Copyright (C) 2011 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define __STDC_LIMIT_MACROS

#include <assert.h>
#include <stdint.h>

#include <utils/LinearTransform.h>

// Disable integer sanitization, as these functions may intentionally overflow
// (see comments below).  The ifdef can be removed when host builds use clang.
#if defined(__clang__)
#define ATTRIBUTE_NO_SANITIZE_INTEGER __attribute__((no_sanitize("integer")))
#else
#define ATTRIBUTE_NO_SANITIZE_INTEGER
#endif

namespace android {

// sanitize failure with T = int32_t and x = 0x80000000
template<class T>
ATTRIBUTE_NO_SANITIZE_INTEGER
static inline T ABS(T x) { return (x < 0) ? -x : x; }

// Static math methods involving linear transformations.
// The attribute below suppresses the sanitize failure in the overflow case.
ATTRIBUTE_NO_SANITIZE_INTEGER
static bool scale_u64_to_u64(
        uint64_t val,
        uint32_t N,
        uint32_t D,
        uint64_t* res,
        bool round_up_not_down) {
    uint64_t tmp1, tmp2;
    uint32_t r;

    assert(res);
    assert(D);

    // Let U32(X) denote a uint32_t containing the upper 32 bits of a 64 bit
    // integer X.
    // Let L32(X) denote a uint32_t containing the lower 32 bits of a 64 bit
    // integer X.
    // Let X[A, B] with A <= B denote bits A through B of the integer X.
    // Let (A | B) denote the concatenation of two 32 bit ints, A and B.
    // IOW X = (A | B) => U32(X) == A && L32(X) == B
    //
    // compute M = val * N (a 96 bit int)
    // ---------------------------------
    // tmp2 = U32(val) * N (a 64 bit int)
    // tmp1 = L32(val) * N (a 64 bit int)
    // which means
    // M = val * N = (tmp2 << 32) + tmp1
    tmp2 = (val >> 32) * N;
    tmp1 = (val & UINT32_MAX) * N;

    // compute M[32, 95]
    // tmp2 = tmp2 + U32(tmp1)
    //      = (U32(val) * N) + U32(L32(val) * N)
    //      = M[32, 95]
    tmp2 += tmp1 >> 32;

    // if M[64, 95] >= D, then M/D has bits > 63 set and we have
    // an overflow.
    if ((tmp2 >> 32) >= D) {
        *res = UINT64_MAX;
        return false;
    }

    // Divide.  Going in we know
    // tmp2 = M[32, 95]
    // U32(tmp2) < D
    r = tmp2 % D;
    tmp2 /= D;

    // At this point
    // tmp1      = L32(val) * N
    // tmp2      = M[32, 95] / D
    //           = (M / D)[32, 95]
    // r         = M[32, 95] % D
    // U32(tmp2) = 0
    //
    // compute tmp1 = (r | M[0, 31])
    tmp1 = (tmp1 & UINT32_MAX) | ((uint64_t)r << 32);

    // Divide again.  Keep the remainder around in order to round properly.
    r = tmp1 % D;
    tmp1 /= D;

    // At this point
    // tmp2      = (M / D)[32, 95]
    // tmp1      = (M / D)[ 0, 31]
    // r         =  M % D
    // U32(tmp1) = 0
    // U32(tmp2) = 0

    // Pack the result and deal with the round-up case (as well as the
    // remote possibility of overflow in such a case).
    *res = (tmp2 << 32) | tmp1;
    if (r && round_up_not_down) {
        ++(*res);
        if (!(*res)) {
            *res = UINT64_MAX;
            return false;
        }
    }

    return true;
}
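
// Worked example for scale_u64_to_u64 (illustrative only, not part of the
// original sources): for val = 0x100000000, N = 3, D = 2, the 96 bit product
// is M = val * N = 0x300000000.  The first division handles M[32, 95] = 0x3,
// giving quotient 0x1 and remainder 0x1; the second handles
// (0x1 | M[0, 31]) = 0x100000000, giving 0x80000000 with remainder 0.  The
// packed result is res = 0x180000000 and, since the final remainder is zero,
// no round-up adjustment is applied and the call returns true.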

// at least one known sanitize failure (see comment below)
ATTRIBUTE_NO_SANITIZE_INTEGER
static bool linear_transform_s64_to_s64(
        int64_t  val,
        int64_t  basis1,
        int32_t  N,
        uint32_t D,
        bool     invert_frac,
        int64_t  basis2,
        int64_t* out) {
    uint64_t scaled, res;
    uint64_t abs_val;
    bool is_neg;

    if (!out)
        return false;

    // Compute abs(val - basis1).  Keep track of whether or not this delta
    // will be negative after the scale operation.
    if (val < basis1) {
        is_neg = true;
        abs_val = basis1 - val;
    } else {
        is_neg = false;
        abs_val = val - basis1;
    }

    if (N < 0)
        is_neg = !is_neg;

    if (!scale_u64_to_u64(abs_val,
                          invert_frac ? D : ABS(N),
                          invert_frac ? ABS(N) : D,
                          &scaled,
                          is_neg))
        return false; // overflow/underflow

    // if scaled is >= 0x8000<etc>, then we are going to overflow or
    // underflow unless ABS(basis2) is large enough to pull us back into the
    // non-overflow/underflow region.
    if (scaled & INT64_MIN) {
        if (is_neg && (basis2 < 0))
            return false; // certain underflow

        if (!is_neg && (basis2 >= 0))
            return false; // certain overflow

        if (ABS(basis2) <= static_cast<int64_t>(scaled & INT64_MAX))
            return false; // not enough

        // Looks like we are OK
        *out = (is_neg ? (-scaled) : scaled) + basis2;
    } else {
        // Scaled fits within signed bounds, so we just need to check for
        // over/underflow for two signed integers.  Basically, if both scaled
        // and basis2 have the same sign bit, and the result has a different
        // sign bit, then we have under/overflow.  An easy way to compute this
        // is
        // (scaled_signbit XNOR basis_signbit) &&
        // (scaled_signbit XOR res_signbit)
        // ==
        // (scaled_signbit XOR basis_signbit XOR 1) &&
        // (scaled_signbit XOR res_signbit)
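        //
        // Illustrative instance (not from the original sources): with
        // scaled == 1 and basis2 == INT64_MAX, res wraps around to
        // INT64_MIN; both addends have a clear sign bit while res has it
        // set, so the check below evaluates non-zero and the overflow is
        // reported.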

        if (is_neg)
            scaled = -scaled; // known sanitize failure
        res = scaled + basis2;

        if ((scaled ^ basis2 ^ INT64_MIN) & (scaled ^ res) & INT64_MIN)
            return false;

        *out = res;
    }

    return true;
}

bool LinearTransform::doForwardTransform(int64_t a_in, int64_t* b_out) const {
    if (0 == a_to_b_denom)
        return false;

    return linear_transform_s64_to_s64(a_in,
                                       a_zero,
                                       a_to_b_numer,
                                       a_to_b_denom,
                                       false,
                                       b_zero,
                                       b_out);
}

bool LinearTransform::doReverseTransform(int64_t b_in, int64_t* a_out) const {
    if (0 == a_to_b_numer)
        return false;

    return linear_transform_s64_to_s64(b_in,
                                       b_zero,
                                       a_to_b_numer,
                                       a_to_b_denom,
                                       true,
                                       a_zero,
                                       a_out);
}
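
// Usage sketch (illustrative only; the LinearTransform fields referenced here
// are the ones declared in utils/LinearTransform.h):
//
//   LinearTransform lt;
//   lt.a_zero = 0;          // origin of timeline A
//   lt.b_zero = 1000;       // origin of timeline B
//   lt.a_to_b_numer = 1;    // slope numerator (A units -> B units)
//   lt.a_to_b_denom = 2;    // slope denominator
//
//   int64_t b, a;
//   if (lt.doForwardTransform(500, &b)) {
//       // b == 1250: (500 - a_zero) * 1/2 + b_zero
//   }
//   if (lt.doReverseTransform(1250, &a)) {
//       // a == 500: (1250 - b_zero) * 2/1 + a_zero
//   }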

template <class T> void LinearTransform::reduce(T* N, T* D) {
    T a, b;
    if (!N || !D || !(*D)) {
        assert(false);
        return;
    }

    a = *N;
    b = *D;

    if (a == 0) {
        *D = 1;
        return;
    }

    // This implements Euclid's method to find GCD.
    if (a < b) {
        T tmp = a;
        a = b;
        b = tmp;
    }

    while (1) {
        // a is now the greater of the two.
        const T remainder = a % b;
        if (remainder == 0) {
            *N /= b;
            *D /= b;
            return;
        }
        // by swapping remainder and b, we are guaranteeing that a is
        // still the greater of the two upon entrance to the loop.
        a = b;
        b = remainder;
    }
}
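
// Worked example (illustrative, not from the original sources): calling
// reduce<uint32_t> with *N == 18 and *D == 12 walks the remainder chain
// 18 % 12 == 6, 12 % 6 == 0, so the GCD is 6 and the pair is left as
// *N == 3, *D == 2.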

template void LinearTransform::reduce<uint64_t>(uint64_t* N, uint64_t* D);
template void LinearTransform::reduce<uint32_t>(uint32_t* N, uint32_t* D);

// sanitize failure if *N = 0x80000000
ATTRIBUTE_NO_SANITIZE_INTEGER
void LinearTransform::reduce(int32_t* N, uint32_t* D) {
    if (N && D && *D) {
        if (*N < 0) {
            *N = -(*N);
            reduce(reinterpret_cast<uint32_t*>(N), D);
            *N = -(*N);
        } else {
            reduce(reinterpret_cast<uint32_t*>(N), D);
        }
    }
}
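
// Illustrative example (not from the original sources): with *N == -6 and
// *D == 4, the overload above negates N to 6, reduces the unsigned pair to
// 3/2, and restores the sign, leaving *N == -3 and *D == 2.  The negation is
// what trips the integer sanitizer when *N == INT32_MIN (0x80000000), hence
// the attribute.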

}  // namespace android