Home | History | Annotate | Download | only in core
      1 
      2 /*
      3  * Copyright 2006 The Android Open Source Project
      4  *
      5  * Use of this source code is governed by a BSD-style license that can be
      6  * found in the LICENSE file.
      7  */
      8 
      9 
     10 #ifndef SkThread_platform_DEFINED
     11 #define SkThread_platform_DEFINED
     12 
     13 #if defined(SK_BUILD_FOR_ANDROID)
     14 
     15 #if defined(SK_BUILD_FOR_ANDROID_NDK)
     16 
     17 #include <stdint.h>
     18 
     19 /* Just use the GCC atomic intrinsics. They're supported by the NDK toolchain,
     20  * have reasonable performance, and provide full memory barriers
     21  */
     22 static inline __attribute__((always_inline)) int32_t sk_atomic_inc(int32_t *addr) {
     23     return __sync_fetch_and_add(addr, 1);
     24 }
     25 
     26 static inline __attribute__((always_inline)) int32_t sk_atomic_add(int32_t *addr, int32_t inc) {
     27     return __sync_fetch_and_add(addr, inc);
     28 }
     29 
     30 static inline __attribute__((always_inline)) int32_t sk_atomic_dec(int32_t *addr) {
     31     return __sync_fetch_and_add(addr, -1);
     32 }
     33 static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_dec() { }
     34 
     35 static inline __attribute__((always_inline)) int32_t sk_atomic_conditional_inc(int32_t* addr) {
     36     int32_t value = *addr;
     37 
     38     while (true) {
     39         if (value == 0) {
     40             return 0;
     41         }
     42 
     43         int32_t before = __sync_val_compare_and_swap(addr, value, value + 1);
     44 
     45         if (before == value) {
     46             return value;
     47         } else {
     48             value = before;
     49         }
     50     }
     51 }
     52 static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_conditional_inc() { }
     53 
     54 #else // !SK_BUILD_FOR_ANDROID_NDK
     55 
     56 /* The platform atomics operations are slightly more efficient than the
     57  * GCC built-ins, so use them.
     58  */
     59 #include <utils/Atomic.h>
     60 
/* Map the sk_atomic_* names onto Android's platform atomics; each returns
 * the previous value of *addr. Note that android_atomic_add takes its
 * arguments as (increment, addr), so sk_atomic_add deliberately swaps the
 * argument order.
 */
#define sk_atomic_inc(addr)         android_atomic_inc(addr)
#define sk_atomic_add(addr, inc)    android_atomic_add(inc, addr)
#define sk_atomic_dec(addr)         android_atomic_dec(addr)
     64 
/* No-op today: Android's platform atomics are implemented with full memory
 * barriers, so no additional acquire barrier is required after
 * sk_atomic_dec. The commented-out code below is the fallback should that
 * ever change.
 */
static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_dec() {
    //HACK: Android is actually using full memory barriers.
    //      Should this change, uncomment below.
    //int dummy;
    //android_atomic_aquire_store(0, &dummy);
}
     71 static inline __attribute__((always_inline)) int32_t sk_atomic_conditional_inc(int32_t* addr) {
     72     while (true) {
     73         int32_t value = *addr;
     74         if (value == 0) {
     75             return 0;
     76         }
     77         if (0 == android_atomic_release_cas(value, value + 1, addr)) {
     78             return value;
     79         }
     80     }
     81 }
/* No-op today: Android's platform atomics are implemented with full memory
 * barriers, so no additional acquire barrier is required after
 * sk_atomic_conditional_inc. The commented-out code below is the fallback
 * should that ever change.
 */
static inline __attribute__((always_inline)) void sk_membar_aquire__after_atomic_conditional_inc() {
    //HACK: Android is actually using full memory barriers.
    //      Should this change, uncomment below.
    //int dummy;
    //android_atomic_aquire_store(0, &dummy);
}
     88 
     89 #endif // !SK_BUILD_FOR_ANDROID_NDK
     90 
     91 #else  // !SK_BUILD_FOR_ANDROID
     92 
     93 /** Implemented by the porting layer, this function adds one to the int
     94     specified by the address (in a thread-safe manner), and returns the
     95     previous value.
     96     No additional memory barrier is required.
     97     This must act as a compiler barrier.
     98 */
     99 SK_API int32_t sk_atomic_inc(int32_t* addr);
    100 
    101 /** Implemented by the porting layer, this function adds inc to the int
    102     specified by the address (in a thread-safe manner), and returns the
    103     previous value.
    104     No additional memory barrier is required.
    105     This must act as a compiler barrier.
    106  */
    107 SK_API int32_t sk_atomic_add(int32_t* addr, int32_t inc);
    108 
    109 /** Implemented by the porting layer, this function subtracts one from the int
    110     specified by the address (in a thread-safe manner), and returns the
    111     previous value.
    112     Expected to act as a release (SL/S) memory barrier and a compiler barrier.
    113 */
    114 SK_API int32_t sk_atomic_dec(int32_t* addr);
    115 /** If sk_atomic_dec does not act as an aquire (L/SL) barrier, this is expected
    116     to act as an aquire (L/SL) memory barrier and as a compiler barrier.
    117 */
    118 SK_API void sk_membar_aquire__after_atomic_dec();
    119 
    120 /** Implemented by the porting layer, this function adds one to the int
    121     specified by the address iff the int specified by the address is not zero
    122     (in a thread-safe manner), and returns the previous value.
    123     No additional memory barrier is required.
    124     This must act as a compiler barrier.
    125 */
    126 SK_API int32_t sk_atomic_conditional_inc(int32_t*);
    127 /** If sk_atomic_conditional_inc does not act as an aquire (L/SL) barrier, this
    128     is expected to act as an aquire (L/SL) memory barrier and as a compiler
    129     barrier.
    130 */
    131 SK_API void sk_membar_aquire__after_atomic_conditional_inc();
    132 
    133 #endif // !SK_BUILD_FOR_ANDROID
    134 
    135 #ifdef SK_USE_POSIX_THREADS
    136 
    137 #include <pthread.h>
    138 
    139 // A SkBaseMutex is a POD structure that can be directly initialized
    140 // at declaration time with SK_DECLARE_STATIC/GLOBAL_MUTEX. This avoids the
    141 // generation of a static initializer in the final machine code (and
    142 // a corresponding static finalizer).
    143 //
// Plain-old-data mutex wrapper. It must stay POD (no constructor, no
// virtuals, no access specifiers) and fMutex must remain the sole data
// member, so that the SK_DECLARE_*_MUTEX macros below can initialize it
// with the brace form { PTHREAD_MUTEX_INITIALIZER } and avoid a static
// initializer.
struct SkBaseMutex {
    void    acquire() { pthread_mutex_lock(&fMutex); }    // blocks until the lock is held
    void    release() { pthread_mutex_unlock(&fMutex); }  // caller must currently hold the lock
    pthread_mutex_t  fMutex;
};
    149 
// Using POD-style initialization prevents the generation of a static initializer
// and keeps the acquire() implementation small and fast.
#define SK_DECLARE_STATIC_MUTEX(name)   static SkBaseMutex  name = { PTHREAD_MUTEX_INITIALIZER }

// Special case used when the static mutex must be available globally.
#define SK_DECLARE_GLOBAL_MUTEX(name)   SkBaseMutex  name = { PTHREAD_MUTEX_INITIALIZER }

// NOTE(review): only element [0] is explicitly initialized here; the
// remaining array elements are value-initialized (zeroed). This assumes
// PTHREAD_MUTEX_INITIALIZER is equivalent to zero-initialization on every
// supported platform — TODO confirm.
#define SK_DECLARE_MUTEX_ARRAY(name, count)    SkBaseMutex name[count] = { { PTHREAD_MUTEX_INITIALIZER } }
    158 
// A normal mutex that requires to be initialized through normal C++ construction,
// i.e. when it's a member of another class, or allocated on the heap.
// Inherits acquire()/release() and the pthread_mutex_t from SkBaseMutex;
// the constructor/destructor (defined in the porting layer) perform the
// pthread init/destroy. SkNoncopyable prevents accidental copies of a
// live mutex.
class SK_API SkMutex : public SkBaseMutex, SkNoncopyable {
public:
    SkMutex();
    ~SkMutex();
};
    166 
    167 #else // !SK_USE_POSIX_THREADS
    168 
    169 // In the generic case, SkBaseMutex and SkMutex are the same thing, and we
    170 // can't easily get rid of static initializers.
    171 //
// Generic (non-pthreads) mutex. Construction/destruction and
// acquire()/release() are implemented by the porting layer.
class SK_API SkMutex : SkNoncopyable {
public:
    SkMutex();
    ~SkMutex();

    void    acquire();  // block until the lock is held
    void    release();  // caller must currently hold the lock

private:
    // NOTE(review): presumably lets the destructor skip teardown for
    // global/static mutexes — confirm against the porting-layer
    // implementation.
    bool fIsGlobal;
    enum {
        // 256 bytes of opaque storage; assumed large enough for any
        // platform's native mutex object — TODO confirm per platform.
        kStorageIntCount = 64
    };
    uint32_t    fStorage[kStorageIntCount];
};
    187 
// Without POSIX threads there is no POD fast path, so SkBaseMutex is just
// an alias for SkMutex and the declaration macros rely on ordinary C++
// construction (which implies static initializers for static/global uses).
typedef SkMutex SkBaseMutex;

#define SK_DECLARE_STATIC_MUTEX(name)           static SkBaseMutex  name
#define SK_DECLARE_GLOBAL_MUTEX(name)           SkBaseMutex  name
#define SK_DECLARE_MUTEX_ARRAY(name, count)     SkBaseMutex name[count]
    193 
    194 #endif // !SK_USE_POSIX_THREADS
    195 
    196 
    197 #endif
    198