/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkAtomics_DEFINED
#define SkAtomics_DEFINED

// This file is not part of the public Skia API.
#include "SkTypes.h"

// Mirrors the values and meaning of std::memory_order.
enum sk_memory_order {
    sk_memory_order_relaxed,
    sk_memory_order_consume,
    sk_memory_order_acquire,
    sk_memory_order_release,
    sk_memory_order_acq_rel,
    sk_memory_order_seq_cst,
};

// Free-function analogues of the std::atomic API.  All default to sequential consistency.
template <typename T>
T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);

// Returns the previous value of the atomic.
template <typename T>
T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);

// Writes desired into the atomic if it held *expected; otherwise updates
// *expected to the value actually observed.  Returns true on success.
template <typename T>
bool sk_atomic_compare_exchange(T*, T* expected, T desired,
                                sk_memory_order success = sk_memory_order_seq_cst,
                                sk_memory_order failure = sk_memory_order_seq_cst);

// Writes the second argument into the atomic and returns the previous value.
template <typename T>
T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst);

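// Example (a sketch, not part of the original header; names are hypothetical):
// a minimal test-and-set spinlock built on these primitives, showing an explicit
// acquire/release pairing instead of the seq_cst default.
//
//     struct ExampleSpinlock {
//         int32_t fLocked = 0;
//         void acquire() {
//             // Exchange returns the previous value; keep spinning while it's 1.
//             while (sk_atomic_exchange(&fLocked, 1, sk_memory_order_acquire)) {}
//         }
//         void release() {
//             sk_atomic_store(&fLocked, 0, sk_memory_order_release);
//         }
//     };
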
// A little wrapper class for small T (think, builtins: int, float, void*) to
// ensure they're always used atomically.  This is our stand-in for std::atomic<T>.
template <typename T>
class SkAtomic : SkNoncopyable {
public:
    // Like std::atomic, the default constructor leaves fVal uninitialized.
    SkAtomic() {}

    // It is essential we return by value rather than by const&.  fVal may change at any time.
    T load(sk_memory_order mo = sk_memory_order_seq_cst) const {
        return sk_atomic_load(&fVal, mo);
    }

    void store(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
        sk_atomic_store(&fVal, val, mo);
    }

    // On failure, *expected is updated to the value actually observed in fVal.
    bool compare_exchange(T* expected, const T& desired,
                          sk_memory_order success = sk_memory_order_seq_cst,
                          sk_memory_order failure = sk_memory_order_seq_cst) {
        return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure);
    }

private:
    T fVal;
};

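// Example (a sketch, not part of the original header; the class is hypothetical):
// a claim-at-most-once flag kept in an SkAtomic.  Note the explicit store in the
// constructor, since SkAtomic's default constructor leaves fVal uninitialized.
//
//     class ExampleOnceFlag {
//     public:
//         ExampleOnceFlag() { fState.store(0); }
//         // Returns true for exactly one caller; compare_exchange refreshes
//         // `expected` on failure, but only the boolean matters here.
//         bool tryClaim() {
//             int expected = 0;
//             return fState.compare_exchange(&expected, 1);
//         }
//     private:
//         SkAtomic<int> fState;
//     };
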
#if defined(_MSC_VER)
    #include "../ports/SkAtomics_std.h"
#elif !defined(SK_BUILD_FOR_IOS) && defined(__ATOMIC_RELAXED)
    #include "../ports/SkAtomics_atomic.h"
#else
    #include "../ports/SkAtomics_sync.h"
#endif

// From here down we have shims for our old atomics API, to be weaned off of.
// We use the default sequentially-consistent memory order to make things simple
// and to match the practical reality of our old _sync and _win implementations.

// These all return the previous value of *ptr.
inline int32_t sk_atomic_inc(int32_t* ptr)            { return sk_atomic_fetch_add(ptr, +1); }
inline int32_t sk_atomic_dec(int32_t* ptr)            { return sk_atomic_fetch_add(ptr, -1); }
inline int32_t sk_atomic_add(int32_t* ptr, int32_t v) { return sk_atomic_fetch_add(ptr,  v); }

inline int64_t sk_atomic_inc(int64_t* ptr) { return sk_atomic_fetch_add<int64_t>(ptr, +1); }

inline bool sk_atomic_cas(int32_t* ptr, int32_t expected, int32_t desired) {
    return sk_atomic_compare_exchange(ptr, &expected, desired);
}

// Returns the previous value of *ptr: on success `expected` is unchanged (and
// equal to the old value); on failure it has been updated to what was observed.
inline void* sk_atomic_cas(void** ptr, void* expected, void* desired) {
    (void)sk_atomic_compare_exchange(ptr, &expected, desired);
    return expected;
}

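// Example (a sketch, not part of the original header; the class is hypothetical):
// the classic ref-count pattern these shims serve.  Since sk_atomic_dec returns
// the previous value, seeing 1 means the count just hit zero.
//
//     class ExampleRefCnt {
//     public:
//         void ref()   { sk_atomic_inc(&fRefCnt); }
//         void unref() {
//             if (1 == sk_atomic_dec(&fRefCnt)) {
//                 delete this;  // requires heap allocation, as with SkRefCnt
//             }
//         }
//     private:
//         int32_t fRefCnt = 1;  // starts owned by the creator
//     };
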
// Atomically increments *ptr unless it is zero, returning the previous value.
// A return of 0 tells the caller the increment did not happen, e.g. when trying
// to take a strong reference only while at least one other still exists.
inline int32_t sk_atomic_conditional_inc(int32_t* ptr) {
    int32_t prev = sk_atomic_load(ptr);
    do {
        if (0 == prev) {
            break;
        }
    } while (!sk_atomic_compare_exchange(ptr, &prev, prev+1));
    return prev;
}

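// Example (a sketch, not part of the original header; names are hypothetical):
// weak-to-strong promotion, the usual client of sk_atomic_conditional_inc.
//
//     bool example_try_ref(int32_t* strongCnt) {
//         // Increments only if some strong reference still exists.
//         return sk_atomic_conditional_inc(strongCnt) != 0;
//     }
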
template <typename T>
T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); }

template <typename T>
void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); }

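// Example (a sketch, not part of the original header; names are hypothetical):
// the publication pattern these helpers exist for.  The writer fills in data and
// release-stores a flag; a reader that acquire-loads the flag and sees it set is
// guaranteed to also see the data.
//
//     static int gExampleValue;
//     static int gExampleReady;  // 0 until gExampleValue is published
//
//     void example_publish(int v) {
//         gExampleValue = v;                    // plain store...
//         sk_release_store(&gExampleReady, 1);  // ...made visible by the release
//     }
//
//     bool example_consume(int* out) {
//         if (sk_acquire_load(&gExampleReady)) {  // pairs with the release above
//             *out = gExampleValue;               // happens-after example_publish
//             return true;
//         }
//         return false;
//     }
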
// These barrier shims are no-ops today: the shims above default to sequentially-
// consistent memory order, which already supplies the ordering they once provided.
inline void sk_membar_acquire__after_atomic_dec() {}
inline void sk_membar_acquire__after_atomic_conditional_inc() {}

#endif//SkAtomics_DEFINED