/*===-- atomic.c - Implement support functions for atomic operations.------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 *===----------------------------------------------------------------------===
 *
 *  atomic.c defines a set of functions for performing atomic accesses on
 *  arbitrary-sized memory locations.  This design uses locks that should
 *  be fast in the uncontended case, for two reasons:
 *
 *  1) This code must work with C programs that do not link to anything
 *     (including pthreads) and so it should not depend on any pthread
 *     functions.
 *  2) Atomic operations, rather than explicit mutexes, are most commonly used
 *     in code where contended operations are rare.
 *
 *  To avoid needing a per-object lock, this code allocates an array of
 *  locks and hashes the object pointers to find the one that it should use.
 *  For operations that must be atomic on two locations, the lower lock is
 *  always acquired first, to avoid deadlock (see the lock_two() sketch
 *  below).
 *
 *===----------------------------------------------------------------------===
 */

#include <stdint.h>
#include <string.h>

// Clang objects if you redefine a builtin.  This little hack allows us to
// define the implementation functions under different names and then give
// each one the external symbol of the corresponding intrinsic.
#pragma redefine_extname __atomic_load_c __atomic_load
#pragma redefine_extname __atomic_store_c __atomic_store
#pragma redefine_extname __atomic_exchange_c __atomic_exchange
#pragma redefine_extname __atomic_compare_exchange_c __atomic_compare_exchange

/// Number of locks.  This allocates one page on 32-bit platforms, two on
/// 64-bit.  This can be specified externally if a different trade-off between
/// memory usage and contention probability is required for a given platform.
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1<<10)
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;
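// For example, a memory-constrained platform could shrink the lock table by
// building this file with something along the lines of (hypothetical value):
//   cc -c -DSPINLOCK_COUNT='(1<<8)' atomic.c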

////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation.  Falls back to spinlocks if none is
// defined.  Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __FreeBSD__
#include <errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
typedef struct _usem Lock;
inline static void unlock(Lock *l) {
  __c11_atomic_store((_Atomic(uint32_t)*)&l->_count, 1, __ATOMIC_RELEASE);
  __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
  if (l->_has_waiters)
      _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
inline static void lock(Lock *l) {
  uint32_t old = 1;
  while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t)*)&l->_count,
        &old, 0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
    _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
    old = 1;
  }
}
/// Locks used by the atomic operations.
static Lock locks[SPINLOCK_COUNT] = { [0 ...  SPINLOCK_COUNT-1] = {0,1,0} };
#else
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock.  This is a release operation.
inline static void unlock(Lock *l) {
  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
}
/// Acquires a lock.  In the current implementation, this spins and is
/// potentially unbounded in the contended case.
inline static void lock(Lock *l) {
  uintptr_t old = 0;
  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
        __ATOMIC_RELAXED))
    old = 0;
}
/// Locks used by the atomic operations.
static Lock locks[SPINLOCK_COUNT];
#endif
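
// Illustrative sketch (not used by the functions below): when an operation
// must hold the locks for two locations at once, the header comment says the
// lower lock is acquired first.  A hypothetical helper enforcing that
// ordering would look like this:
static inline void lock_two(Lock *a, Lock *b) {
  // Sort by address so that all callers acquire the locks in the same order,
  // which prevents deadlock between two concurrent two-location operations.
  if (a > b) {
    Lock *tmp = a;
    a = b;
    b = tmp;
  }
  lock(a);
  // Both locations may hash to the same lock; don't acquire it twice.
  if (b != a)
    lock(b);
}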


/// Returns a lock to use for a given pointer.
static inline Lock *lock_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  // Disregard the lowest 4 bits.  We want all values that may be part of the
  // same memory operation to hash to the same value and therefore use the same
  // lock.
  hash >>= 4;
  // Use the next bits as the basis for the hash.
  intptr_t low = hash & SPINLOCK_MASK;
  // Now use the high(er) set of bits to perturb the hash, so that we don't
  // get collisions from atomic fields in a single object.
  hash >>= 16;
  hash ^= low;
  // Return a pointer to the word to use.
  return locks + (hash & SPINLOCK_MASK);
}
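
// Worked example, assuming the default SPINLOCK_COUNT of 1<<10 (so
// SPINLOCK_MASK == 0x3ff): for ptr == (void*)0x7fffa3d0,
//   hash  = 0x7fffa3d0 >> 4   == 0x07fffa3d
//   low   = hash & 0x3ff      == 0x23d
//   hash >>= 16               == 0x7ff
//   hash ^= low               == 0x5c2
//   index = 0x5c2 & 0x3ff     == 0x1c2
// so this pointer is guarded by locks[450].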

/// Macros for determining whether a size is lock free.  Clang cannot yet
/// codegen __c11_atomic_is_lock_free(16), so for now we assume 16-byte values
/// are not lock free.
#define IS_LOCK_FREE_1 __c11_atomic_is_lock_free(1)
#define IS_LOCK_FREE_2 __c11_atomic_is_lock_free(2)
#define IS_LOCK_FREE_4 __c11_atomic_is_lock_free(4)
#define IS_LOCK_FREE_8 __c11_atomic_is_lock_free(8)
#define IS_LOCK_FREE_16 0

/// Macro that performs the operation via the corresponding lock-free builtin
/// when the size is lock free, and otherwise drops through to the locked
/// path.
#define LOCK_FREE_CASES() \
  do {\
  switch (size) {\
    case 1:\
      if (IS_LOCK_FREE_1) {\
        LOCK_FREE_ACTION(uint8_t);\
      }\
      break;\
    case 2:\
      if (IS_LOCK_FREE_2) {\
        LOCK_FREE_ACTION(uint16_t);\
      }\
      break;\
    case 4:\
      if (IS_LOCK_FREE_4) {\
        LOCK_FREE_ACTION(uint32_t);\
      }\
      break;\
    case 8:\
      if (IS_LOCK_FREE_8) {\
        LOCK_FREE_ACTION(uint64_t);\
      }\
      break;\
    case 16:\
      if (IS_LOCK_FREE_16) {\
        /* FIXME: __uint128_t isn't available on 32 bit platforms.
        LOCK_FREE_ACTION(__uint128_t);*/\
      }\
      break;\
  }\
  } while (0)
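
// For example, inside __atomic_load_c below, the `case 4:` arm of
// LOCK_FREE_CASES() expands to roughly:
//
//   if (__c11_atomic_is_lock_free(4)) {
//     *((uint32_t*)dest) = __c11_atomic_load((_Atomic(uint32_t)*)src, model);
//     return;
//   }
//   break;
//
// so lock-free sizes return early and everything else drops through to the
// locked memcpy path.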


/// An atomic load operation.  This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type) \
    *((type*)dest) = __c11_atomic_load((_Atomic(type)*)src, model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(src);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}
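
// Sketch of the kind of call the compiler emits for an oversized atomic load
// (hypothetical type; the external symbol is __atomic_load thanks to the
// pragma above):
//
//   struct big { char bytes[32]; };
//   _Atomic struct big g;
//   struct big tmp;
//   /* `tmp = g;` as a seq_cst atomic load lowers to roughly: */
//   __atomic_load(sizeof(g), &g, &tmp, __ATOMIC_SEQ_CST);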

/// An atomic store operation.  This is atomic with respect to the destination
/// pointer only.
void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type) \
    __c11_atomic_store((_Atomic(type)*)dest, *(type*)src, model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(dest);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// Atomic compare and exchange operation.  If the value at *ptr is identical
/// to the value at *expected, then this copies the value at *desired to *ptr.
/// If they differ, then this stores the current value from *ptr in *expected.
///
/// This function returns 1 if the exchange takes place or 0 if it fails.
int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
    void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type) \
  return __c11_atomic_compare_exchange_strong((_Atomic(type)*)ptr,\
      (type*)expected, *(type*)desired, success, failure)
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  if (memcmp(ptr, expected, size) == 0) {
    memcpy(ptr, desired, size);
    unlock(l);
    return 1;
  }
  memcpy(expected, ptr, size);
  unlock(l);
  return 0;
}
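
// Sketch of a call to the generic version above (external symbol
// __atomic_compare_exchange; hypothetical 12-byte type):
//
//   struct triple { int a, b, c; };
//   _Atomic struct triple t;
//   struct triple expected, desired;
//   /* Returns 1 and stores `desired` into t if t matched `expected`
//      bytewise; otherwise returns 0 and copies t's value into `expected`: */
//   int ok = __atomic_compare_exchange(sizeof(t), &t, &expected, &desired,
//                                      __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);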

/// An atomic exchange operation.  This atomically stores the value from *val
/// into *ptr and copies the previous value at *ptr into *old.  It is atomic
/// with respect to the target address only.
void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type) \
    *(type*)old = __c11_atomic_exchange((_Atomic(type)*)ptr, *(type*)val,\
        model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  memcpy(old, ptr, size);
  memcpy(ptr, val, size);
  unlock(l);
}

////////////////////////////////////////////////////////////////////////////////
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
#define OPTIMISED_CASES\
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)\
  /* FIXME: __uint128_t isn't available on 32 bit platforms.
  OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)*/\

#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_load_##n(type *src, int model) {\
  if (lockfree)\
    return __c11_atomic_load((_Atomic(type)*)src, model);\
  Lock *l = lock_for_pointer(src);\
  lock(l);\
  type val = *src;\
  unlock(l);\
  return val;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE
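
// The instantiations above generate, among others:
//
//   uint8_t  __atomic_load_1(uint8_t *src, int model);
//   uint16_t __atomic_load_2(uint16_t *src, int model);
//   uint32_t __atomic_load_4(uint32_t *src, int model);
//   uint64_t __atomic_load_8(uint64_t *src, int model);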

#define OPTIMISED_CASE(n, lockfree, type)\
void __atomic_store_##n(type *dest, type val, int model) {\
  if (lockfree) {\
    __c11_atomic_store((_Atomic(type)*)dest, val, model);\
    return;\
  }\
  Lock *l = lock_for_pointer(dest);\
  lock(l);\
  *dest = val;\
  unlock(l);\
  return;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_exchange_##n(type *dest, type val, int model) {\
  if (lockfree)\
    return __c11_atomic_exchange((_Atomic(type)*)dest, val, model);\
  Lock *l = lock_for_pointer(dest);\
  lock(l);\
  type tmp = *dest;\
  *dest = val;\
  unlock(l);\
  return tmp;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)\
int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,\
    int success, int failure) {\
  if (lockfree)\
    return __c11_atomic_compare_exchange_strong((_Atomic(type)*)ptr, expected,\
        desired, success, failure);\
  Lock *l = lock_for_pointer(ptr);\
  lock(l);\
  if (*ptr == *expected) {\
    *ptr = desired;\
    unlock(l);\
    return 1;\
  }\
  *expected = *ptr;\
  unlock(l);\
  return 0;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

////////////////////////////////////////////////////////////////////////////////
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
#define ATOMIC_RMW(n, lockfree, type, opname, op) \
type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {\
  if (lockfree) \
    return __c11_atomic_fetch_##opname((_Atomic(type)*)ptr, val, model);\
  Lock *l = lock_for_pointer(ptr);\
  lock(l);\
  type tmp = *ptr;\
  *ptr = tmp op val;\
  unlock(l);\
  return tmp;\
}
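
// For example, the `add` instantiation below expands, for n == 4, to roughly:
//
//   uint32_t __atomic_fetch_add_4(uint32_t *ptr, uint32_t val, int model) {
//     if (__c11_atomic_is_lock_free(4))
//       return __c11_atomic_fetch_add((_Atomic(uint32_t)*)ptr, val, model);
//     Lock *l = lock_for_pointer(ptr);
//     lock(l);
//     uint32_t tmp = *ptr;
//     *ptr = tmp + val;
//     unlock(l);
//     return tmp;
//   }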

#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE