/*===-- atomic.c - Implement support functions for atomic operations.------===
 *
 *                     The LLVM Compiler Infrastructure
 *
 * This file is dual licensed under the MIT and the University of Illinois Open
 * Source Licenses. See LICENSE.TXT for details.
 *
 *===----------------------------------------------------------------------===
 *
 *  atomic.c defines a set of functions for performing atomic accesses on
 *  arbitrary-sized memory locations.  This design uses locks that should
 *  be fast in the uncontended case, for two reasons:
 *
 *  1) This code must work with C programs that do not link to anything
 *     (including pthreads) and so it should not depend on any pthread
 *     functions.
 *  2) Atomic operations, rather than explicit mutexes, are most commonly used
 *     on code where contended operations are rare.
 *
 *  To avoid needing a per-object lock, this code allocates an array of
 *  locks and hashes the object pointers to find the one that it should use.
 *  For operations that must be atomic on two locations, the lower lock is
 *  always acquired first, to avoid deadlock.
 *
 *===----------------------------------------------------------------------===
 */
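
// Illustrative sketch only (this file never holds more than one lock at a
// time): an operation that had to cover two locations would take its two
// hashed locks in address order, so that every thread contends for any pair
// of locks in the same order and deadlock is impossible:
//
//   Lock *lo = lock_for_pointer(p1), *hi = lock_for_pointer(p2);
//   if (hi < lo) { Lock *tmp = lo; lo = hi; hi = tmp; }
//   lock(lo);
//   if (hi != lo) lock(hi);
//   /* ...operate on both locations... */
//   if (hi != lo) unlock(hi);
//   unlock(lo);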

#include <stdint.h>
#include <string.h>

// Clang objects if you redefine a builtin.  This little hack allows us to
// define a function with the same name as an intrinsic.
#if __APPLE__
// mach-o has extra leading underscore
#pragma redefine_extname __atomic_load_c ___atomic_load
#pragma redefine_extname __atomic_store_c ___atomic_store
#pragma redefine_extname __atomic_exchange_c ___atomic_exchange
#pragma redefine_extname __atomic_compare_exchange_c ___atomic_compare_exchange
#else
#pragma redefine_extname __atomic_load_c __atomic_load
#pragma redefine_extname __atomic_store_c __atomic_store
#pragma redefine_extname __atomic_exchange_c __atomic_exchange
#pragma redefine_extname __atomic_compare_exchange_c __atomic_compare_exchange
#endif
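
// With the pragmas above, the definition of __atomic_load_c below is emitted
// under the public symbol __atomic_load (___atomic_load on Mach-O).  That is
// the libcall the compiler generates when a generic atomic operation has no
// lock-free instruction sequence; conceptually, a seq_cst load of a 3-byte
// object at p is lowered to
//   __atomic_load(3, p, &result, __ATOMIC_SEQ_CST);
// which resolves to __atomic_load_c in this file.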

/// Number of locks.  This allocates one page on 32-bit platforms, two on
/// 64-bit.  This can be specified externally if a different trade-off between
/// memory usage and contention probability is required for a given platform.
#ifndef SPINLOCK_COUNT
#define SPINLOCK_COUNT (1<<10)
#endif
static const long SPINLOCK_MASK = SPINLOCK_COUNT - 1;

////////////////////////////////////////////////////////////////////////////////
// Platform-specific lock implementation.  Falls back to spinlocks if none is
// defined.  Each platform should define the Lock type, and corresponding
// lock() and unlock() functions.
////////////////////////////////////////////////////////////////////////////////
#ifdef __FreeBSD__
#include <errno.h>
#include <sys/types.h>
#include <machine/atomic.h>
#include <sys/umtx.h>
typedef struct _usem Lock;
inline static void unlock(Lock *l) {
  __c11_atomic_store((_Atomic(uint32_t)*)&l->_count, 1, __ATOMIC_RELEASE);
  __c11_atomic_thread_fence(__ATOMIC_SEQ_CST);
  if (l->_has_waiters)
      _umtx_op(l, UMTX_OP_SEM_WAKE, 1, 0, 0);
}
inline static void lock(Lock *l) {
  uint32_t old = 1;
  while (!__c11_atomic_compare_exchange_weak((_Atomic(uint32_t)*)&l->_count, &old,
        0, __ATOMIC_ACQUIRE, __ATOMIC_RELAXED)) {
    _umtx_op(l, UMTX_OP_SEM_WAIT, 0, 0, 0);
    old = 1;
  }
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT] = { [0 ...  SPINLOCK_COUNT-1] = {0,1,0} };

#elif defined(__APPLE__)
#include <libkern/OSAtomic.h>
typedef OSSpinLock Lock;
/// Unlock a lock.  This is a release operation.
inline static void unlock(Lock *l) {
  OSSpinLockUnlock(l);
}
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
inline static void lock(Lock *l) {
  OSSpinLockLock(l);
}
static Lock locks[SPINLOCK_COUNT]; // initialized to OS_SPINLOCK_INIT which is 0

#else
typedef _Atomic(uintptr_t) Lock;
/// Unlock a lock.  This is a release operation.
inline static void unlock(Lock *l) {
  __c11_atomic_store(l, 0, __ATOMIC_RELEASE);
}
/// Locks a lock.  In the current implementation, this is potentially
/// unbounded in the contended case.
inline static void lock(Lock *l) {
  uintptr_t old = 0;
  while (!__c11_atomic_compare_exchange_weak(l, &old, 1, __ATOMIC_ACQUIRE,
        __ATOMIC_RELAXED))
    old = 0;
}
/// locks for atomic operations
static Lock locks[SPINLOCK_COUNT];
#endif


/// Returns a lock to use for a given pointer.
static inline Lock *lock_for_pointer(void *ptr) {
  intptr_t hash = (intptr_t)ptr;
  // Disregard the lowest 4 bits.  We want all values that may be part of the
  // same memory operation to hash to the same value and therefore use the same
  // lock.
  hash >>= 4;
  // Use the next bits as the basis for the hash.
  intptr_t low = hash & SPINLOCK_MASK;
  // Now use the high(er) set of bits to perturb the hash, so that we don't
  // get collisions from atomic fields in a single object.
  hash >>= 16;
  hash ^= low;
  // Return a pointer to the word to use.
  return locks + (hash & SPINLOCK_MASK);
}
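
// Worked example with the default SPINLOCK_COUNT (1<<10, so SPINLOCK_MASK is
// 0x3ff): the index computed above is
//   ((ptr >> 4) & 0x3ff) ^ ((ptr >> 20) & 0x3ff)
// i.e. pointer bits [4,14) xor'd with bits [20,30).  Every address within a
// 16-byte block maps to the same lock, while fields at different offsets in
// a large object differ in bits [4,14) and so tend to spread across the
// lock array.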

/// Macros for determining whether a size is lock free.  Clang cannot yet
/// codegen __atomic_is_lock_free(16), so for now we assume 16-byte values are
/// not lock free.
#define IS_LOCK_FREE_1 __c11_atomic_is_lock_free(1)
#define IS_LOCK_FREE_2 __c11_atomic_is_lock_free(2)
#define IS_LOCK_FREE_4 __c11_atomic_is_lock_free(4)
#define IS_LOCK_FREE_8 __c11_atomic_is_lock_free(8)
#define IS_LOCK_FREE_16 0

/// Macro that calls the compiler-generated lock-free versions of functions
/// when they exist.  Each case ends with a break: if a size has no lock-free
/// support, control falls back to the locked implementation that follows the
/// macro, never to a wider size's case.
#define LOCK_FREE_CASES() \
  do {\
  switch (size) {\
    case 1:\
      if (IS_LOCK_FREE_1) {\
        LOCK_FREE_ACTION(uint8_t);\
      }\
      break;\
    case 2:\
      if (IS_LOCK_FREE_2) {\
        LOCK_FREE_ACTION(uint16_t);\
      }\
      break;\
    case 4:\
      if (IS_LOCK_FREE_4) {\
        LOCK_FREE_ACTION(uint32_t);\
      }\
      break;\
    case 8:\
      if (IS_LOCK_FREE_8) {\
        LOCK_FREE_ACTION(uint64_t);\
      }\
      break;\
    case 16:\
      if (IS_LOCK_FREE_16) {\
        /* FIXME: __uint128_t isn't available on 32 bit platforms.
        LOCK_FREE_ACTION(__uint128_t);*/\
      }\
      break;\
  }\
  } while (0)
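
// For example, in __atomic_load_c below a 4-byte access reaches:
//
//   case 4:
//     if (IS_LOCK_FREE_4) {
//       *((uint32_t*)dest) = __c11_atomic_load((_Atomic(uint32_t)*)src, model);
//       return;
//     }
//     break;
//
// so a lock-free 4-byte load returns without ever touching the lock array.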

/// An atomic load operation.  This is atomic with respect to the source
/// pointer only.
void __atomic_load_c(int size, void *src, void *dest, int model) {
#define LOCK_FREE_ACTION(type) \
    *((type*)dest) = __c11_atomic_load((_Atomic(type)*)src, model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(src);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}
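
// For instance, a seq_cst load of a 12-byte struct matches no case in
// LOCK_FREE_CASES, so it takes the locked path; conceptually the compiler
// emits (names here are illustrative):
//
//   struct { uint32_t a, b, c; } v;   /* 12 bytes: no lock-free size */
//   __atomic_load(sizeof(v), &src_obj, &v, __ATOMIC_SEQ_CST);
//
// A concurrent __atomic_store_c to the same address hashes to the same lock,
// which is what makes the load and store atomic with respect to each other.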

/// An atomic store operation.  This is atomic with respect to the destination
/// pointer only.
void __atomic_store_c(int size, void *dest, void *src, int model) {
#define LOCK_FREE_ACTION(type) \
    __c11_atomic_store((_Atomic(type)*)dest, *(type*)src, model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(dest);
  lock(l);
  memcpy(dest, src, size);
  unlock(l);
}

/// Atomic compare and exchange operation.  If the value at *ptr is identical
/// to the value at *expected, then this copies the value at *desired to *ptr.
/// If they are not, then this stores the current value from *ptr in
/// *expected.
///
/// This function returns 1 if the exchange takes place or 0 if it fails.
int __atomic_compare_exchange_c(int size, void *ptr, void *expected,
    void *desired, int success, int failure) {
#define LOCK_FREE_ACTION(type) \
  return __c11_atomic_compare_exchange_strong((_Atomic(type)*)ptr, (type*)expected,\
      *(type*)desired, success, failure)
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  if (memcmp(ptr, expected, size) == 0) {
    memcpy(ptr, desired, size);
    unlock(l);
    return 1;
  }
  memcpy(expected, ptr, size);
  unlock(l);
  return 0;
}
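
// Illustrative retry loop (Obj, shared, expected and desired are hypothetical
// names; real callers reach this through the __atomic_compare_exchange
// libcall):
//
//   Obj expected = initial_guess;
//   while (!__atomic_compare_exchange_c(sizeof(Obj), &shared, &expected,
//                                       &desired, __ATOMIC_SEQ_CST,
//                                       __ATOMIC_SEQ_CST)) {
//     /* failed: expected has been refreshed with the current value of
//        shared; recompute desired and retry */
//   }
//   /* success: shared now equals desired */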

/// An atomic exchange operation: atomically replaces the value at *ptr with
/// the value at *val, storing the original value of *ptr at *old.  This is
/// atomic with respect to ptr only.
void __atomic_exchange_c(int size, void *ptr, void *val, void *old, int model) {
#define LOCK_FREE_ACTION(type) \
    *(type*)old = __c11_atomic_exchange((_Atomic(type)*)ptr, *(type*)val,\
        model);\
    return;
  LOCK_FREE_CASES();
#undef LOCK_FREE_ACTION
  Lock *l = lock_for_pointer(ptr);
  lock(l);
  memcpy(old, ptr, size);
  memcpy(ptr, val, size);
  unlock(l);
}

////////////////////////////////////////////////////////////////////////////////
// Where the size is known at compile time, the compiler may emit calls to
// specialised versions of the above functions.
////////////////////////////////////////////////////////////////////////////////
#define OPTIMISED_CASES\
  OPTIMISED_CASE(1, IS_LOCK_FREE_1, uint8_t)\
  OPTIMISED_CASE(2, IS_LOCK_FREE_2, uint16_t)\
  OPTIMISED_CASE(4, IS_LOCK_FREE_4, uint32_t)\
  OPTIMISED_CASE(8, IS_LOCK_FREE_8, uint64_t)\
  /* FIXME: __uint128_t isn't available on 32 bit platforms.
  OPTIMISED_CASE(16, IS_LOCK_FREE_16, __uint128_t)*/\

#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_load_##n(type *src, int model) {\
  if (lockfree)\
    return __c11_atomic_load((_Atomic(type)*)src, model);\
  Lock *l = lock_for_pointer(src);\
  lock(l);\
  type val = *src;\
  unlock(l);\
  return val;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE
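
// The expansion above defines __atomic_load_1, __atomic_load_2,
// __atomic_load_4 and __atomic_load_8, the size-specific libcalls the
// compiler emits when the access size is a compile-time constant.  For
// example, loading an _Atomic(uint64_t) x on a target without 8-byte
// lock-free atomics becomes, conceptually:
//
//   uint64_t v = __atomic_load_8(&x, __ATOMIC_SEQ_CST);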

#define OPTIMISED_CASE(n, lockfree, type)\
void __atomic_store_##n(type *dest, type val, int model) {\
  if (lockfree) {\
    __c11_atomic_store((_Atomic(type)*)dest, val, model);\
    return;\
  }\
  Lock *l = lock_for_pointer(dest);\
  lock(l);\
  *dest = val;\
  unlock(l);\
  return;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)\
type __atomic_exchange_##n(type *dest, type val, int model) {\
  if (lockfree)\
    return __c11_atomic_exchange((_Atomic(type)*)dest, val, model);\
  Lock *l = lock_for_pointer(dest);\
  lock(l);\
  type tmp = *dest;\
  *dest = val;\
  unlock(l);\
  return tmp;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

#define OPTIMISED_CASE(n, lockfree, type)\
int __atomic_compare_exchange_##n(type *ptr, type *expected, type desired,\
    int success, int failure) {\
  if (lockfree)\
    return __c11_atomic_compare_exchange_strong((_Atomic(type)*)ptr, expected, desired,\
        success, failure);\
  Lock *l = lock_for_pointer(ptr);\
  lock(l);\
  if (*ptr == *expected) {\
    *ptr = desired;\
    unlock(l);\
    return 1;\
  }\
  *expected = *ptr;\
  unlock(l);\
  return 0;\
}
OPTIMISED_CASES
#undef OPTIMISED_CASE

////////////////////////////////////////////////////////////////////////////////
// Atomic read-modify-write operations for integers of various sizes.
////////////////////////////////////////////////////////////////////////////////
#define ATOMIC_RMW(n, lockfree, type, opname, op) \
type __atomic_fetch_##opname##_##n(type *ptr, type val, int model) {\
  if (lockfree) \
    return __c11_atomic_fetch_##opname((_Atomic(type)*)ptr, val, model);\
  Lock *l = lock_for_pointer(ptr);\
  lock(l);\
  type tmp = *ptr;\
  *ptr = tmp op val;\
  unlock(l);\
  return tmp;\
}
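
// For example, ATOMIC_RMW(4, IS_LOCK_FREE_4, uint32_t, add, +) expands to:
//
//   uint32_t __atomic_fetch_add_4(uint32_t *ptr, uint32_t val, int model) {
//     if (IS_LOCK_FREE_4)
//       return __c11_atomic_fetch_add((_Atomic(uint32_t)*)ptr, val, model);
//     Lock *l = lock_for_pointer(ptr);
//     lock(l);
//     uint32_t tmp = *ptr;  /* fetch_* returns the value observed before */
//     *ptr = tmp + val;     /* the modification, as C11 requires */
//     unlock(l);
//     return tmp;
//   }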

#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, add, +)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, sub, -)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, and, &)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, or, |)
OPTIMISED_CASES
#undef OPTIMISED_CASE
#define OPTIMISED_CASE(n, lockfree, type) ATOMIC_RMW(n, lockfree, type, xor, ^)
OPTIMISED_CASES
#undef OPTIMISED_CASE
    338