/* Copyright (c) 2006, Google Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

// Implementation of atomic operations for Mac OS X.  This file should not
// be included directly.  Clients should instead include
// "base/atomicops.h".

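// A minimal usage sketch (assuming the base::subtle wrappers that
// "base/atomicops.h" exposes on top of this file; BumpCounter is a
// hypothetical helper):
//
//   #include "base/atomicops.h"
//
//   static base::subtle::Atomic32 counter = 0;
//
//   void BumpCounter() {
//     base::subtle::NoBarrier_AtomicIncrement(&counter, 1);
//   }
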
#ifndef BASE_ATOMICOPS_INTERNALS_MACOSX_H_
#define BASE_ATOMICOPS_INTERNALS_MACOSX_H_

typedef int32_t Atomic32;

// MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always
// different types on the Mac, even when they are the same size.  Similarly,
// on __ppc64__, AtomicWord and Atomic64 are always different.  Thus, we
// need explicit casting.
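// (AtomicWordCastType is assumed to be consumed by "base/atomicops.h",
// which implements the AtomicWord operations by casting AtomicWord* to
// AtomicWordCastType* and forwarding to the Atomic32/Atomic64 functions
// defined below.)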
#ifdef __LP64__
#define AtomicWordCastType base::subtle::Atomic64
#else
#define AtomicWordCastType Atomic32
#endif

#if defined(__LP64__) || defined(__i386__)
#define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic*
#endif

#include <libkern/OSAtomic.h>

namespace base {
namespace subtle {

#if !defined(__LP64__) && defined(__ppc__)

// The Mac 64-bit OSAtomic implementations are not available for 32-bit
// PowerPC, and the underlying assembly instructions are available on only
// some implementations of PowerPC.

// The following inline functions will fail at compile time with an error
// message ONLY IF they are called.  So it is safe to use this header if
// user code only calls AtomicWord and Atomic32 operations.
//
// NOTE(vchen): Notes on implementing the atomic ops below may be found in
// "PowerPC Virtual Environment Architecture, Book II, Version 2.02",
// January 28, 2005, Appendix B, page 46.  Unfortunately, extra care must
// be taken to ensure data are properly 8-byte aligned, and that data are
// returned correctly according to Mac OS X ABI specs.

inline int64_t OSAtomicCompareAndSwap64(
    int64_t oldValue, int64_t newValue, int64_t *theValue) {
  __asm__ __volatile__(
      "_OSAtomicCompareAndSwap64_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

inline int64_t OSAtomicAdd64(int64_t theAmount, int64_t *theValue) {
  __asm__ __volatile__(
      "_OSAtomicAdd64_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

inline int64_t OSAtomicCompareAndSwap64Barrier(
    int64_t oldValue, int64_t newValue, int64_t *theValue) {
  int64_t prev = OSAtomicCompareAndSwap64(oldValue, newValue, theValue);
  OSMemoryBarrier();
  return prev;
}

inline int64_t OSAtomicAdd64Barrier(
    int64_t theAmount, int64_t *theValue) {
  int64_t new_val = OSAtomicAdd64(theAmount, theValue);
  OSMemoryBarrier();
  return new_val;
}
#endif

typedef int64_t Atomic64;

inline void MemoryBarrier() {
  OSMemoryBarrier();
}

// 32-bit Versions.

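// NOTE: OSAtomicCompareAndSwap32() reports only success or failure as a
// bool, so the CompareAndSwap wrappers below loop to recover the value that
// was actually observed at *ptr, which is what callers of these operations
// expect to get back.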
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
                                          Atomic32 increment) {
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                        Atomic32 increment) {
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}

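// Usage sketch (illustrative only; "refcount" and "obj" are hypothetical):
// a reference-count decrement needs a barrier so that the releasing
// thread's earlier writes are visible before the object is destroyed:
//
//   if (Barrier_AtomicIncrement(&refcount, -1) == 0) delete obj;
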
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

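// The libkern interface does not distinguish between Acquire and Release
// memory barriers, so the Release variant simply reuses the Acquire CAS
// (see the matching comment in the 64-bit version below).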
inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
  MemoryBarrier();
  return *ptr;
}

// 64-bit Versions.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     const_cast<Atomic64*>(ptr)));
  return old_value;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr));
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(old_value, new_value,
                                        const_cast<Atomic64*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The libkern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

#ifdef __LP64__

// 64-bit implementation on 64-bit platform

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  MemoryBarrier();
  return *ptr;
}

#else

// 64-bit implementation on 32-bit platform

#if defined(__ppc__)

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__(
      "_NoBarrier_Store_not_supported_for_32_bit_ppc\n\t");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  __asm__ __volatile__(
      "_NoBarrier_Load_not_supported_for_32_bit_ppc\n\t");
  return 0;
}

#elif defined(__i386__)

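// NOTE: an 8-byte aligned "movq" through an MMX register is performed as a
// single atomic 64-bit memory access on IA-32, which is what makes the
// store and load below usable for Atomic64 on a 32-bit x86 build (callers
// must keep Atomic64 values 8-byte aligned).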
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__("movq %1, %%mm0\n\t"  // Use mmx reg for 64-bit atomic
                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
                       "emms\n\t"            // Reset FP registers
                       : "=m" (*ptr)
                       : "m" (value)
                       : // mark the FP stack and mmx registers as clobbered
                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  Atomic64 value;
  __asm__ __volatile__("movq %1, %%mm0\n\t"  // Use mmx reg for 64-bit atomic
                       "movq %%mm0, %0\n\t"  // moves (ptr could be read-only)
                       "emms\n\t"            // Reset FP registers
                       : "=m" (value)
                       : "m" (*ptr)
                       : // mark the FP stack and mmx registers as clobbered
                         "st", "st(1)", "st(2)", "st(3)", "st(4)",
                         "st(5)", "st(6)", "st(7)", "mm0", "mm1",
                         "mm2", "mm3", "mm4", "mm5", "mm6", "mm7");

  return value;
}
#endif

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  NoBarrier_Store(ptr, value);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  MemoryBarrier();
  NoBarrier_Store(ptr, value);
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  Atomic64 value = NoBarrier_Load(ptr);
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  MemoryBarrier();
  return NoBarrier_Load(ptr);
}
#endif  // __LP64__

}   // namespace base::subtle
}   // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_MACOSX_H_