// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// The routines exported by this module are subtle.  If you use them, even if
// you get the code right, it will depend on careful reasoning about atomicity
// and memory ordering; it will be less readable, and harder to maintain.  If
// you plan to use these routines, you should have a good reason, such as solid
// evidence that performance would otherwise suffer, or there being no
// alternative.  You should assume only properties explicitly guaranteed by the
// specifications in this file.  You are almost certainly _not_ writing code
// just for the x86; if you assume x86 semantics, x86 hardware bugs and
// implementations on other architectures will cause your code to break.  If
// you do not know what you are doing, avoid these routines, and use a Mutex.
//
// It is incorrect to make direct assignments to/from an atomic variable.
// You should use one of the Load or Store routines.  The NoBarrier
// versions are provided when no barriers are needed:
//   NoBarrier_Store()
//   NoBarrier_Load()
// Although there is currently no compiler enforcement, you are encouraged
// to use these.
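//
// For example (an illustrative sketch only; "g_flag" is a hypothetical
// variable, not part of this interface):
//
//   Atomic32 g_flag;
//   NoBarrier_Store(&g_flag, 1);               // instead of: g_flag = 1;
//   Atomic32 value = NoBarrier_Load(&g_flag);  // instead of: value = g_flag;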
//

#ifndef V8_ATOMICOPS_H_
#define V8_ATOMICOPS_H_

#include "../include/v8.h"
#include "globals.h"

namespace v8 {
namespace internal {

typedef int32_t Atomic32;
#ifdef V8_HOST_ARCH_64_BIT
// We need to be able to go between Atomic64 and AtomicWord implicitly.  This
// means Atomic64 and AtomicWord should be the same type on 64-bit.
#if defined(__APPLE__)
// MacOS is an exception to the implicit conversion rule above,
// because it uses long for intptr_t.
typedef int64_t Atomic64;
#else
typedef intptr_t Atomic64;
#endif
#endif

// Use AtomicWord for a machine-sized pointer.  It will use the Atomic32 or
// Atomic64 routines below, depending on your architecture.
typedef intptr_t AtomicWord;
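
// For example, a pointer can be published through an AtomicWord by casting
// (an illustrative sketch; "Foo" and "g_instance" are hypothetical, and this
// assumes the Load/Store overloads below resolve for AtomicWord on the
// target platform):
//
//   AtomicWord g_instance;  // Holds a Foo*.
//   Release_Store(&g_instance, reinterpret_cast<AtomicWord>(new Foo));
//   Foo* foo = reinterpret_cast<Foo*>(Acquire_Load(&g_instance));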

// Atomically execute:
//      result = *ptr;
//      if (*ptr == old_value)
//        *ptr = new_value;
//      return result;
//
// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
// Always returns the old value of "*ptr".
//
// This routine implies no memory barriers.
Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                  Atomic32 old_value,
                                  Atomic32 new_value);
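
// For example, a lock-free "store the maximum" can be sketched on top of
// this routine (illustrative only; "UpdateMax" and its arguments are
// hypothetical, not part of this interface):
//
//   void UpdateMax(volatile Atomic32* max, Atomic32 value) {
//     Atomic32 old = NoBarrier_Load(max);
//     while (old < value) {
//       Atomic32 prev = NoBarrier_CompareAndSwap(max, old, value);
//       if (prev == old) break;  // The swap took effect.
//       old = prev;              // Lost a race; retry with the value seen.
//     }
//   }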

// Atomically store new_value into *ptr, returning the previous value held in
// *ptr.  This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value);

// Atomically increment *ptr by "increment".  Returns the new value of
// *ptr with the increment applied.  This routine implies no memory barriers.
Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment);

// Like NoBarrier_AtomicIncrement, but with "Barrier" semantics: both
// "Acquire" and "Release" ordering (see below).
Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                 Atomic32 increment);
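
// A typical use is reference counting (an illustrative sketch; "RefCounted"
// is a hypothetical type, not part of this interface).  Taking a reference
// needs no ordering, but the final decrement uses the Barrier version so
// that all prior writes to the object complete before it is deleted:
//
//   void AddRef(RefCounted* object) {
//     NoBarrier_AtomicIncrement(&object->refcount, 1);
//   }
//   void Release(RefCounted* object) {
//     if (Barrier_AtomicIncrement(&object->refcount, -1) == 0)
//       delete object;
//   }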

// The following lower-level operations are typically useful only to people
// implementing higher-level synchronization operations like spinlocks,
// mutexes, and condition variables.  They combine CompareAndSwap(), a load,
// or a store with appropriate memory-ordering instructions.  "Acquire"
// operations ensure that no later memory access can be reordered ahead of the
// operation.  "Release" operations ensure that no previous memory access can
// be reordered after the operation.  "Barrier" operations have both "Acquire"
// and "Release" semantics.  A MemoryBarrier() has "Barrier" semantics, but
// does no memory access.
Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                Atomic32 old_value,
                                Atomic32 new_value);
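
// For example, a minimal spinlock can be sketched with these routines (an
// illustrative sketch only; it spins without backoff, and uses the
// Release_Store routine declared below):
//
//   void SpinLock(volatile Atomic32* lock) {
//     while (Acquire_CompareAndSwap(lock, 0, 1) != 0) {
//       // Spin until *lock is observed free (0) and we win the swap.
//     }
//   }
//   void SpinUnlock(volatile Atomic32* lock) {
//     Release_Store(lock, 0);  // Writes in the critical section become
//                              // visible before the lock is released.
//   }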

void MemoryBarrier();
void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value);
void Acquire_Store(volatile Atomic32* ptr, Atomic32 value);
void Release_Store(volatile Atomic32* ptr, Atomic32 value);

Atomic32 NoBarrier_Load(volatile const Atomic32* ptr);
Atomic32 Acquire_Load(volatile const Atomic32* ptr);
Atomic32 Release_Load(volatile const Atomic32* ptr);
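
// A common pattern with these routines is publishing data behind a flag
// (an illustrative sketch; "g_payload", "g_ready", and the helpers are
// hypothetical):
//
//   // Producer: write the payload, then release-store the flag, so the
//   // payload write cannot be reordered after the flag write.
//   g_payload = ComputePayload();
//   Release_Store(&g_ready, 1);
//
//   // Consumer: acquire-load the flag; if it is set, the payload write is
//   // guaranteed to be visible here.
//   if (Acquire_Load(&g_ready) == 1)
//     Consume(g_payload);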

// 64-bit atomic operations (only available on 64-bit processors).
#ifdef V8_HOST_ARCH_64_BIT
Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                  Atomic64 old_value,
                                  Atomic64 new_value);
Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value);
Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);
Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment);

Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                Atomic64 old_value,
                                Atomic64 new_value);
void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value);
void Acquire_Store(volatile Atomic64* ptr, Atomic64 value);
void Release_Store(volatile Atomic64* ptr, Atomic64 value);
Atomic64 NoBarrier_Load(volatile const Atomic64* ptr);
Atomic64 Acquire_Load(volatile const Atomic64* ptr);
Atomic64 Release_Load(volatile const Atomic64* ptr);
#endif  // V8_HOST_ARCH_64_BIT
} }  // namespace v8::internal

// Include our platform-specific implementation.
#if defined(_MSC_VER) && \
  (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_msvc.h"
#elif defined(__APPLE__) && \
  (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_macosx.h"
#elif defined(__GNUC__) && \
  (defined(V8_HOST_ARCH_IA32) || defined(V8_HOST_ARCH_X64))
#include "atomicops_internals_x86_gcc.h"
#elif defined(__GNUC__) && defined(V8_HOST_ARCH_ARM)
#include "atomicops_internals_arm_gcc.h"
#elif defined(__GNUC__) && defined(V8_HOST_ARCH_MIPS)
#include "atomicops_internals_mips_gcc.h"
#else
#error "Atomic operations are not supported on your platform"
#endif

#endif  // V8_ATOMICOPS_H_