// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears.

#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_

namespace v8 {
namespace internal {

// 0xffff0fc0 is the hard-coded address of a function provided by
// the kernel which implements an atomic compare-exchange. On older
// ARM architecture revisions (pre-v6) this may be implemented using
// a syscall. This address is stable, and in active use (hard-coded)
// by at least glibc-2.7 and the Android C library.
typedef Atomic32 (*LinuxKernelCmpxchgFunc)(Atomic32 old_value,
                                           Atomic32 new_value,
                                           volatile Atomic32* ptr);
LinuxKernelCmpxchgFunc pLinuxKernelCmpxchg __attribute__((weak)) =
    (LinuxKernelCmpxchgFunc) 0xffff0fc0;

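// 0xffff0fa0 is the hard-coded address of the kernel helper that issues a
// full memory barrier; it is used below to implement MemoryBarrier().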
typedef void (*LinuxKernelMemoryBarrierFunc)(void);
LinuxKernelMemoryBarrierFunc pLinuxKernelMemoryBarrier __attribute__((weak)) =
    (LinuxKernelMemoryBarrierFunc) 0xffff0fa0;


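// The kernel cmpxchg helper returns 0 when it has stored new_value, and
// non-zero otherwise.  Retry until either the swap succeeds (the previous
// value was old_value, which is returned) or *ptr is observed to hold a
// different value, which is returned instead.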
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value = *ptr;
  do {
    if (!pLinuxKernelCmpxchg(old_value, new_value,
                             const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

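// Re-read the current value and feed it back into the kernel cmpxchg until
// the swap succeeds; the value that was replaced is returned.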
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (pLinuxKernelCmpxchg(old_value, new_value,
                               const_cast<Atomic32*>(ptr)));
  return old_value;
}

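// The kernel cmpxchg helper is the only primitive available here and it
// already provides the necessary memory barriers, so the no-barrier
// increment simply delegates to the barrier version.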
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  for (;;) {
    // Atomically exchange the old value with an incremented one.
    Atomic32 old_value = *ptr;
    Atomic32 new_value = old_value + increment;
    if (pLinuxKernelCmpxchg(old_value, new_value,
                            const_cast<Atomic32*>(ptr)) == 0) {
      // The exchange took place as expected.
      return new_value;
    }
    // Otherwise, *ptr changed mid-loop and we need to retry.
  }
}

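// The kernel cmpxchg helper acts as a full memory barrier, so the plain
// compare-and-swap already gives acquire semantics.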
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

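// Likewise, the barrier implied by the kernel cmpxchg helper covers the
// release case.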
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void MemoryBarrier() {
  pLinuxKernelMemoryBarrier();
}

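// The store is followed by a full barrier, so it completes before any
// later memory operation.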
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

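// A full barrier before the store ensures all earlier memory operations
// have completed before the new value becomes visible.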
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

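// The load is followed by a full barrier, so later memory operations
// cannot be reordered ahead of it.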
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

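// The barrier is issued before the load, ordering the load after all
// earlier memory operations.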
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

} }  // namespace v8::internal

#endif  // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_