Home | History | Annotate | Download | only in src
      1 // Copyright 2010 the V8 project authors. All rights reserved.
      2 // Redistribution and use in source and binary forms, with or without
      3 // modification, are permitted provided that the following conditions are
      4 // met:
      5 //
      6 //     * Redistributions of source code must retain the above copyright
      7 //       notice, this list of conditions and the following disclaimer.
      8 //     * Redistributions in binary form must reproduce the above
      9 //       copyright notice, this list of conditions and the following
     10 //       disclaimer in the documentation and/or other materials provided
     11 //       with the distribution.
     12 //     * Neither the name of Google Inc. nor the names of its
     13 //       contributors may be used to endorse or promote products derived
     14 //       from this software without specific prior written permission.
     15 //
     16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     27 
     28 // This file is an internal atomic implementation, use atomicops.h instead.
     29 
     30 #ifndef V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
     31 #define V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
     32 
     33 #include "checks.h"
     34 #include "win32-headers.h"
     35 
     36 namespace v8 {
     37 namespace internal {
     38 
     39 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
     40                                          Atomic32 old_value,
     41                                          Atomic32 new_value) {
     42   LONG result = InterlockedCompareExchange(
     43       reinterpret_cast<volatile LONG*>(ptr),
     44       static_cast<LONG>(new_value),
     45       static_cast<LONG>(old_value));
     46   return static_cast<Atomic32>(result);
     47 }
     48 
     49 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
     50                                          Atomic32 new_value) {
     51   LONG result = InterlockedExchange(
     52       reinterpret_cast<volatile LONG*>(ptr),
     53       static_cast<LONG>(new_value));
     54   return static_cast<Atomic32>(result);
     55 }
     56 
     57 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
     58                                         Atomic32 increment) {
     59   return InterlockedExchangeAdd(
     60       reinterpret_cast<volatile LONG*>(ptr),
     61       static_cast<LONG>(increment)) + increment;
     62 }
     63 
// Atomically adds |increment| to *ptr and returns the new value.  Simply
// forwards to Barrier_AtomicIncrement: the Interlocked intrinsic used
// there provides at least the (weaker) no-barrier semantics required here.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}
     68 
     69 #if !(defined(_MSC_VER) && _MSC_VER >= 1400)
     70 #error "We require at least vs2005 for MemoryBarrier"
     71 #endif
// Full hardware memory barrier.  Wraps the MemoryBarrier macro/intrinsic
// from WinNT.h; the preprocessor check above guarantees a compiler
// (VS2005+) that provides it.
inline void MemoryBarrier() {
  // We use MemoryBarrier from WinNT.h
  ::MemoryBarrier();
}
     76 
// Compare-and-swap with acquire semantics.  Forwards to the no-barrier
// version: on this platform the Interlocked intrinsic it uses already
// acts as a full barrier, which subsumes acquire ordering.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
     82 
// Compare-and-swap with release semantics.  Forwards to the no-barrier
// version for the same reason as Acquire_CompareAndSwap: the underlying
// Interlocked intrinsic is a full barrier.
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
     88 
// Plain store with no ordering guarantees beyond those of a volatile
// write on this platform.
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}
     92 
// Store with acquire semantics.  Implemented as an atomic exchange whose
// return value is discarded: the Interlocked intrinsic behind it is a
// full barrier, which provides the required ordering.
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  NoBarrier_AtomicExchange(ptr, value);
              // acts as a barrier in this implementation
}
     97 
// Store with release semantics.  A plain volatile store suffices here
// per the rationale in the Atomic64 Release_Store() below (x86 stores
// are not reordered with earlier stores on the chips cited there).
// NOTE(review): the referenced Atomic64 version is only compiled under
// _WIN64 — the rationale still applies, but the cross-reference dangles
// in 32-bit builds.
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
  // See comments in Atomic64 version of Release_Store() below.
}
    102 
// Plain load with no ordering guarantees beyond those of a volatile
// read on this platform.
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}
    106 
// Load with acquire semantics.  The load into a named local before the
// return is deliberate — do not "simplify" to `return *ptr;`.
// NOTE(review): presumably this relies on x86 load ordering plus MSVC's
// volatile-access semantics to keep later accesses after the load;
// confirm against the compiler documentation before changing.
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  return value;
}
    111 
// Load with release semantics: a full hardware barrier is issued before
// the value is read, so the load cannot move above prior accesses.
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
    116 
    117 #if defined(_WIN64)
    118 
    119 // 64-bit low-level operations on 64-bit platform.
    120 
    121 STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID));
    122 
    123 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
    124                                          Atomic64 old_value,
    125                                          Atomic64 new_value) {
    126   PVOID result = InterlockedCompareExchangePointer(
    127     reinterpret_cast<volatile PVOID*>(ptr),
    128     reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
    129   return reinterpret_cast<Atomic64>(result);
    130 }
    131 
    132 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
    133                                          Atomic64 new_value) {
    134   PVOID result = InterlockedExchangePointer(
    135     reinterpret_cast<volatile PVOID*>(ptr),
    136     reinterpret_cast<PVOID>(new_value));
    137   return reinterpret_cast<Atomic64>(result);
    138 }
    139 
    140 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
    141                                         Atomic64 increment) {
    142   return InterlockedExchangeAdd64(
    143       reinterpret_cast<volatile LONGLONG*>(ptr),
    144       static_cast<LONGLONG>(increment)) + increment;
    145 }
    146 
// 64-bit no-barrier add.  Forwards to Barrier_AtomicIncrement: the
// Interlocked intrinsic used there provides at least the weaker
// no-barrier semantics required here.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}
    151 
// Plain 64-bit store with no ordering guarantees beyond those of a
// volatile write (atomic on 64-bit platforms, which is all _WIN64 is).
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}
    155 
// 64-bit store with acquire semantics.  Implemented as an atomic
// exchange whose result is discarded: the Interlocked intrinsic behind
// it is a full barrier, which provides the required ordering.
inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_AtomicExchange(ptr, value);
              // acts as a barrier in this implementation
}
    160 
// 64-bit store with release semantics.  A plain volatile store suffices
// because x86/x64 do not reorder a store with earlier stores (per the
// Intel manual cited below); revisit if that architectural guarantee
// ever changes.
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
}
    171 
// Plain 64-bit load with no ordering guarantees beyond those of a
// volatile read.
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}
    175 
// 64-bit load with acquire semantics.  The load into a named local
// before the return is deliberate — do not "simplify" to `return *ptr;`.
// NOTE(review): presumably this relies on x86 load ordering plus MSVC's
// volatile-access semantics, as in the Atomic32 variant; confirm before
// changing.
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}
    180 
// 64-bit load with release semantics: a full hardware barrier is issued
// before the value is read, so the load cannot move above prior accesses.
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}
    185 
// 64-bit compare-and-swap with acquire semantics.  Forwards to the
// no-barrier version: the Interlocked intrinsic it uses already acts as
// a full barrier, which subsumes acquire ordering.
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
    191 
// 64-bit compare-and-swap with release semantics.  Forwards to the
// no-barrier version for the same reason as Acquire_CompareAndSwap: the
// underlying Interlocked intrinsic is a full barrier.
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}
    197 
    198 
    199 #endif  // defined(_WIN64)
    200 
    201 } }  // namespace v8::internal
    202 
    203 #endif  // V8_ATOMICOPS_INTERNALS_X86_MSVC_H_
    204