// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use base/atomicops.h instead.
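//
// A minimal usage sketch, assuming the public wrappers in base/atomicops.h
// forward directly to the functions below (`counter` is a hypothetical
// variable, not part of this file):
//
//   v8::base::Atomic32 counter = 0;
//   v8::base::Barrier_AtomicIncrement(&counter, 1);  // counter is now 1.
//   v8::base::Atomic32 seen = v8::base::Acquire_Load(&counter);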

#ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_

#include "src/base/macros.h"
#include "src/base/win32-headers.h"

#if defined(V8_HOST_ARCH_64_BIT)
// windows.h #defines this (only on x64). This causes problems because the
// public API also uses MemoryBarrier as the public name for this fence. So, on
// x64, undef it and call its documented
// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
// implementation directly.
#undef MemoryBarrier
#endif

namespace v8 {
namespace base {

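// Atomically compares *ptr against old_value and, only if they are equal,
// stores new_value; returns the value *ptr held before the operation.
// Note: MSVC's Interlocked* intrinsics are documented to generate a full
// memory barrier, so these "NoBarrier" variants are in practice stronger
// than their name requires.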
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  LONG result = InterlockedCompareExchange(
      reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(new_value),
      static_cast<LONG>(old_value));
  return static_cast<Atomic32>(result);
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  LONG result = InterlockedExchange(reinterpret_cast<volatile LONG*>(ptr),
                                    static_cast<LONG>(new_value));
  return static_cast<Atomic32>(result);
}

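// InterlockedExchangeAdd returns the value *ptr held before the addition,
// so the increment is added back to yield the post-increment value.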
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return InterlockedExchangeAdd(reinterpret_cast<volatile LONG*>(ptr),
                                static_cast<LONG>(increment)) +
         increment;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline void MemoryBarrier() {
#if defined(V8_HOST_ARCH_64_BIT)
  // See the #undef and note at the top of this file.
  __faststorefence();
#else
  // Use the MemoryBarrier macro from WinNT.h.
  ::MemoryBarrier();
#endif
}

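// On x86 with MSVC, the Interlocked compare-and-swap above already acts as
// a full memory barrier, so the acquire and release variants can simply
// forward to the plain version.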
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

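// Plain (relaxed) stores. Aligned 8- and 32-bit stores are naturally
// atomic on x86, so an ordinary volatile store suffices.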
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
  // See the comments in the Atomic64 version of Release_Store() below.
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return *ptr;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

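// On x86, ordinary loads already carry acquire semantics at the hardware
// level; the volatile read is relied on to keep the compiler from moving
// later memory accesses ahead of it.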
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  return value;
}

#if defined(_WIN64)

// 64-bit low-level operations on 64-bit platforms.

static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");

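// Atomic64 is pointer-sized on _WIN64 (checked by the static_assert above),
// so the pointer-flavored Interlocked intrinsics can be reused for it.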
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  PVOID result = InterlockedCompareExchangePointer(
    reinterpret_cast<volatile PVOID*>(ptr),
    reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  PVOID result = InterlockedExchangePointer(
    reinterpret_cast<volatile PVOID*>(ptr),
    reinterpret_cast<PVOID>(new_value));
  return reinterpret_cast<Atomic64>(result);
}

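// As in the 32-bit version above, InterlockedExchangeAdd64 returns the
// pre-increment value, so the increment is added back.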
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return InterlockedExchangeAdd64(
      reinterpret_cast<volatile LONGLONG*>(ptr),
      static_cast<LONGLONG>(increment)) + increment;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(_WIN64)

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_