// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_

#include <windows.h>

#include <intrin.h>

#include "base/macros.h"
#include "build/build_config.h"

#if defined(ARCH_CPU_64_BITS)
// windows.h #defines this (only on x64). This causes problems because the
// public API also uses MemoryBarrier as the public name for this fence. So,
// on x64, #undef it and call its documented
// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
// implementation directly.
#undef MemoryBarrier
#endif

namespace base {
namespace subtle {

// Atomically compares *ptr with old_value and, if they are equal, stores
// new_value; returns the value of *ptr observed before the operation.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  LONG result = _InterlockedCompareExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value),
      static_cast<LONG>(old_value));
  return static_cast<Atomic32>(result);
}
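
// Illustrative sketch (not part of this header's API): callers typically
// wrap the compare-and-swap primitive in a retry loop. The names |counter|
// and ClampedIncrement below are hypothetical.
//
//   Atomic32 ClampedIncrement(volatile Atomic32* counter, Atomic32 max) {
//     for (;;) {
//       Atomic32 old_value = NoBarrier_Load(counter);
//       if (old_value >= max)
//         return old_value;  // Already at the cap; nothing to do.
//       // Only one racing thread can win the swap for a given |old_value|;
//       // the losers observe a changed value and retry.
//       if (NoBarrier_CompareAndSwap(counter, old_value, old_value + 1) ==
//           old_value)
//         return old_value + 1;
//     }
//   }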

// Atomically stores new_value into *ptr and returns the previous value.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  LONG result = _InterlockedExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value));
  return static_cast<Atomic32>(result);
}
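
// Illustrative sketch: an atomic exchange can implement a simple
// test-and-set spinlock. |lock| (0 = free, 1 = held) and the function names
// are hypothetical; a production lock would also want an explicit ordering
// contract and a backoff strategy rather than busy-waiting.
//
//   void SpinAcquire(volatile Atomic32* lock) {
//     while (NoBarrier_AtomicExchange(lock, 1) != 0) {
//       // Spin until the previous owner writes 0 back.
//     }
//   }
//
//   void SpinRelease(volatile Atomic32* lock) {
//     Release_Store(lock, 0);
//   }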

// Atomically adds increment to *ptr and returns the resulting value.
// _InterlockedExchangeAdd returns the old value, so the increment is added
// back before returning.
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return _InterlockedExchangeAdd(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(increment)) + increment;
}

// On x86 there is no cheaper non-barrier variant, so this simply reuses the
// barrier version.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}
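
// Illustrative sketch: because Barrier_AtomicIncrement returns the updated
// value, a reference count can detect the final release. |RefCounted| and
// Release are hypothetical, not defined by this header.
//
//   void Release(RefCounted* object) {
//     if (Barrier_AtomicIncrement(&object->ref_count, -1) == 0)
//       delete object;  // This thread dropped the last reference.
//   }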

inline void MemoryBarrier() {
#if defined(ARCH_CPU_64_BITS)
  // See the #undef and note at the top of this file.
  __faststorefence();
#else
  // We use MemoryBarrier from WinNT.h.
  ::MemoryBarrier();
#endif
}
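
// Illustrative sketch: a full fence is needed for store-then-load ordering,
// the one reordering x86 permits. The flags below are hypothetical.
//
//   // Thread 1                          // Thread 2
//   NoBarrier_Store(&flag1, 1);          NoBarrier_Store(&flag2, 1);
//   MemoryBarrier();                     MemoryBarrier();
//   if (NoBarrier_Load(&flag2) == 0)     if (NoBarrier_Load(&flag1) == 0)
//     ...                                  ...
//
// Without the fences, each thread's load may be reordered before its store,
// so both threads could observe 0 and enter the guarded region at once
// (Dekker-style mutual exclusion would break).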

// On x86 the locked compare-and-swap instruction is already a full memory
// barrier, so the acquire and release variants can delegate to the
// no-barrier implementation.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  // The locked exchange acts as a barrier in this implementation.
  NoBarrier_AtomicExchange(ptr, value);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // Works without a barrier on current Intel chips as of
                 // June 2005. See the comments in the Atomic64 version of
                 // Release_Store() below.
}
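
// Illustrative sketch: Release_Store pairs with Acquire_Load to publish data
// between threads. |data|, |ready|, and Use are hypothetical.
//
//   // Producer:
//   data = 42;                  // Plain write.
//   Release_Store(&ready, 1);   // Publish: |data| becomes visible first.
//
//   // Consumer:
//   if (Acquire_Load(&ready) == 1) {
//     Use(data);                // Guaranteed to observe data == 42 here.
//   }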

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  // A volatile load suffices here: MSVC will not reorder it, and x86 does
  // not reorder a load with later loads or stores.
  Atomic32 value = *ptr;
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#if defined(_WIN64)

// 64-bit low-level operations on 64-bit platforms.

static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");

// The 64-bit versions mirror the 32-bit operations above, built on the
// pointer-sized Interlocked intrinsics.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  PVOID result = InterlockedCompareExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value),
      reinterpret_cast<PVOID>(old_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  PVOID result = InterlockedExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return InterlockedExchangeAdd64(
      reinterpret_cast<volatile LONGLONG*>(ptr),
      static_cast<LONGLONG>(increment)) + increment;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  // The locked exchange acts as a barrier in this implementation.
  NoBarrier_AtomicExchange(ptr, value);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // Works without a barrier on current Intel chips as of
                 // June 2005.

  // When new chips come out, check:
  //   IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //   System Programming Guide, Chapter 7: Multiple-processor management,
  //   Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(_WIN64)

}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_