// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_

#include <windows.h>

#include <intrin.h>

#include "base/macros.h"

#if defined(ARCH_CPU_64_BITS)
// windows.h #defines MemoryBarrier (only on x64). This causes problems
// because the public API also uses MemoryBarrier as the public name for this
// fence. So, on x64, #undef it and call its documented
// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
// implementation directly.
#undef MemoryBarrier
#endif

namespace base {
namespace subtle {

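// Atomically compares *ptr with old_value and, iff they are equal, stores
// new_value; returns the value *ptr held beforehand. Note that MSVC's
// _Interlocked* intrinsics act as full memory barriers on x86, so the
// "NoBarrier" variants here promise only minimum ordering; this
// implementation happens to provide more.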
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  LONG result = _InterlockedCompareExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value),
      static_cast<LONG>(old_value));
  return static_cast<Atomic32>(result);
}

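// Atomically stores new_value into *ptr and returns the value that was
// overwritten.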
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  LONG result = _InterlockedExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value));
  return static_cast<Atomic32>(result);
}

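// Atomically adds increment to *ptr and returns the new (post-increment)
// value. _InterlockedExchangeAdd returns the value *before* the addition,
// so increment is added back to yield the updated value.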
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return _InterlockedExchangeAdd(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(increment)) + increment;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

#if !(defined(_MSC_VER) && _MSC_VER >= 1400)
#error "We require at least vs2005 for MemoryBarrier"
#endif
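// Emits a full hardware memory fence: neither the compiler nor the CPU may
// reorder loads or stores across this call.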
inline void MemoryBarrier() {
#if defined(ARCH_CPU_64_BITS)
  // See the #undef and note at the top of this file.
  __faststorefence();
#else
  // We use MemoryBarrier from WinNT.h.
  ::MemoryBarrier();
#endif
}
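
// The Interlocked-based compare-and-swap is already a full barrier on this
// platform, so the acquire and release flavors can simply delegate to the
// NoBarrier version.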
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

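// A plain volatile store: aligned 32-bit writes are atomic on x86, but no
// ordering beyond the compiler's volatile rules is imposed.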
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // The exchange above acts as a full barrier in this implementation.
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // Works without a barrier for current Intel chips as of
                 // June 2005. See comments in the Atomic64 version of
                 // Release_Store() below.
}

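// A plain volatile load; atomic for aligned 32-bit values on x86, with no
// extra ordering imposed.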
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

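// On x86 every load already has acquire semantics at the hardware level, and
// MSVC (since VS2005) also gives volatile reads acquire semantics at the
// compiler level, so a plain volatile load suffices here.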
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#if defined(_WIN64)

// 64-bit low-level operations on 64-bit platform.

COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);

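// The 64-bit operations mirror the 32-bit ones above but go through the
// pointer-sized Interlocked*Pointer variants, which operate on 64-bit values
// when targeting Win64 (as the COMPILE_ASSERT above verifies).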
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  PVOID result = InterlockedCompareExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  PVOID result = InterlockedExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return InterlockedExchangeAdd64(
      reinterpret_cast<volatile LONGLONG*>(ptr),
      static_cast<LONGLONG>(increment)) + increment;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // The exchange above acts as a full barrier in this implementation.
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // Works without a barrier for current Intel chips as of
                 // June 2005.

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(_WIN64)

}  // namespace subtle
}  // namespace base
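
// Example usage (an illustrative sketch): the calls below use the functions
// defined above, though client code is expected to include base/atomicops.h
// rather than this internal header.
//
//   base::subtle::Atomic32 ref_count = 0;
//   base::subtle::NoBarrier_AtomicIncrement(&ref_count, 1);  // Take a ref.
//   // Barrier_AtomicIncrement returns the new value, so a result of 0 means
//   // the last reference was just dropped.
//   if (base::subtle::Barrier_AtomicIncrement(&ref_count, -1) == 0) {
//     // Safe to tear down the shared object here.
//   }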

#endif  // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_