// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use atomicops.h instead.
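//
// Everything here is implemented directly on top of the GCC/Clang __sync
// builtins. As a purely illustrative sketch (not part of this header), a
// typical publish/consume use of the public atomicops API looks like:
//
//   // Producer: write the payload, then publish it via the flag.
//   payload = 42;
//   v8::base::Release_Store(&flag, 1);
//
//   // Consumer: check the flag before touching the payload.
//   if (v8::base::Acquire_Load(&flag) == 1) Use(payload);
//
// `payload`, `flag` (an Atomic32) and `Use()` are hypothetical names used
// only for this example.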

#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

namespace v8 {
namespace base {

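// __sync_synchronize() issues a full memory barrier: no memory access is
// allowed to be reordered across this point in either direction.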
inline void MemoryBarrier() { __sync_synchronize(); }

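// Atomically compare *ptr with old_value and, if they are equal, store
// new_value; the previous contents of *ptr are returned in either case.
// GCC documents __sync_val_compare_and_swap as a full barrier, so even the
// "NoBarrier" variant is at least that strong here.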
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}

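// Atomically store new_value into *ptr and return the previous contents.
// GCC documents __sync_lock_test_and_set as an acquire barrier, and notes
// that some targets only support storing the constant 1; this portable
// implementation assumes a target with a full exchange.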
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return __sync_lock_test_and_set(ptr, new_value);
}

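// Atomically add increment to *ptr and return the new (incremented) value.
// __sync_add_and_fetch acts as a full barrier, so the NoBarrier and Barrier
// increment variants below are the same operation.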
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return __sync_add_and_fetch(ptr, increment);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return __sync_add_and_fetch(ptr, increment);
}

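// The acquire and release compare-and-swap variants are identical to the
// NoBarrier version above: the underlying builtin already acts as a full
// barrier, which satisfies both orderings.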
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}

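// Every store variant below is an atomic exchange whose previous value is
// discarded. Note that GCC documents __sync_lock_test_and_set as an acquire
// barrier rather than a release barrier.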
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  __sync_lock_test_and_set(ptr, value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  __sync_lock_test_and_set(ptr, value);
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  __sync_lock_test_and_set(ptr, value);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __sync_lock_test_and_set(ptr, value);
}

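// Every load variant below is implemented as __sync_add_and_fetch(ptr, 0):
// an atomic read-modify-write that adds zero and returns the result. The
// builtin acts as a full barrier, at the cost of a write to the cache line
// on every load.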
inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return __sync_add_and_fetch(ptr, 0);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return __sync_add_and_fetch(ptr, 0);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __sync_add_and_fetch(ptr, 0);
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  return __sync_add_and_fetch(ptr, 0);
}

// 64-bit versions of the operations.
// See the 32-bit versions for comments.
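// Note: on 32-bit targets without native 64-bit atomic instructions, the
// __sync builtins used below may expand to out-of-line helper calls, so this
// portable implementation assumes the target supports 8-byte operands.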

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return __sync_lock_test_and_set(ptr, new_value);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return __sync_add_and_fetch(ptr, increment);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return __sync_add_and_fetch(ptr, increment);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __sync_lock_test_and_set(ptr, value);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  __sync_lock_test_and_set(ptr, value);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __sync_lock_test_and_set(ptr, value);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return __sync_add_and_fetch(ptr, 0);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __sync_add_and_fetch(ptr, 0);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  return __sync_add_and_fetch(ptr, 0);
}

}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_