// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.
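//
// A minimal usage sketch (hypothetical; real clients include atomicops.h,
// which selects this implementation when building under ThreadSanitizer):
//
//   v8::internal::Atomic32 flag = 0;
//   v8::internal::Release_Store(&flag, 1);                    // publish
//   if (v8::internal::Acquire_Load(&flag) == 1) { /* data visible */ }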

#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
#define V8_ATOMICOPS_INTERNALS_TSAN_H_

namespace v8 {
namespace internal {

#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H

// This struct is not part of the public API of this module; clients may not
// use it.  (However, it is exported because clients implicitly do use it at
// link time by inlining these functions.)
// Features of the x86 CPU this code runs on.  Values may not be correct
// before main() is run, but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2.
};
extern struct AtomicOps_x86CPUFeatureStruct
    AtomicOps_Internalx86CPUFeatures;
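// Hypothetical sketch of how the feature flags are consulted (the plain x86
// implementation applies this after an acquire compare-and-swap; the TSan
// build itself routes everything through the __tsan_* calls below):
//
//   if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
//     __asm__ __volatile__("lfence" : : : "memory");
//   }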

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
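// The empty asm with a "memory" clobber above is a compiler-only barrier: it
// stops the compiler from caching values in registers or reordering memory
// accesses across it, but emits no hardware fence instruction.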

#ifdef __cplusplus
extern "C" {
#endif

typedef char  __tsan_atomic8;
typedef short __tsan_atomic16;  // NOLINT
typedef int   __tsan_atomic32;
typedef long  __tsan_atomic64;  // NOLINT

#if defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)
typedef __int128 __tsan_atomic128;
#define __TSAN_HAS_INT128 1
#else
typedef char     __tsan_atomic128;
#define __TSAN_HAS_INT128 0
#endif

typedef enum {
  __tsan_memory_order_relaxed,
  __tsan_memory_order_consume,
  __tsan_memory_order_acquire,
  __tsan_memory_order_release,
  __tsan_memory_order_acq_rel,
  __tsan_memory_order_seq_cst,
} __tsan_memory_order;
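// These constants mirror the C11/C++11 memory_order values, in the same
// order: relaxed, consume, acquire, release, acq_rel, seq_cst.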

__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
    __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
    __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
    __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
    __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128 *a,
    __tsan_memory_order mo);

void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
    __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
    __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
    __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
    __tsan_memory_order mo);
void __tsan_atomic128_store(volatile __tsan_atomic128 *a, __tsan_atomic128 v,
    __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128 *a,
    __tsan_atomic128 v, __tsan_memory_order mo);

int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128 *a,
    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128 *a,
    __tsan_atomic128 *c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
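// As in C11: the _weak variants may fail spuriously (report failure even when
// *a equals *c), so they belong in retry loops; the _strong variants fail
// only when the values genuinely differ.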

__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
    volatile __tsan_atomic8 *a, __tsan_atomic8 c, __tsan_atomic8 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
    volatile __tsan_atomic16 *a, __tsan_atomic16 c, __tsan_atomic16 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
    volatile __tsan_atomic32 *a, __tsan_atomic32 c, __tsan_atomic32 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
    volatile __tsan_atomic64 *a, __tsan_atomic64 c, __tsan_atomic64 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
    volatile __tsan_atomic128 *a, __tsan_atomic128 c, __tsan_atomic128 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
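// Unlike the flag-returning variants above, the _val variants return the
// value observed in *a, in the style of __sync_val_compare_and_swap.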

void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}
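// Note that compare_exchange_strong writes the observed value back into cmp
// on failure, while on success cmp keeps old_value; either way cmp ends up
// holding the previous contents of *ptr, which is what the atomicops CAS
// contract returns.  A hypothetical caller's retry loop:
//
//   Atomic32 old;
//   do {
//     old = NoBarrier_Load(&counter);
//   } while (NoBarrier_CompareAndSwap(&counter, old, old + 1) != old);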

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
                                          Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                        Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}
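// Both increment helpers return the *new* value: fetch_add returns the old
// value, and the increment is added back.  A hypothetical reference-count
// release, where acq_rel ordering pairs the final decrement with destruction:
//
//   if (Barrier_AtomicIncrement(&refcount, -1) == 0) {
//     // Last reference dropped; safe to destroy the object.
//   }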

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}

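// Acquire_Store (here and in the Atomic64 overload below) follows the legacy
// atomicops convention of "store, then full barrier".  C11 has no acquire
// ordering for stores, so a relaxed store plus a seq_cst thread fence is
// used instead.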
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}

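// Release_Load is the mirror image of Acquire_Store: a full thread fence
// followed by a relaxed load, since C11 has no release ordering for loads.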
inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

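// MemoryBarrier issues a full sequentially consistent fence, the equivalent
// of std::atomic_thread_fence(std::memory_order_seq_cst).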
inline void MemoryBarrier() {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

}  // namespace internal
}  // namespace v8

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // V8_ATOMICOPS_INTERNALS_TSAN_H_