// Protocol Buffers - Google's data interchange format
// Copyright 2013 Google Inc.  All rights reserved.
// https://developers.google.com/protocol-buffers/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer (http://clang.llvm.org/docs/ThreadSanitizer.html).
// Use atomicops.h instead.
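//
// The __tsan_atomic* functions declared in
// <sanitizer/tsan_interface_atomic.h> both perform the requested atomic
// operation and report it to the race detector, so code built with
// -fsanitize=thread is checked against the memory orders it actually uses.
// Illustrative only (hypothetical caller, not part of this header): a
// publisher/consumer pair built on these primitives would look like
//
//   internal::Release_Store(&flag, 1);            // publish
//   while (!internal::Acquire_Load(&flag)) {}     // consume
//
// where |flag| is an Atomic32.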

#ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_
#define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_

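// A compiler-only barrier: the empty asm with a "memory" clobber keeps the
// compiler from reordering memory accesses across it, but emits no hardware
// fence instruction.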
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

#include <sanitizer/tsan_interface_atomic.h>

namespace google {
namespace protobuf {
namespace internal {

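// Each CompareAndSwap returns the previous value of *ptr.
// __tsan_atomic32_compare_exchange_strong follows C11 semantics: on failure
// it writes the observed value back into |cmp|, and on success |cmp| still
// holds old_value, so |cmp| is the prior contents of *ptr either way.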
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr,
                                         Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32 *ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

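// Both increment helpers return the new value: __tsan_atomic32_fetch_add
// returns the value *ptr held before the addition, so the increment is
// added back.  Barrier_AtomicIncrement additionally orders the update
// acquire-release.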
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr,
                                          Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr,
                                        Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void NoBarrier_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}

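// Legacy atomicops semantics: Acquire_Store is a relaxed store followed by
// a full sequentially consistent fence, mirroring the trailing barrier used
// by the other atomicops backends.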
inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32 *ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}

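// Legacy atomicops semantics: Release_Load is a full sequentially
// consistent fence followed by a relaxed load, the mirror image of
// Acquire_Store above.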
inline Atomic32 Release_Load(volatile const Atomic32 *ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

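// 64-bit variants of the operations above; the memory-order contracts are
// identical, applied to Atomic64.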
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64 *ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void NoBarrier_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64 *ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64 *ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

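// A full (sequentially consistent) memory fence.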
inline void MemoryBarrier() {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_TSAN_H_