/*
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ART_LIBARTBASE_BASE_ATOMIC_H_
#define ART_LIBARTBASE_BASE_ATOMIC_H_

#include <stdint.h>
#include <atomic>
#include <limits>
#include <vector>

#include <android-base/logging.h>

#include "base/macros.h"

namespace art {

template<typename T>
class PACKED(sizeof(T)) Atomic : public std::atomic<T> {
 public:
  Atomic<T>() : std::atomic<T>(T()) { }

  explicit Atomic<T>(T value) : std::atomic<T>(value) { }

  // Load from memory without ordering or synchronization constraints.
  T LoadRelaxed() const {
    return this->load(std::memory_order_relaxed);
  }

  // Load from memory with acquire ordering.
  T LoadAcquire() const {
    return this->load(std::memory_order_acquire);
  }

  // Word tearing allowed, but may race.
  // TODO: Optimize?
  // There has been some discussion of eventually disallowing word
  // tearing for Java data loads.
  T LoadJavaData() const {
    return this->load(std::memory_order_relaxed);
  }

  // Load from memory with a total ordering.
  // Corresponds exactly to a Java volatile load.
  T LoadSequentiallyConsistent() const {
    return this->load(std::memory_order_seq_cst);
  }

  // Store to memory without ordering or synchronization constraints.
  void StoreRelaxed(T desired_value) {
    this->store(desired_value, std::memory_order_relaxed);
  }

  // Word tearing allowed, but may race.
  void StoreJavaData(T desired_value) {
    this->store(desired_value, std::memory_order_relaxed);
  }

  // Store to memory with release ordering.
  void StoreRelease(T desired_value) {
    this->store(desired_value, std::memory_order_release);
  }

  // Store to memory with a total ordering.
  void StoreSequentiallyConsistent(T desired_value) {
    this->store(desired_value, std::memory_order_seq_cst);
  }

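  // Illustrative sketch (not part of this header): StoreRelease paired with LoadAcquire is the
  // usual publication idiom. The names `data_ready` and `payload` are hypothetical.
  //
  //   // Writer:
  //   payload = Compute();           // plain writes
  //   data_ready.StoreRelease(1);    // publish
  //
  //   // Reader:
  //   if (data_ready.LoadAcquire() == 1) {
  //     Use(payload);                // guaranteed to observe the writer's plain writes
  //   }
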
  // Atomically replace the value with desired_value; returns the previous value.
  // No ordering or synchronization constraints.
  T ExchangeRelaxed(T desired_value) {
    return this->exchange(desired_value, std::memory_order_relaxed);
  }

  // Atomically replace the value with desired_value; returns the previous value.
  // Participates in the total ordering of atomic operations.
  T ExchangeSequentiallyConsistent(T desired_value) {
    return this->exchange(desired_value, std::memory_order_seq_cst);
  }

  // Atomically replace the value with desired_value; returns the previous value.
  // The exchange has acquire ordering.
  T ExchangeAcquire(T desired_value) {
    return this->exchange(desired_value, std::memory_order_acquire);
  }

  // Atomically replace the value with desired_value; returns the previous value.
  // The exchange has release ordering.
  T ExchangeRelease(T desired_value) {
    return this->exchange(desired_value, std::memory_order_release);
  }

  // Atomically replace the value with desired_value if it matches the expected_value.
  // Participates in total ordering of atomic operations. Returns true on success, false otherwise.
  // If the value does not match, updates the expected_value argument with the value that was
  // atomically read for the failed comparison.
  bool CompareAndExchangeStrongSequentiallyConsistent(T* expected_value, T desired_value) {
    return this->compare_exchange_strong(*expected_value, desired_value, std::memory_order_seq_cst);
  }
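
  // Illustrative sketch (not part of this header): on failure the freshly read value is written
  // back into *expected_value, so a caller can retry without reloading. `flags` and `kBit` are
  // hypothetical names used only for this example.
  //
  //   int32_t expected = flags.LoadRelaxed();
  //   while (!flags.CompareAndExchangeStrongSequentiallyConsistent(&expected, expected | kBit)) {
  //     // `expected` now holds the value read by the failed comparison; just try again.
  //   }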

  // Atomically replace the value with desired_value if it matches the expected_value.
  // The operation (including the load for a failed comparison) has acquire ordering. Returns true
  // on success, false otherwise. If the value does not match, updates the expected_value argument
  // with the value that was atomically read for the failed comparison.
  bool CompareAndExchangeStrongAcquire(T* expected_value, T desired_value) {
    return this->compare_exchange_strong(*expected_value, desired_value, std::memory_order_acquire);
  }

  // Atomically replace the value with desired_value if it matches the expected_value.
  // A successful exchange has release ordering; the load for a failed comparison is relaxed.
  // Returns true on success, false otherwise. If the value does not match, updates the
  // expected_value argument with the value that was atomically read for the failed comparison.
  bool CompareAndExchangeStrongRelease(T* expected_value, T desired_value) {
    return this->compare_exchange_strong(*expected_value, desired_value, std::memory_order_release);
  }

  // Atomically replace the value with desired_value if it matches the expected_value.
  // Participates in total ordering of atomic operations.
  bool CompareAndSetStrongSequentiallyConsistent(T expected_value, T desired_value) {
    return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_seq_cst);
  }

  // The same, except it may fail spuriously.
  bool CompareAndSetWeakSequentiallyConsistent(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_seq_cst);
  }
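
  // Illustrative sketch (not part of this header): weak compare-and-set is typically used in a
  // retry loop, where a spurious failure simply causes another iteration. `counter` is a
  // hypothetical Atomic<int32_t>.
  //
  //   int32_t old_value;
  //   do {
  //     old_value = counter.LoadRelaxed();
  //   } while (!counter.CompareAndSetWeakSequentiallyConsistent(old_value, old_value + 1));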

  // Atomically replace the value with desired_value if it matches the expected_value. Doesn't
  // imply ordering or synchronization constraints.
  bool CompareAndSetStrongRelaxed(T expected_value, T desired_value) {
    return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_relaxed);
  }

  // Atomically replace the value with desired_value if it matches the expected_value. Prior writes
  // to other memory locations become visible to the threads that do a consume or an acquire on the
  // same location.
  bool CompareAndSetStrongRelease(T expected_value, T desired_value) {
    return this->compare_exchange_strong(expected_value, desired_value, std::memory_order_release);
  }

  // Like CompareAndSetStrongRelaxed, except it may fail spuriously.
  bool CompareAndSetWeakRelaxed(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_relaxed);
  }

  // Atomically replace the value with desired_value if it matches the expected_value. Prior writes
  // made to other memory locations by the thread that did the release become visible in this
  // thread.
  bool CompareAndSetWeakAcquire(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_acquire);
  }

  // Atomically replace the value with desired_value if it matches the expected_value. Prior writes
  // to other memory locations become visible to the threads that do a consume or an acquire on the
  // same location.
  bool CompareAndSetWeakRelease(T expected_value, T desired_value) {
    return this->compare_exchange_weak(expected_value, desired_value, std::memory_order_release);
  }

  T FetchAndAddSequentiallyConsistent(const T value) {
    return this->fetch_add(value, std::memory_order_seq_cst);  // Return old_value.
  }

  T FetchAndAddRelaxed(const T value) {
    return this->fetch_add(value, std::memory_order_relaxed);  // Return old_value.
  }

  T FetchAndAddAcquire(const T value) {
    return this->fetch_add(value, std::memory_order_acquire);  // Return old_value.
  }

  T FetchAndAddRelease(const T value) {
    return this->fetch_add(value, std::memory_order_release);  // Return old_value.
  }

  T FetchAndSubSequentiallyConsistent(const T value) {
    return this->fetch_sub(value, std::memory_order_seq_cst);  // Return old value.
  }

  T FetchAndSubRelaxed(const T value) {
    return this->fetch_sub(value, std::memory_order_relaxed);  // Return old value.
  }
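
  // Illustrative sketch (not part of this header): the fetch-and-add/sub family returns the value
  // held before the update, so a caller can derive a unique ticket or a statistics sample from it.
  // `next_ticket` is a hypothetical Atomic<uint32_t>; relaxed ordering suffices when only the
  // counter value itself matters.
  //
  //   uint32_t my_ticket = next_ticket.FetchAndAddRelaxed(1u);  // old value, unique per caller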

  T FetchAndBitwiseAndSequentiallyConsistent(const T value) {
    return this->fetch_and(value, std::memory_order_seq_cst);  // Return old_value.
  }

  T FetchAndBitwiseAndAcquire(const T value) {
    return this->fetch_and(value, std::memory_order_acquire);  // Return old_value.
  }

  T FetchAndBitwiseAndRelease(const T value) {
    return this->fetch_and(value, std::memory_order_release);  // Return old_value.
  }

  T FetchAndBitwiseOrSequentiallyConsistent(const T value) {
    return this->fetch_or(value, std::memory_order_seq_cst);  // Return old_value.
  }

  T FetchAndBitwiseOrAcquire(const T value) {
    return this->fetch_or(value, std::memory_order_acquire);  // Return old_value.
  }

  T FetchAndBitwiseOrRelease(const T value) {
    return this->fetch_or(value, std::memory_order_release);  // Return old_value.
  }

  T FetchAndBitwiseXorSequentiallyConsistent(const T value) {
    return this->fetch_xor(value, std::memory_order_seq_cst);  // Return old_value.
  }

  T FetchAndBitwiseXorAcquire(const T value) {
    return this->fetch_xor(value, std::memory_order_acquire);  // Return old_value.
  }

  T FetchAndBitwiseXorRelease(const T value) {
    return this->fetch_xor(value, std::memory_order_release);  // Return old_value.
  }

  volatile T* Address() {
    return reinterpret_cast<T*>(this);
  }

  static T MaxValue() {
    return std::numeric_limits<T>::max();
  }
};

typedef Atomic<int32_t> AtomicInteger;

static_assert(sizeof(AtomicInteger) == sizeof(int32_t), "Weird AtomicInteger size");
static_assert(alignof(AtomicInteger) == alignof(int32_t),
              "AtomicInteger alignment differs from that of underlying type");
static_assert(sizeof(Atomic<int64_t>) == sizeof(int64_t), "Weird Atomic<int64> size");

// Assert the alignment of 64-bit integers is 64-bit. This isn't true on certain 32-bit
// architectures (e.g. x86-32) but we know that 64-bit integers here are arranged to be 8-byte
// aligned.
#if defined(__LP64__)
  static_assert(alignof(Atomic<int64_t>) == alignof(int64_t),
                "Atomic<int64> alignment differs from that of underlying type");
#endif
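
// Illustrative sketch (not part of this header): AtomicInteger with the SequentiallyConsistent
// operations behaves like a Java volatile int field, per the comments above. `ref_count` is a
// hypothetical name used only for this example.
//
//   AtomicInteger ref_count(0);
//   ref_count.FetchAndAddSequentiallyConsistent(1);   // take a reference
//   if (ref_count.FetchAndSubSequentiallyConsistent(1) == 1) {
//     // The old value was 1, so this caller released the last reference.
//   }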

}  // namespace art

#endif  // ART_LIBARTBASE_BASE_ATOMIC_H_