// -*- C++ -*- header.

// Copyright (C) 2008, 2009
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file bits/atomic_2.h
 *  This is an internal header file, included by other library headers.
 *  You should not attempt to use it directly.
 */

#ifndef _GLIBCXX_ATOMIC_2_H
#define _GLIBCXX_ATOMIC_2_H 1

#pragma GCC system_header

// _GLIBCXX_BEGIN_NAMESPACE(std)

// 2 == __atomic2 == Always lock-free
// Assumed:
// _GLIBCXX_ATOMIC_BUILTINS_1
// _GLIBCXX_ATOMIC_BUILTINS_2
// _GLIBCXX_ATOMIC_BUILTINS_4
// _GLIBCXX_ATOMIC_BUILTINS_8
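//
// All operations in this variant are implemented directly on top of the
// GCC __sync_* builtins and explicit __sync_synchronize() fences, which
// is why every type defined below reports is_lock_free() as true.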
namespace __atomic2
{
  /// atomic_flag
  struct atomic_flag : public __atomic_flag_base
  {
    atomic_flag() = default;
    ~atomic_flag() = default;
    atomic_flag(const atomic_flag&) = delete;
    atomic_flag& operator=(const atomic_flag&) = delete;

    // Conversion to ATOMIC_FLAG_INIT.
    atomic_flag(bool __i): __atomic_flag_base({ __i }) { }

    bool
    test_and_set(memory_order __m = memory_order_seq_cst) volatile
    {
      // Redundant synchronize if built-in for lock is a full barrier.
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
      return __sync_lock_test_and_set(&_M_i, 1);
    }

    void
    clear(memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_consume);
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_lock_release(&_M_i);
      if (__m != memory_order_acquire && __m != memory_order_acq_rel)
        __sync_synchronize();
    }
  };
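
  // Illustrative sketch only (not part of this header): atomic_flag is the
  // usual building block for a simple spin lock.  ATOMIC_FLAG_INIT is
  // assumed to be supplied by the headers that include this one.
  //
  //   atomic_flag __guard = ATOMIC_FLAG_INIT;
  //
  //   void lock()
  //   { while (__guard.test_and_set(memory_order_acquire)) { } }   // spin
  //
  //   void unlock()
  //   { __guard.clear(memory_order_release); }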


  /// 29.4.2, address types
  struct atomic_address
  {
  private:
    void* _M_i;

  public:
    atomic_address() = default;
    ~atomic_address() = default;
    atomic_address(const atomic_address&) = delete;
    atomic_address& operator=(const atomic_address&) = delete;

    atomic_address(void* __v) { _M_i = __v; }

    bool
    is_lock_free() const volatile
    { return true; }

    void
    store(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      __glibcxx_assert(__m != memory_order_acquire);
      __glibcxx_assert(__m != memory_order_acq_rel);
      __glibcxx_assert(__m != memory_order_consume);

      if (__m == memory_order_relaxed)
        _M_i = __v;
      else
        {
          // write_mem_barrier();
          _M_i = __v;
          if (__m == memory_order_seq_cst)
            __sync_synchronize();
        }
    }

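    // load() issues a full __sync_synchronize() fence on both sides of the
    // plain read of _M_i; this is stronger than the weaker memory orders
    // require, but correct for every order accepted by the asserts below.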
    void*
    load(memory_order __m = memory_order_seq_cst) const volatile
    {
      __glibcxx_assert(__m != memory_order_release);
      __glibcxx_assert(__m != memory_order_acq_rel);

      __sync_synchronize();
      void* __ret = _M_i;
      __sync_synchronize();
      return __ret;
    }

    void*
    exchange(void* __v, memory_order __m = memory_order_seq_cst) volatile
    {
      // XXX built-in assumes memory_order_acquire.
      return __sync_lock_test_and_set(&_M_i, __v);
    }

    bool
    compare_exchange_weak(void*& __v1, void* __v2, memory_order __m1,
                          memory_order __m2) volatile
    { return compare_exchange_strong(__v1, __v2, __m1, __m2); }

    bool
    compare_exchange_weak(void*& __v1, void* __v2,
                          memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_weak(__v1, __v2, __m,
                                   __calculate_memory_order(__m));
    }

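    // compare_exchange is built on __sync_val_compare_and_swap: the builtin
    // returns the value actually observed in _M_i, which is written back
    // into the expected-value argument so the caller can retry on failure.
    // Since the builtin never fails spuriously, the weak forms above simply
    // forward to the strong forms.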
    bool
    compare_exchange_strong(void*& __v1, void* __v2, memory_order __m1,
                            memory_order __m2) volatile
    {
      __glibcxx_assert(__m2 != memory_order_release);
      __glibcxx_assert(__m2 != memory_order_acq_rel);
      __glibcxx_assert(__m2 <= __m1);

      void* __v1o = __v1;
      void* __v1n = __sync_val_compare_and_swap(&_M_i, __v1o, __v2);

      // Assume extra stores (of same value) allowed in true case.
      __v1 = __v1n;
      return __v1o == __v1n;
    }

    bool
    compare_exchange_strong(void*& __v1, void* __v2,
                            memory_order __m = memory_order_seq_cst) volatile
    {
      return compare_exchange_strong(__v1, __v2, __m,
                                     __calculate_memory_order(__m));
    }

    void*
    fetch_add(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_add(&_M_i, __d); }

    void*
    fetch_sub(ptrdiff_t __d, memory_order __m = memory_order_seq_cst) volatile
    { return __sync_fetch_and_sub(&_M_i, __d); }

    operator void*() const volatile
    { return load(); }

    void*
    operator=(void* __v) // XXX volatile
    {
      store(__v);
      return __v;
    }

    void*
    operator+=(ptrdiff_t __d) volatile
    { return __sync_add_and_fetch(&_M_i, __d); }

    void*
    operator-=(ptrdiff_t __d) volatile
    { return __sync_sub_and_fetch(&_M_i, __d); }
  };

  // 29.3.1 atomic integral types
  // For each of the integral types, define atomic_[integral type] struct
  //
  // atomic_bool     bool
  // atomic_char     char
  // atomic_schar    signed char
  // atomic_uchar    unsigned char
  // atomic_short    short
  // atomic_ushort   unsigned short
  // atomic_int      int
  // atomic_uint     unsigned int
  // atomic_long     long
  // atomic_ulong    unsigned long
  // atomic_llong    long long
  // atomic_ullong   unsigned long long
  // atomic_char16_t char16_t
  // atomic_char32_t char32_t
  // atomic_wchar_t  wchar_t

  // Base type.
  // NB: Assuming _ITp is an integral scalar type that is 1, 2, 4, or 8 bytes,
  // since that is what GCC built-in functions for atomic memory access work on.
  template<typename _ITp>
    struct __atomic_base
    {
    private:
      typedef _ITp __integral_type;

      __integral_type _M_i;

    public:
      __atomic_base() = default;
      ~__atomic_base() = default;
      __atomic_base(const __atomic_base&) = delete;
      __atomic_base& operator=(const __atomic_base&) = delete;

      // Requires __integral_type convertible to _M_i.
      __atomic_base(__integral_type __i) { _M_i = __i; }

      operator __integral_type() const volatile
      { return load(); }

      __integral_type
      operator=(__integral_type __i) // XXX volatile
      {
        store(__i);
        return __i;
      }

      __integral_type
      operator++(int) volatile
      { return fetch_add(1); }

      __integral_type
      operator--(int) volatile
      { return fetch_sub(1); }

      __integral_type
      operator++() volatile
      { return __sync_add_and_fetch(&_M_i, 1); }

      __integral_type
      operator--() volatile
      { return __sync_sub_and_fetch(&_M_i, 1); }

      __integral_type
      operator+=(__integral_type __i) volatile
      { return __sync_add_and_fetch(&_M_i, __i); }

      __integral_type
      operator-=(__integral_type __i) volatile
      { return __sync_sub_and_fetch(&_M_i, __i); }

      __integral_type
      operator&=(__integral_type __i) volatile
      { return __sync_and_and_fetch(&_M_i, __i); }

      __integral_type
      operator|=(__integral_type __i) volatile
      { return __sync_or_and_fetch(&_M_i, __i); }

      __integral_type
      operator^=(__integral_type __i) volatile
      { return __sync_xor_and_fetch(&_M_i, __i); }

      bool
      is_lock_free() const volatile
      { return true; }

      void
      store(__integral_type __i,
            memory_order __m = memory_order_seq_cst) volatile
      {
        __glibcxx_assert(__m != memory_order_acquire);
        __glibcxx_assert(__m != memory_order_acq_rel);
        __glibcxx_assert(__m != memory_order_consume);

        if (__m == memory_order_relaxed)
          _M_i = __i;
        else
          {
            // write_mem_barrier();
            _M_i = __i;
            if (__m == memory_order_seq_cst)
              __sync_synchronize();
          }
      }

      __integral_type
      load(memory_order __m = memory_order_seq_cst) const volatile
      {
        __glibcxx_assert(__m != memory_order_release);
        __glibcxx_assert(__m != memory_order_acq_rel);

        __sync_synchronize();
        __integral_type __ret = _M_i;
        __sync_synchronize();
        return __ret;
      }

      __integral_type
      exchange(__integral_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      {
        // XXX built-in assumes memory_order_acquire.
        return __sync_lock_test_and_set(&_M_i, __i);
      }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m1, memory_order __m2) volatile
      { return compare_exchange_strong(__i1, __i2, __m1, __m2); }

      bool
      compare_exchange_weak(__integral_type& __i1, __integral_type __i2,
                            memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_weak(__i1, __i2, __m,
                                     __calculate_memory_order(__m));
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m1, memory_order __m2) volatile
      {
        __glibcxx_assert(__m2 != memory_order_release);
        __glibcxx_assert(__m2 != memory_order_acq_rel);
        __glibcxx_assert(__m2 <= __m1);

        __integral_type __i1o = __i1;
        __integral_type __i1n = __sync_val_compare_and_swap(&_M_i, __i1o, __i2);

        // Assume extra stores (of same value) allowed in true case.
        __i1 = __i1n;
        return __i1o == __i1n;
      }

      bool
      compare_exchange_strong(__integral_type& __i1, __integral_type __i2,
                              memory_order __m = memory_order_seq_cst) volatile
      {
        return compare_exchange_strong(__i1, __i2, __m,
                                       __calculate_memory_order(__m));
      }

      __integral_type
      fetch_add(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_add(&_M_i, __i); }

      __integral_type
      fetch_sub(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_sub(&_M_i, __i); }

      __integral_type
      fetch_and(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_and(&_M_i, __i); }

      __integral_type
      fetch_or(__integral_type __i,
               memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_or(&_M_i, __i); }

      __integral_type
      fetch_xor(__integral_type __i,
                memory_order __m = memory_order_seq_cst) volatile
      { return __sync_fetch_and_xor(&_M_i, __i); }
    };
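
  // Illustrative sketch only (not part of this header): a shared counter
  // built on __atomic_base.  fetch_add/fetch_sub return the previous value;
  // the compound-assignment operators return the new one.
  //
  //   __atomic_base<int> __count(0);
  //
  //   void retain()  { __count.fetch_add(1, memory_order_relaxed); }
  //   bool release() { return __count.fetch_sub(1) == 1; }   // last user?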


  /// atomic_bool
  // NB: No operators or fetch-operations for this type.
  struct atomic_bool
  {
  private:
    __atomic_base<bool> _M_base;

  public:
    atomic_bool() = default;
    ~atomic_bool() = default;
    atomic_bool(const atomic_bool&) = delete;
    atomic_bool& operator=(const atomic_bool&) = delete;

    atomic_bool(bool __i) : _M_base(__i) { }

    bool
    operator=(bool __i) // XXX volatile
    { return _M_base.operator=(__i); }

    operator bool() const volatile
    { return _M_base.load(); }

    bool
    is_lock_free() const volatile
    { return _M_base.is_lock_free(); }

    void
    store(bool __i, memory_order __m = memory_order_seq_cst) volatile
    { _M_base.store(__i, __m); }

    bool
    load(memory_order __m = memory_order_seq_cst) const volatile
    { return _M_base.load(__m); }

    bool
    exchange(bool __i, memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.exchange(__i, __m); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2, memory_order __m1,
                          memory_order __m2) volatile
    { return _M_base.compare_exchange_weak(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_weak(bool& __i1, bool __i2,
                          memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.compare_exchange_weak(__i1, __i2, __m); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2, memory_order __m1,
                            memory_order __m2) volatile
    { return _M_base.compare_exchange_strong(__i1, __i2, __m1, __m2); }

    bool
    compare_exchange_strong(bool& __i1, bool __i2,
                            memory_order __m = memory_order_seq_cst) volatile
    { return _M_base.compare_exchange_strong(__i1, __i2, __m); }
  };
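
  // Illustrative sketch only (not part of this header): atomic_bool as a
  // one-shot "ready" flag between two threads.
  //
  //   atomic_bool __ready(false);
  //
  //   // Producer, after publishing its data:
  //   __ready.store(true, memory_order_release);
  //
  //   // Consumer:
  //   while (!__ready.load(memory_order_acquire))
  //     { }   // once the loop exits, the produced data is visible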
} // namespace __atomic2

// _GLIBCXX_END_NAMESPACE

#endif