// (code-browser navigation line removed: Home | History | Annotate | Download | only in include)
      1 // <mutex> -*- C++ -*-
      2 
      3 // Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
      4 // Free Software Foundation, Inc.
      5 //
      6 // This file is part of the GNU ISO C++ Library.  This library is free
      7 // software; you can redistribute it and/or modify it under the
      8 // terms of the GNU General Public License as published by the
      9 // Free Software Foundation; either version 3, or (at your option)
     10 // any later version.
     11 
     12 // This library is distributed in the hope that it will be useful,
     13 // but WITHOUT ANY WARRANTY; without even the implied warranty of
     14 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     15 // GNU General Public License for more details.
     16 
     17 // Under Section 7 of GPL version 3, you are granted additional
     18 // permissions described in the GCC Runtime Library Exception, version
     19 // 3.1, as published by the Free Software Foundation.
     20 
     21 // You should have received a copy of the GNU General Public License and
     22 // a copy of the GCC Runtime Library Exception along with this program;
     23 // see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     24 // <http://www.gnu.org/licenses/>.
     25 
     26 /** @file include/mutex
     27  *  This is a Standard C++ Library header.
     28  */
     29 
     30 #ifndef _GLIBCXX_MUTEX
     31 #define _GLIBCXX_MUTEX 1
     32 
     33 #pragma GCC system_header
     34 
     35 #ifndef __GXX_EXPERIMENTAL_CXX0X__
     36 # include <bits/c++0x_warning.h>
     37 #else
     38 
     39 #include <tuple>
     40 #include <chrono>
     41 #include <exception>
     42 #include <type_traits>
     43 #include <functional>
     44 #include <system_error>
     45 #include <bits/functexcept.h>
     46 #include <bits/gthr.h>
     47 #include <bits/move.h> // for std::swap
     48 
     49 #if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)
     50 
     51 namespace std _GLIBCXX_VISIBILITY(default)
     52 {
     53 _GLIBCXX_BEGIN_NAMESPACE_VERSION
     54 
  // Common base class for std::mutex and std::timed_mutex.
  // Owns the native gthreads mutex and handles its construction and
  // destruction; derived classes provide the locking interface.
  class __mutex_base
  {
  protected:
    typedef __gthread_mutex_t			__native_type;

#ifdef __GTHREAD_MUTEX_INIT
    // A static initializer exists: the native mutex needs no runtime
    // construction or destruction, which lets the default constructor
    // be constexpr (required for constexpr std::mutex()).
    __native_type  _M_mutex = __GTHREAD_MUTEX_INIT;

    constexpr __mutex_base() noexcept = default;
#else
    __native_type  _M_mutex;

    __mutex_base() noexcept
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__mutex_base() noexcept { __gthread_mutex_destroy(&_M_mutex); }
#endif

    // Mutexes are neither copyable nor assignable.
    __mutex_base(const __mutex_base&) = delete;
    __mutex_base& operator=(const __mutex_base&) = delete;
  };
     80 
  // Common base class for std::recursive_mutex and std::timed_recursive_mutex.
  // Owns the native gthreads recursive mutex and handles its
  // construction and destruction.
  class __recursive_mutex_base
  {
  protected:
    typedef __gthread_recursive_mutex_t		__native_type;

    __recursive_mutex_base(const __recursive_mutex_base&) = delete;
    __recursive_mutex_base& operator=(const __recursive_mutex_base&) = delete;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    // A static initializer exists: no runtime construction or
    // destruction of the native mutex is required.
    __native_type  _M_mutex = __GTHREAD_RECURSIVE_MUTEX_INIT;

    __recursive_mutex_base() = default;
#else
    __native_type  _M_mutex;

    __recursive_mutex_base()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~__recursive_mutex_base()
    { _S_destroy(&_M_mutex); }

  private:
    // FIXME: gthreads doesn't define __gthread_recursive_mutex_destroy
    // so we need to obtain a __gthread_mutex_t to destroy.
    //
    // The _S_destroy overloads below use SFINAE to pick the strategy
    // matching the platform's recursive-mutex layout; exactly one
    // overload is viable for any given __native_type.

    // matches when there's only one mutex type
    template<typename _Rm>
      static
      typename enable_if<is_same<_Rm, __gthread_mutex_t>::value, void>::type
      _S_destroy(_Rm* __mx)
      { __gthread_mutex_destroy(__mx); }

    // matches a recursive mutex with a member 'actual' — presumably a
    // target whose recursive mutex embeds a plain mutex under that
    // name; sizeof(&_Rm::actual) is well-formed only if the member
    // exists, which is what enables/disables this overload.
    template<typename _Rm>
      static typename enable_if<(bool)sizeof(&_Rm::actual), void>::type
      _S_destroy(_Rm* __mx)
      { __gthread_mutex_destroy(&__mx->actual); }

    // matches a gthr-win32.h recursive mutex
    template<typename _Rm>
      static typename enable_if<(bool)sizeof(&_Rm::sema), void>::type
      _S_destroy(_Rm* __mx)
      {
        // Build a plain mutex that shares the recursive mutex's state
        // and destroy that instead.
        __gthread_mutex_t __tmp;
        _S_destroy_win32(&__tmp, __mx);
      }

    template<typename _Mx, typename _Rm>
      static void
      _S_destroy_win32(_Mx* __mx, _Rm const* __rmx)
      {
        __mx->counter = __rmx->counter;
        __mx->sema = __rmx->sema;
        __gthread_mutex_destroy(__mx);
      }
#endif
  };
    142 
    143   /**
    144    * @defgroup mutexes Mutexes
    145    * @ingroup concurrency
    146    *
    147    * Classes for mutex support.
    148    * @{
    149    */
    150 
    151   /// mutex
    152   class mutex : private __mutex_base
    153   {
    154   public:
    155     typedef __native_type* 			native_handle_type;
    156 
    157 #ifdef __GTHREAD_MUTEX_INIT
    158     constexpr
    159 #endif
    160     mutex() noexcept = default;
    161     ~mutex() = default;
    162 
    163     mutex(const mutex&) = delete;
    164     mutex& operator=(const mutex&) = delete;
    165 
    166     void
    167     lock()
    168     {
    169       int __e = __gthread_mutex_lock(&_M_mutex);
    170 
    171       // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
    172       if (__e)
    173 	__throw_system_error(__e);
    174     }
    175 
    176     bool
    177     try_lock() noexcept
    178     {
    179       // XXX EINVAL, EAGAIN, EBUSY
    180       return !__gthread_mutex_trylock(&_M_mutex);
    181     }
    182 
    183     void
    184     unlock()
    185     {
    186       // XXX EINVAL, EAGAIN, EPERM
    187       __gthread_mutex_unlock(&_M_mutex);
    188     }
    189 
    190     native_handle_type
    191     native_handle()
    192     { return &_M_mutex; }
    193   };
    194 
    195   /// recursive_mutex
    196   class recursive_mutex : private __recursive_mutex_base
    197   {
    198   public:
    199     typedef __native_type* 			native_handle_type;
    200 
    201     recursive_mutex() = default;
    202     ~recursive_mutex() = default;
    203 
    204     recursive_mutex(const recursive_mutex&) = delete;
    205     recursive_mutex& operator=(const recursive_mutex&) = delete;
    206 
    207     void
    208     lock()
    209     {
    210       int __e = __gthread_recursive_mutex_lock(&_M_mutex);
    211 
    212       // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
    213       if (__e)
    214 	__throw_system_error(__e);
    215     }
    216 
    217     bool
    218     try_lock() noexcept
    219     {
    220       // XXX EINVAL, EAGAIN, EBUSY
    221       return !__gthread_recursive_mutex_trylock(&_M_mutex);
    222     }
    223 
    224     void
    225     unlock()
    226     {
    227       // XXX EINVAL, EAGAIN, EBUSY
    228       __gthread_recursive_mutex_unlock(&_M_mutex);
    229     }
    230 
    231     native_handle_type
    232     native_handle()
    233     { return &_M_mutex; }
    234   };
    235 
    236 #if _GTHREAD_USE_MUTEX_TIMEDLOCK
  /// timed_mutex
  //
  // Non-recursive mutex additionally supporting timed acquisition
  // (try_lock_for / try_lock_until) via the gthreads timed-lock
  // primitive.
  class timed_mutex : private __mutex_base
  {
    // Clock used to convert relative timeouts into absolute deadlines.
#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::steady_clock 	  	__clock_t;
#else
    typedef chrono::high_resolution_clock 	__clock_t;
#endif

  public:
    typedef __native_type* 		  	native_handle_type;

    timed_mutex() = default;
    ~timed_mutex() = default;

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    // Block until the mutex is acquired; throw system_error on failure.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Attempt to acquire without blocking; true on success.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    // Attempt to acquire, giving up after the relative timeout
    // __rtime.  Dispatches on whether conversion of __rtime to the
    // clock's duration can truncate (see __try_lock_for_impl below).
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    // Attempt to acquire, giving up at the absolute time __atime.
    // NOTE(review): __atime is decomposed as-is for the native timed
    // lock; this presumably assumes _Clock's epoch matches the clock
    // used by __gthread_mutex_timedlock — confirm per target.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	// Split the deadline into whole seconds plus the nanosecond
	// remainder, the layout __gthread_time_t is initialized with.
	chrono::time_point<_Clock, chrono::seconds> __s =
	  chrono::time_point_cast<chrono::seconds>(__atime);

	chrono::nanoseconds __ns =
	  chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts = {
	  static_cast<std::time_t>(__s.time_since_epoch().count()),
	  static_cast<long>(__ns.count())
	};

	return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
      }

    // Release the mutex; errors from the primitive are ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    // Expose the underlying gthreads mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    // Selected when the clock's tick period is at least as fine as
    // _Period, where the duration_cast is treated as exact.
    template<typename _Rep, typename _Period>
      typename enable_if<
	ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }

    // Selected when the cast may truncate: the pre-increment (++)
    // rounds the converted timeout up by one clock tick so the wait
    // is never shorter than the interval requested.
    template <typename _Rep, typename _Period>
      typename enable_if<
	!ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + ++chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }
  };
    329 
  /// recursive_timed_mutex
  //
  // Recursive mutex additionally supporting timed acquisition
  // (try_lock_for / try_lock_until).
  class recursive_timed_mutex : private __recursive_mutex_base
  {
    // Clock used to convert relative timeouts into absolute deadlines.
#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::steady_clock 		__clock_t;
#else
    typedef chrono::high_resolution_clock 	__clock_t;
#endif

  public:
    typedef __native_type* 			native_handle_type;

    recursive_timed_mutex() = default;
    ~recursive_timed_mutex() = default;

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    // Acquire the mutex (recursively if already owned); throw
    // system_error on failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Attempt to acquire without blocking; true on success.
    bool
    try_lock() noexcept
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    // Attempt to acquire, giving up after the relative timeout __rtime.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    // Attempt to acquire, giving up at the absolute time __atime.
    // NOTE(review): presumably assumes _Clock's epoch matches the
    // clock used by the native timed lock — confirm per target.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	// Split the deadline into whole seconds plus the nanosecond
	// remainder, the layout __gthread_time_t is initialized with.
	chrono::time_point<_Clock, chrono::seconds>  __s =
	  chrono::time_point_cast<chrono::seconds>(__atime);

	chrono::nanoseconds __ns =
	  chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts = {
	  static_cast<std::time_t>(__s.time_since_epoch().count()),
	  static_cast<long>(__ns.count())
	};

	return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
      }

    // Release one level of ownership; errors are ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    // Expose the underlying gthreads recursive mutex object.
    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    // Selected when the clock's tick period is at least as fine as
    // _Period, where the duration_cast is treated as exact.
    template<typename _Rep, typename _Period>
      typename enable_if<
	ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }

    // Selected when the cast may truncate: the pre-increment (++)
    // rounds the converted timeout up by one clock tick so the wait
    // is never shorter than the interval requested.
    template <typename _Rep, typename _Period>
      typename enable_if<
	!ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + ++chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }
  };
    422 #endif
    423 
  /// Do not acquire ownership of the mutex.
  struct defer_lock_t { };

  /// Try to acquire ownership of the mutex without blocking.
  struct try_to_lock_t { };

  /// Assume the calling thread has already obtained mutex ownership
  /// and manage it.
  struct adopt_lock_t { };

  // Tag constants used to select among the unique_lock and lock_guard
  // constructors above.
  constexpr defer_lock_t	defer_lock { };
  constexpr try_to_lock_t	try_to_lock { };
  constexpr adopt_lock_t	adopt_lock { };
    437 
    438   /// @brief  Scoped lock idiom.
    439   // Acquire the mutex here with a constructor call, then release with
    440   // the destructor call in accordance with RAII style.
    441   template<typename _Mutex>
    442     class lock_guard
    443     {
    444     public:
    445       typedef _Mutex mutex_type;
    446 
    447       explicit lock_guard(mutex_type& __m) : _M_device(__m)
    448       { _M_device.lock(); }
    449 
    450       lock_guard(mutex_type& __m, adopt_lock_t) : _M_device(__m)
    451       { } // calling thread owns mutex
    452 
    453       ~lock_guard()
    454       { _M_device.unlock(); }
    455 
    456       lock_guard(const lock_guard&) = delete;
    457       lock_guard& operator=(const lock_guard&) = delete;
    458 
    459     private:
    460       mutex_type&  _M_device;
    461     };
    462 
  /// unique_lock
  //
  // Movable mutex-ownership wrapper.  Unlike lock_guard it may be
  // constructed without locking (defer_lock), by non-blocking or timed
  // acquisition attempts, and it tracks at runtime whether it
  // currently owns its mutex (_M_owns); the destructor unlocks only
  // if ownership is held.
  template<typename _Mutex>
    class unique_lock
    {
    public:
      typedef _Mutex mutex_type;

      // Default: associated with no mutex, owning nothing.
      unique_lock() noexcept
      : _M_device(0), _M_owns(false)
      { }

      // Associate with __m and block until it is acquired.
      explicit unique_lock(mutex_type& __m)
      : _M_device(&__m), _M_owns(false)
      {
	lock();
	_M_owns = true;
      }

      // Associate with __m but do not lock it.
      unique_lock(mutex_type& __m, defer_lock_t) noexcept
      : _M_device(&__m), _M_owns(false)
      { }

      // Associate with __m and attempt a non-blocking acquisition;
      // ownership reflects the result of try_lock().
      unique_lock(mutex_type& __m, try_to_lock_t)
      : _M_device(&__m), _M_owns(_M_device->try_lock())
      { }

      // Adopt __m, which the calling thread must already have locked.
      unique_lock(mutex_type& __m, adopt_lock_t)
      : _M_device(&__m), _M_owns(true)
      {
	// XXX calling thread owns mutex
      }

      // Associate with __m and try to lock until the absolute deadline
      // __atime (requires a timed mutex type).
      template<typename _Clock, typename _Duration>
	unique_lock(mutex_type& __m,
		    const chrono::time_point<_Clock, _Duration>& __atime)
	: _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
	{ }

      // Associate with __m and try to lock for the relative timeout
      // __rtime (requires a timed mutex type).
      template<typename _Rep, typename _Period>
	unique_lock(mutex_type& __m,
		    const chrono::duration<_Rep, _Period>& __rtime)
	: _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
	{ }

      // Unlock only if this object currently owns the mutex.
      ~unique_lock()
      {
	if (_M_owns)
	  unlock();
      }

      unique_lock(const unique_lock&) = delete;
      unique_lock& operator=(const unique_lock&) = delete;

      // Move construction transfers mutex and ownership, leaving __u
      // empty.
      unique_lock(unique_lock&& __u) noexcept
      : _M_device(__u._M_device), _M_owns(__u._M_owns)
      {
	__u._M_device = 0;
	__u._M_owns = false;
      }

      // Move assignment: release any currently-owned mutex, then take
      // over __u's state via a temporary and swap.
      unique_lock& operator=(unique_lock&& __u) noexcept
      {
	if(_M_owns)
	  unlock();

	unique_lock(std::move(__u)).swap(*this);

	// Redundant with the move construction above, but harmless:
	// make certain __u ends up empty.
	__u._M_device = 0;
	__u._M_owns = false;

	return *this;
      }

      // Lock the associated mutex.  Throws system_error with
      // operation_not_permitted if no mutex is associated, or
      // resource_deadlock_would_occur if this object already owns it.
      void
      lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_device->lock();
	    _M_owns = true;
	  }
      }

      // Non-blocking acquisition attempt; same preconditions and
      // exceptions as lock().  Returns the new ownership state.
      bool
      try_lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_owns = _M_device->try_lock();
	    return _M_owns;
	  }
      }

      // Timed acquisition until the absolute deadline __atime; same
      // preconditions and exceptions as lock().
      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_until(__atime);
	      return _M_owns;
	    }
	}

      // Timed acquisition for the relative timeout __rtime; same
      // preconditions and exceptions as lock().
      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_for(__rtime);
	      return _M_owns;
	    }
	 }

      // Release the mutex.  Throws operation_not_permitted if not
      // currently owned.
      void
      unlock()
      {
	if (!_M_owns)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_device)
	  {
	    _M_device->unlock();
	    _M_owns = false;
	  }
      }

      // Exchange state (mutex pointer and ownership flag) with __u.
      void
      swap(unique_lock& __u) noexcept
      {
	std::swap(_M_device, __u._M_device);
	std::swap(_M_owns, __u._M_owns);
      }

      // Dissociate from the mutex WITHOUT unlocking it; the caller
      // becomes responsible for the returned mutex's lock state.
      mutex_type*
      release() noexcept
      {
	mutex_type* __ret = _M_device;
	_M_device = 0;
	_M_owns = false;
	return __ret;
      }

      // True if this object currently owns its mutex.
      bool
      owns_lock() const noexcept
      { return _M_owns; }

      explicit operator bool() const noexcept
      { return owns_lock(); }

      // The associated mutex (may be null); ownership is unaffected.
      mutex_type*
      mutex() const noexcept
      { return _M_device; }

    private:
      mutex_type*	_M_device;   // associated mutex, null if none
      bool		_M_owns; // XXX use atomic_bool
    };
    637 
  /// Overload of std::swap for unique_lock objects (not a partial
  /// specialization — function templates cannot be partially
  /// specialized).  Exchanges associated mutexes and ownership state.
  template<typename _Mutex>
    inline void
    swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y) noexcept
    { __x.swap(__y); }
    643 
  // Recursively unlock tuple elements [_Idx .. 0], highest index
  // first.  NOTE(review): no uses remain in this header; presumably
  // left over from an earlier implementation of std::lock — confirm
  // before removing.
  template<int _Idx>
    struct __unlock_impl
    {
      template<typename... _Lock>
	static void
	__do_unlock(tuple<_Lock&...>& __locks)
	{
	  std::get<_Idx>(__locks).unlock();
	  __unlock_impl<_Idx - 1>::__do_unlock(__locks);
	}
    };
    655 
  // Base case of the __unlock_impl recursion: index -1 unlocks
  // nothing.
  template<>
    struct __unlock_impl<-1>
    {
      template<typename... _Lock>
	static void
	__do_unlock(tuple<_Lock&...>&)
	{ }
    };
    664 
    665   template<typename _Lock>
    666     unique_lock<_Lock>
    667     __try_to_lock(_Lock& __l)
    668     { return unique_lock<_Lock>(__l, try_to_lock); }
    669 
  // Helper for try_lock()/lock(): attempt to lock tuple element _Idx,
  // then recurse over the remaining elements.  _Continue is false when
  // _Idx is the last element, selecting the terminating specialization
  // below.  On overall success __idx is left at -1 and all locks stay
  // held; otherwise __idx names the failing element and every lock
  // taken here is rolled back by the unique_lock guards.
  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
          // Record the index being attempted before anything that can
          // throw, so __idx always names the failing lock.
          __idx = _Idx;
          auto __lock = __try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              // _Idx + 2 < sizeof...(_Lock) tests whether the NEXT
              // level still has an element after it.
              __try_lock_impl<_Idx + 1, _Idx + 2 < sizeof...(_Lock)>::
                __do_try_lock(__locks, __idx);
              // -1 means every later lock succeeded: detach this one
              // from the guard so it stays locked.
              if (__idx == -1)
                __lock.release();
              // Otherwise __lock's destructor unlocks this element.
            }
	}
    };
    688 
  // Terminating case: _Idx is the last lock in the tuple.
  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
          __idx = _Idx;
          auto __lock = __try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              // Every lock acquired: signal success with -1 and detach
              // this last lock from its guard so it stays held.
              __idx = -1;
              __lock.release();
            }
	}
    };
    705 
    706   /** @brief Generic try_lock.
    707    *  @param __l1 Meets Mutex requirements (try_lock() may throw).
    708    *  @param __l2 Meets Mutex requirements (try_lock() may throw).
    709    *  @param __l3 Meets Mutex requirements (try_lock() may throw).
    710    *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
    711    *          a 0-based index corresponding to the argument that returned false.
    712    *  @post Either all arguments are locked, or none will be.
    713    *
    714    *  Sequentially calls try_lock() on each argument.
    715    */
  template<typename _Lock1, typename _Lock2, typename... _Lock3>
    int
    try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
    {
      // __idx is always written by __do_try_lock before any operation
      // that can throw, so it is never read uninitialized.
      int __idx;
      auto __locks = std::tie(__l1, __l2, __l3...);
      __try
      { __try_lock_impl<0>::__do_try_lock(__locks, __idx); }
      __catch(...)
      { }
      // An exception from a try_lock() is swallowed here; in that case
      // __idx reports the argument that failed, and earlier locks have
      // already been rolled back by the guards in __try_lock_impl.
      return __idx;
    }
    728 
    729   /** @brief Generic lock.
    730    *  @param __l1 Meets Mutex requirements (try_lock() may throw).
    731    *  @param __l2 Meets Mutex requirements (try_lock() may throw).
    732    *  @param __l3 Meets Mutex requirements (try_lock() may throw).
    733    *  @throw An exception thrown by an argument's lock() or try_lock() member.
    734    *  @post All arguments are locked.
    735    *
    736    *  All arguments are locked via a sequence of calls to lock(), try_lock()
    737    *  and unlock().  If the call exits via an exception any locks that were
    738    *  obtained will be released.
    739    */
  template<typename _L1, typename _L2, typename ..._L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      // Strategy: block on the first mutex, then try-lock the rest.
      // If any attempt fails, everything (including __first) is
      // unlocked via RAII and the whole sequence is retried.
      // NOTE(review): the acquisition order never rotates between
      // retries, unlike later implementations — under contention this
      // loop can spin; confirm acceptability for this codebase.
      while (true)
        {
          unique_lock<_L1> __first(__l1);
          int __idx;
          auto __locks = std::tie(__l2, __l3...);
          // The bool argument (sizeof...(_L3) converted to bool) picks
          // the recursive helper when three or more locks remain after
          // __l1; with exactly two arguments the terminating
          // specialization handles __l2 directly.
          __try_lock_impl<0, sizeof...(_L3)>::__do_try_lock(__locks, __idx);
          if (__idx == -1)
            {
              // All locks held: detach __first so it stays locked.
              __first.release();
              return;
            }
        }
    }
    757 
  /// once_flag
  //
  // Flag type passed to call_once to guarantee one-time execution.
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    // Native once-control object; static initialization lets the
    // constructor be constexpr.
    __native_type  _M_once = __GTHREAD_ONCE_INIT;

  public:
    /// Constructor
    constexpr once_flag() noexcept = default;

    /// Deleted copy constructor
    once_flag(const once_flag&) = delete;
    /// Deleted assignment operator
    once_flag& operator=(const once_flag&) = delete;

    // call_once needs direct access to _M_once.
    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
    778 
    779 #ifdef _GLIBCXX_HAVE_TLS
    780   extern __thread void* __once_callable;
    781   extern __thread void (*__once_call)();
    782 
  // Trampoline installed in __once_call by call_once (TLS path):
  // invokes the callable stashed in the thread-local __once_callable
  // pointer.  _Callable records the concrete type, making the cast
  // back from void* safe.
  template<typename _Callable>
    inline void
    __once_call_impl()
    {
      (*(_Callable*)__once_callable)();
    }
    789 #else
    790   extern function<void()> __once_functor;
    791 
    792   extern void
    793   __set_once_functor_lock_ptr(unique_lock<mutex>*);
    794 
    795   extern mutex&
    796   __get_once_mutex();
    797 #endif
    798 
    799   extern "C" void __once_proxy(void);
    800 
  /// call_once
  //
  // Invoke __f(__args...) exactly once across all threads that use
  // the same once_flag, via the gthreads once primitive and the
  // __once_proxy trampoline.
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
#ifdef _GLIBCXX_HAVE_TLS
      // TLS path: stash the bound call in thread-local pointers that
      // __once_proxy reads back.
      auto __bound_functor = std::__bind_simple(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __once_callable = &__bound_functor;
      __once_call = &__once_call_impl<decltype(__bound_functor)>;
#else
      // No TLS: serialize through a global mutex and a global functor
      // object.  __set_once_functor_lock_ptr presumably arranges for
      // the lock to be dropped before the functor runs — see the
      // library-internal definition.
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      auto __callable = std::__bind_simple(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __once_functor = [&]() { __callable(); };
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&(__once._M_once), &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      // If the lock is still owned here, the functor did not run in
      // this call; detach the lock from the global pointer.
      if (__functor_lock)
        __set_once_functor_lock_ptr(0);
#endif

      if (__e)
	__throw_system_error(__e);
    }
    829 
    830   // @} group mutexes
    831 _GLIBCXX_END_NAMESPACE_VERSION
    832 } // namespace
    833 
    834 #endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1
    835 
    836 #endif // __GXX_EXPERIMENTAL_CXX0X__
    837 
    838 #endif // _GLIBCXX_MUTEX
    839