// GCC libstdc++ include/mutex — code-browser navigation header removed.
      1 // <mutex> -*- C++ -*-
      2 
      3 // Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010
      4 // Free Software Foundation, Inc.
      5 //
      6 // This file is part of the GNU ISO C++ Library.  This library is free
      7 // software; you can redistribute it and/or modify it under the
      8 // terms of the GNU General Public License as published by the
      9 // Free Software Foundation; either version 3, or (at your option)
     10 // any later version.
     11 
     12 // This library is distributed in the hope that it will be useful,
     13 // but WITHOUT ANY WARRANTY; without even the implied warranty of
     14 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
     15 // GNU General Public License for more details.
     16 
     17 // Under Section 7 of GPL version 3, you are granted additional
     18 // permissions described in the GCC Runtime Library Exception, version
     19 // 3.1, as published by the Free Software Foundation.
     20 
     21 // You should have received a copy of the GNU General Public License and
     22 // a copy of the GCC Runtime Library Exception along with this program;
     23 // see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
     24 // <http://www.gnu.org/licenses/>.
     25 
     26 /** @file include/mutex
     27  *  This is a Standard C++ Library header.
     28  */
     29 
     30 #ifndef _GLIBCXX_MUTEX
     31 #define _GLIBCXX_MUTEX 1
     32 
     33 #pragma GCC system_header
     34 
     35 #ifndef __GXX_EXPERIMENTAL_CXX0X__
     36 # include <bits/c++0x_warning.h>
     37 #else
     38 
     39 #include <tuple>
     40 #include <chrono>
     41 #include <exception>
     42 #include <type_traits>
     43 #include <functional>
     44 #include <system_error>
     45 #include <bits/functexcept.h>
     46 #include <bits/gthr.h>
     47 #include <bits/move.h> // for std::swap
     48 
     49 #if defined(_GLIBCXX_HAS_GTHREADS) && defined(_GLIBCXX_USE_C99_STDINT_TR1)
     50 
     51 namespace std _GLIBCXX_VISIBILITY(default)
     52 {
     53 _GLIBCXX_BEGIN_NAMESPACE_VERSION
     54 
     55   /**
     56    * @defgroup mutexes Mutexes
     57    * @ingroup concurrency
     58    *
     59    * Classes for mutex support.
     60    * @{
     61    */
     62 
  /// mutex
  ///
  /// Standard mutual-exclusion device built on the gthreads
  /// abstraction (__gthread_mutex_t).  Non-recursive: relocking from
  /// the owning thread is undefined behaviour.
  class mutex
  {
    typedef __gthread_mutex_t			__native_type;
    __native_type  _M_mutex;

  public:
    typedef __native_type* 			native_handle_type;

#ifdef __GTHREAD_MUTEX_INIT
    // A static initializer exists: the native mutex is trivially
    // destructible, so no destructor is needed and the constructor
    // can be constexpr (constant initialization, no code at startup).
    constexpr mutex() : _M_mutex(__GTHREAD_MUTEX_INIT) { }
#else
    mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~mutex() { __gthread_mutex_destroy(&_M_mutex); }
#endif

    mutex(const mutex&) = delete;
    mutex& operator=(const mutex&) = delete;

    // Block until the mutex is acquired.  Throws system_error with the
    // native error code on failure.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Non-blocking acquisition attempt; true on success.  Native
    // errors are reported as a failed attempt, not as exceptions.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    // Release the mutex.  The native return code is deliberately
    // ignored: unlock() must not throw.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EPERM
      __gthread_mutex_unlock(&_M_mutex);
    }

    // Expose the underlying gthreads object for platform-specific use.
    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
    115 
#ifndef __GTHREAD_RECURSIVE_MUTEX_INIT
  // FIXME: gthreads doesn't define __gthread_recursive_mutex_destroy
  // so we need to obtain a __gthread_mutex_t to destroy
  //
  // Destruction helper for recursive mutexes when no static
  // initializer exists.  Exactly one _S_destroy overload below is
  // viable for a given gthreads port; the others are removed by
  // SFINAE (sizeof(&_Rm::member) is ill-formed when the member is
  // absent; enable_if<is_same<...>> gates the generic case).
  class __destroy_recursive_mutex
  {
    // Copy the gthr-win32.h recursive mutex's fields into a plain
    // __gthread_mutex_t so the ordinary destroy function applies.
    template<typename _Mx, typename _Rm>
      static void
      _S_destroy_win32(_Mx* __mx, _Rm const* __rmx)
      {
        __mx->counter = __rmx->counter;
        __mx->sema = __rmx->sema;
        __gthread_mutex_destroy(__mx);
      }

  public:
    // matches a gthr-win32.h recursive mutex
    template<typename _Rm>
      static typename enable_if<sizeof(&_Rm::sema), void>::type
      _S_destroy(_Rm* __mx)
      {
        __gthread_mutex_t __tmp;
        _S_destroy_win32(&__tmp, __mx);
      }

    // matches a recursive mutex with a member 'actual'
    template<typename _Rm>
      static typename enable_if<sizeof(&_Rm::actual), void>::type
      _S_destroy(_Rm* __mx)
      { __gthread_mutex_destroy(&__mx->actual); }

    // matches when there's only one mutex type (recursive and plain
    // mutexes share the representation)
    template<typename _Rm>
      static
      typename enable_if<is_same<_Rm, __gthread_mutex_t>::value, void>::type
      _S_destroy(_Rm* __mx)
      { __gthread_mutex_destroy(__mx); }
  };
#endif
    154 
  /// recursive_mutex
  ///
  /// Mutex that may be re-locked by the thread that already owns it;
  /// each lock()/try_lock() must be balanced by an unlock().  Built on
  /// the gthreads recursive mutex type.
  class recursive_mutex
  {
    typedef __gthread_recursive_mutex_t		__native_type;
    __native_type  _M_mutex;

  public:
    typedef __native_type* 			native_handle_type;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    // Static initializer available: trivially destructible, so no
    // destructor is defined.
    recursive_mutex() : _M_mutex(__GTHREAD_RECURSIVE_MUTEX_INIT) { }
#else
    recursive_mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    // gthreads has no recursive destroy; dispatch through the SFINAE
    // helper above to pick the right teardown for this port.
    ~recursive_mutex()
    { __destroy_recursive_mutex::_S_destroy(&_M_mutex); }
#endif

    recursive_mutex(const recursive_mutex&) = delete;
    recursive_mutex& operator=(const recursive_mutex&) = delete;

    // Acquire (or re-acquire) the mutex; throws system_error on failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Non-blocking acquisition attempt; true on success.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    // Drop one level of ownership; error returns deliberately ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }
  };
    208 
  /// timed_mutex
  ///
  /// Non-recursive mutex additionally supporting timed acquisition
  /// (try_lock_for / try_lock_until) via __gthread_mutex_timedlock.
  class timed_mutex
  {
    typedef __gthread_mutex_t 		  	__native_type;

    // Clock used to convert a relative timeout into an absolute one.
#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::monotonic_clock 	  	__clock_t;
#else
    typedef chrono::high_resolution_clock 	__clock_t;
#endif

    __native_type  _M_mutex;

  public:
    typedef __native_type* 		  	native_handle_type;

#ifdef __GTHREAD_MUTEX_INIT
    // Static initializer: trivially destructible, no destructor needed.
    timed_mutex() : _M_mutex(__GTHREAD_MUTEX_INIT) { }
#else
    timed_mutex()
    {
      __GTHREAD_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    ~timed_mutex() { __gthread_mutex_destroy(&_M_mutex); }
#endif

    timed_mutex(const timed_mutex&) = delete;
    timed_mutex& operator=(const timed_mutex&) = delete;

    // Block until acquired; throws system_error on failure.
    void
    lock()
    {
      int __e = __gthread_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Non-blocking acquisition attempt; true on success.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_mutex_trylock(&_M_mutex);
    }

    // Try to acquire, giving up after the relative time __rtime.
    // Dispatches on whether __rtime's period needs rounding up when
    // converted to __clock_t's duration (see the helpers below).
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    // Try to acquire, giving up at the absolute time __atime.
    // NOTE(review): __atime is split into whole seconds + nanoseconds
    // and handed straight to the native timedlock, which interprets it
    // on its own clock; time points from unrelated clocks are not
    // converted — confirm callers pass a compatible clock.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	chrono::time_point<_Clock, chrono::seconds> __s =
	  chrono::time_point_cast<chrono::seconds>(__atime);

	chrono::nanoseconds __ns =
	  chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts = {
	  static_cast<std::time_t>(__s.time_since_epoch().count()),
	  static_cast<long>(__ns.count())
	};

	return !__gthread_mutex_timedlock(&_M_mutex, &__ts);
      }

    // Release; native return code deliberately ignored (must not throw).
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    // Selected when __clock_t ticks at least as finely as _Period: the
    // duration_cast below is exact, so no requested time is lost.
    template<typename _Rep, typename _Period>
      typename enable_if<
	ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }

    // Selected when __clock_t's tick is coarser than _Period: the cast
    // truncates toward zero, so the result is pre-incremented by one
    // tick (duration::operator++) to round up and never wait shorter
    // than requested.
    template <typename _Rep, typename _Period>
      typename enable_if<
	!ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + ++chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }
  };
    313 
  /// recursive_timed_mutex
  ///
  /// Recursive mutex with timed acquisition, combining the semantics
  /// of recursive_mutex and timed_mutex.
  class recursive_timed_mutex
  {
    typedef __gthread_recursive_mutex_t		__native_type;

    // Clock used to convert a relative timeout into an absolute one.
#ifdef _GLIBCXX_USE_CLOCK_MONOTONIC
    typedef chrono::monotonic_clock 		__clock_t;
#else
    typedef chrono::high_resolution_clock 	__clock_t;
#endif

    __native_type  _M_mutex;

  public:
    typedef __native_type* 			native_handle_type;

#ifdef __GTHREAD_RECURSIVE_MUTEX_INIT
    // Static initializer: trivially destructible, no destructor needed.
    recursive_timed_mutex() : _M_mutex(__GTHREAD_RECURSIVE_MUTEX_INIT) { }
#else
    recursive_timed_mutex()
    {
      // XXX EAGAIN, ENOMEM, EPERM, EBUSY(may), EINVAL(may)
      __GTHREAD_RECURSIVE_MUTEX_INIT_FUNCTION(&_M_mutex);
    }

    // gthreads has no recursive destroy; use the SFINAE helper above.
    ~recursive_timed_mutex()
    { __destroy_recursive_mutex::_S_destroy(&_M_mutex); }
#endif

    recursive_timed_mutex(const recursive_timed_mutex&) = delete;
    recursive_timed_mutex& operator=(const recursive_timed_mutex&) = delete;

    // Acquire (or re-acquire); throws system_error on failure.
    void
    lock()
    {
      int __e = __gthread_recursive_mutex_lock(&_M_mutex);

      // EINVAL, EAGAIN, EBUSY, EINVAL, EDEADLK(may)
      if (__e)
	__throw_system_error(__e);
    }

    // Non-blocking acquisition attempt; true on success.
    bool
    try_lock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      return !__gthread_recursive_mutex_trylock(&_M_mutex);
    }

    // Try to acquire, giving up after the relative time __rtime.
    template <class _Rep, class _Period>
      bool
      try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
      { return __try_lock_for_impl(__rtime); }

    // Try to acquire, giving up at the absolute time __atime.
    // NOTE(review): as in timed_mutex, __atime is passed to the native
    // timedlock without clock conversion — confirm callers pass a
    // compatible clock.
    template <class _Clock, class _Duration>
      bool
      try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
      {
	chrono::time_point<_Clock, chrono::seconds>  __s =
	  chrono::time_point_cast<chrono::seconds>(__atime);

	chrono::nanoseconds __ns =
	  chrono::duration_cast<chrono::nanoseconds>(__atime - __s);

	__gthread_time_t __ts = {
	  static_cast<std::time_t>(__s.time_since_epoch().count()),
	  static_cast<long>(__ns.count())
	};

	return !__gthread_recursive_mutex_timedlock(&_M_mutex, &__ts);
      }

    // Drop one level of ownership; error returns deliberately ignored.
    void
    unlock()
    {
      // XXX EINVAL, EAGAIN, EBUSY
      __gthread_recursive_mutex_unlock(&_M_mutex);
    }

    native_handle_type
    native_handle()
    { return &_M_mutex; }

  private:
    // Exact conversion case: __clock_t ticks at least as finely as
    // _Period, so the duration_cast loses nothing.
    template<typename _Rep, typename _Period>
      typename enable_if<
	ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }

    // Truncating conversion case: round up by one tick via
    // duration::operator++ so we never wait shorter than requested.
    template <typename _Rep, typename _Period>
      typename enable_if<
	!ratio_less_equal<__clock_t::period, _Period>::value, bool>::type
      __try_lock_for_impl(const chrono::duration<_Rep, _Period>& __rtime)
      {
	__clock_t::time_point __atime = __clock_t::now()
	  + ++chrono::duration_cast<__clock_t::duration>(__rtime);

	return try_lock_until(__atime);
      }
  };
    420 
  /// Do not acquire ownership of the mutex.
  struct defer_lock_t { };

  /// Try to acquire ownership of the mutex without blocking.
  struct try_to_lock_t { };

  /// Assume the calling thread has already obtained mutex ownership
  /// and manage it.
  struct adopt_lock_t { };

  /// Tag objects passed to lock_guard/unique_lock constructors to
  /// select the locking policy at overload-resolution time.
  constexpr defer_lock_t	defer_lock { };
  constexpr try_to_lock_t	try_to_lock { };
  constexpr adopt_lock_t	adopt_lock { };
    434 
    435   /// @brief  Scoped lock idiom.
    436   // Acquire the mutex here with a constructor call, then release with
    437   // the destructor call in accordance with RAII style.
    438   template<typename _Mutex>
    439     class lock_guard
    440     {
    441     public:
    442       typedef _Mutex mutex_type;
    443 
    444       explicit lock_guard(mutex_type& __m) : _M_device(__m)
    445       { _M_device.lock(); }
    446 
    447       lock_guard(mutex_type& __m, adopt_lock_t) : _M_device(__m)
    448       { } // calling thread owns mutex
    449 
    450       ~lock_guard()
    451       { _M_device.unlock(); }
    452 
    453       lock_guard(const lock_guard&) = delete;
    454       lock_guard& operator=(const lock_guard&) = delete;
    455 
    456     private:
    457       mutex_type&  _M_device;
    458     };
    459 
  /// unique_lock
  ///
  /// Movable mutex-ownership wrapper.  Unlike lock_guard it may be
  /// constructed without locking, may lock/unlock repeatedly during
  /// its lifetime, and tracks whether it currently owns the lock
  /// (_M_owns).  The destructor unlocks only if ownership is held.
  template<typename _Mutex>
    class unique_lock
    {
    public:
      typedef _Mutex mutex_type;

      // No associated mutex, no ownership.
      unique_lock()
      : _M_device(0), _M_owns(false)
      { }

      // Associate with __m and block until it is acquired.
      explicit unique_lock(mutex_type& __m)
      : _M_device(&__m), _M_owns(false)
      {
	lock();
	_M_owns = true;
      }

      // Associate with __m without locking it.
      unique_lock(mutex_type& __m, defer_lock_t)
      : _M_device(&__m), _M_owns(false)
      { }

      // Associate with __m and attempt a non-blocking lock; ownership
      // reflects the attempt's outcome.
      unique_lock(mutex_type& __m, try_to_lock_t)
      : _M_device(&__m), _M_owns(_M_device->try_lock())
      { }

      // Adopt __m, which the calling thread must already have locked.
      unique_lock(mutex_type& __m, adopt_lock_t)
      : _M_device(&__m), _M_owns(true)
      {
	// XXX calling thread owns mutex
      }

      // Timed lock attempt until the absolute time __atime (requires a
      // timed mutex type).
      template<typename _Clock, typename _Duration>
	unique_lock(mutex_type& __m,
		    const chrono::time_point<_Clock, _Duration>& __atime)
	: _M_device(&__m), _M_owns(_M_device->try_lock_until(__atime))
	{ }

      // Timed lock attempt for the relative duration __rtime.
      template<typename _Rep, typename _Period>
	unique_lock(mutex_type& __m,
		    const chrono::duration<_Rep, _Period>& __rtime)
	: _M_device(&__m), _M_owns(_M_device->try_lock_for(__rtime))
	{ }

      ~unique_lock()
      {
	if (_M_owns)
	  unlock();
      }

      unique_lock(const unique_lock&) = delete;
      unique_lock& operator=(const unique_lock&) = delete;

      // Move construction transfers the association and ownership and
      // leaves __u empty.
      unique_lock(unique_lock&& __u)
      : _M_device(__u._M_device), _M_owns(__u._M_owns)
      {
	__u._M_device = 0;
	__u._M_owns = false;
      }

      // Move assignment: any currently held lock is released first,
      // then state is taken from __u via a swapped temporary.
      unique_lock& operator=(unique_lock&& __u)
      {
	if(_M_owns)
	  unlock();

	// The temporary ends up holding our old (now unowned) state
	// and is discarded; *this takes __u's state.
	unique_lock(std::move(__u)).swap(*this);

	__u._M_device = 0;
	__u._M_owns = false;

	return *this;
      }

      // Blocking lock.  Throws system_error: operation_not_permitted
      // when no mutex is attached, resource_deadlock_would_occur when
      // the lock is already owned by this wrapper.
      void
      lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_device->lock();
	    _M_owns = true;
	  }
      }

      // Non-blocking lock attempt; same precondition errors as lock().
      bool
      try_lock()
      {
	if (!_M_device)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_owns)
	  __throw_system_error(int(errc::resource_deadlock_would_occur));
	else
	  {
	    _M_owns = _M_device->try_lock();
	    return _M_owns;
	  }
      }

      // Timed lock attempt until __atime; same precondition errors.
      template<typename _Clock, typename _Duration>
	bool
	try_lock_until(const chrono::time_point<_Clock, _Duration>& __atime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_until(__atime);
	      return _M_owns;
	    }
	}

      // Timed lock attempt for __rtime; same precondition errors.
      template<typename _Rep, typename _Period>
	bool
	try_lock_for(const chrono::duration<_Rep, _Period>& __rtime)
	{
	  if (!_M_device)
	    __throw_system_error(int(errc::operation_not_permitted));
	  else if (_M_owns)
	    __throw_system_error(int(errc::resource_deadlock_would_occur));
	  else
	    {
	      _M_owns = _M_device->try_lock_for(__rtime);
	      return _M_owns;
	    }
	 }

      // Release the lock; throws operation_not_permitted if not owned.
      void
      unlock()
      {
	if (!_M_owns)
	  __throw_system_error(int(errc::operation_not_permitted));
	else if (_M_device)
	  {
	    _M_device->unlock();
	    _M_owns = false;
	  }
      }

      // Exchange state with another unique_lock (pointer + flag only).
      void
      swap(unique_lock& __u)
      {
	std::swap(_M_device, __u._M_device);
	std::swap(_M_owns, __u._M_owns);
      }

      // Dissociate from the mutex WITHOUT unlocking it; the caller
      // becomes responsible for the (possibly held) lock.
      mutex_type*
      release()
      {
	mutex_type* __ret = _M_device;
	_M_device = 0;
	_M_owns = false;
	return __ret;
      }

      // True if this wrapper currently owns the lock.
      bool
      owns_lock() const
      { return _M_owns; }

      explicit operator bool() const
      { return owns_lock(); }

      // The associated mutex, or null.
      mutex_type*
      mutex() const
      { return _M_device; }

    private:
      mutex_type*	_M_device;
      bool		_M_owns; // XXX use atomic_bool
    };
    634 
    635   template<typename _Mutex>
    636     inline void
    637     swap(unique_lock<_Mutex>& __x, unique_lock<_Mutex>& __y)
    638     { __x.swap(__y); }
    639 
  // Unlock tuple elements _Idx, _Idx-1, ..., 0, recursing downward;
  // the <-1> specialization below terminates the recursion.
  template<int _Idx>
    struct __unlock_impl
    {
      template<typename... _Lock>
	static void
	__do_unlock(tuple<_Lock&...>& __locks)
	{
	  std::get<_Idx>(__locks).unlock();
	  __unlock_impl<_Idx - 1>::__do_unlock(__locks);
	}
    };
    651 
  // Recursion terminator for __unlock_impl: nothing left to unlock.
  template<>
    struct __unlock_impl<-1>
    {
      template<typename... _Lock>
	static void
	__do_unlock(tuple<_Lock&...>&)
	{ }
    };
    660 
  // Wrap __l in a unique_lock after a non-blocking lock attempt; the
  // outcome is reported through the returned lock's owns_lock().
  template<typename _Lock>
    unique_lock<_Lock>
    __try_to_lock(_Lock& __l)
    { return unique_lock<_Lock>(__l, try_to_lock); }
    665 
  // Recursive worker for the generic try_lock: attempts tuple element
  // _Idx, then recurses on the remainder.  _Continue is false when
  // _Idx is the last element (see the specialization below).
  template<int _Idx, bool _Continue = true>
    struct __try_lock_impl
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
          // Record our index before any operation that can throw, so
          // the caller sees the failing position on all exit paths.
          __idx = _Idx;
          auto __lock = __try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              __try_lock_impl<_Idx + 1, _Idx + 2 < sizeof...(_Lock)>::
                __do_try_lock(__locks, __idx);
              // -1 signals overall success: release our RAII guard so
              // the lock stays held for the caller.  Otherwise the
              // guard's destructor rolls this acquisition back.
              if (__idx == -1)
                __lock.release();
            }
	}
    };
    684 
  // Terminal case: _Idx is the last lock in the tuple.
  template<int _Idx>
    struct __try_lock_impl<_Idx, false>
    {
      template<typename... _Lock>
	static void
	__do_try_lock(tuple<_Lock&...>& __locks, int& __idx)
	{
          __idx = _Idx;
          auto __lock = __try_to_lock(std::get<_Idx>(__locks));
          if (__lock.owns_lock())
            {
              // Every lock acquired: signal success with -1 and keep
              // them all held by releasing the RAII guard.
              __idx = -1;
              __lock.release();
            }
	}
    };
    701 
  /** @brief Generic try_lock.
   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
   *  @return Returns -1 if all try_lock() calls return true. Otherwise returns
   *          a 0-based index corresponding to the argument that returned false.
   *  @post Either all arguments are locked, or none will be.
   *
   *  Sequentially calls try_lock() on each argument.
   */
  template<typename _Lock1, typename _Lock2, typename... _Lock3>
    int
    try_lock(_Lock1& __l1, _Lock2& __l2, _Lock3&... __l3)
    {
      // __idx is stored by __do_try_lock before any throwing
      // operation, so it is defined on every exit path.
      int __idx;
      auto __locks = std::tie(__l1, __l2, __l3...);
      // The catch-all is deliberate: a throwing try_lock() is reported
      // as failure at that argument's index; locks taken earlier were
      // already rolled back by the unique_lock guards during unwinding.
      __try
      { __try_lock_impl<0>::__do_try_lock(__locks, __idx); }
      __catch(...)
      { }
      return __idx;
    }
    724 
  /** @brief Generic lock.
   *  @param __l1 Meets Mutex requirements (try_lock() may throw).
   *  @param __l2 Meets Mutex requirements (try_lock() may throw).
   *  @param __l3 Meets Mutex requirements (try_lock() may throw).
   *  @throw An exception thrown by an argument's lock() or try_lock() member.
   *  @post All arguments are locked.
   *
   *  All arguments are locked via a sequence of calls to lock(), try_lock()
   *  and unlock().  If the call exits via an exception any locks that were
   *  obtained will be released.
   */
  template<typename _L1, typename _L2, typename ..._L3>
    void
    lock(_L1& __l1, _L2& __l2, _L3&... __l3)
    {
      // Strategy: block on __l1, then try the rest without blocking.
      // On failure, everything (including __l1, via __first's
      // destructor) is released and the whole sequence is retried.
      // NOTE(review): always re-blocking on __l1 first can spin
      // against a competitor locking in a different order; confirm
      // this retry policy is acceptable for expected contention.
      while (true)
        {
          unique_lock<_L1> __first(__l1);
          int __idx;
          auto __locks = std::tie(__l2, __l3...);
          __try_lock_impl<0, sizeof...(_L3)>::__do_try_lock(__locks, __idx);
          if (__idx == -1)
            {
              // All acquired: relinquish RAII ownership of __l1 so the
              // locks remain held for the caller.
              __first.release();
              return;
            }
        }
    }
    753 
  /// once_flag
  ///
  /// Opaque flag used with call_once to guarantee that an action runs
  /// exactly once.  Wraps the gthreads once-control object.
  struct once_flag
  {
  private:
    typedef __gthread_once_t __native_type;
    __native_type  _M_once;

  public:
    /// Constant-initialized to the "not yet called" state.
    constexpr once_flag() : _M_once(__GTHREAD_ONCE_INIT) { }

    once_flag(const once_flag&) = delete;
    once_flag& operator=(const once_flag&) = delete;

    // call_once needs direct access to _M_once.
    template<typename _Callable, typename... _Args>
      friend void
      call_once(once_flag& __once, _Callable&& __f, _Args&&... __args);
  };
    771 
#ifdef _GLIBCXX_HAVE_TLS
  // With TLS, call_once publishes the pending callable through these
  // thread-local globals; __once_proxy reads them back.
  extern __thread void* __once_callable;
  extern __thread void (*__once_call)();

  // Trampoline that recovers the callable's static type from
  // __once_callable and invokes it.
  template<typename _Callable>
    inline void
    __once_call_impl()
    {
      (*(_Callable*)__once_callable)();
    }
#else
  // Without TLS the pending functor is a process-wide global,
  // serialized by a library-internal mutex (see call_once below).
  extern function<void()> __once_functor;

  extern void
  __set_once_functor_lock_ptr(unique_lock<mutex>*);

  extern mutex&
  __get_once_mutex();
#endif

  // Target-independent trampoline handed to __gthread_once.
  extern "C" void __once_proxy();
    793 
  /// call_once
  ///
  /// Invokes __f(__args...) exactly once across all threads using the
  /// same once_flag.  The callable and arguments are decay-copied into
  /// a bound functor, which __once_proxy invokes.  Throws system_error
  /// if the native once primitive fails.
  template<typename _Callable, typename... _Args>
    void
    call_once(once_flag& __once, _Callable&& __f, _Args&&... __args)
    {
#ifdef _GLIBCXX_HAVE_TLS
      // TLS path: publish the bound functor and its typed trampoline
      // through thread-local globals.  __bound_functor lives on this
      // stack frame, so the stored pointer is only valid for the
      // duration of the __gthread_once call below.
      auto __bound_functor = std::bind<void>(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __once_callable = &__bound_functor;
      __once_call = &__once_call_impl<decltype(__bound_functor)>;
#else
      // No TLS: hand the functor to the library through a global,
      // serialized by the library-internal once mutex.
      unique_lock<mutex> __functor_lock(__get_once_mutex());
      __once_functor = std::bind<void>(std::forward<_Callable>(__f),
          std::forward<_Args>(__args)...);
      __set_once_functor_lock_ptr(&__functor_lock);
#endif

      int __e = __gthread_once(&(__once._M_once), &__once_proxy);

#ifndef _GLIBCXX_HAVE_TLS
      // __once_proxy unlocks via the stored pointer when it runs; if
      // it never ran (flag already set), clear the pointer here so the
      // lock is released normally by __functor_lock's destructor.
      if (__functor_lock)
        __set_once_functor_lock_ptr(0);
#endif

      if (__e)
	__throw_system_error(__e);
    }
    821 
    822   // @} group mutexes
    823 _GLIBCXX_END_NAMESPACE_VERSION
    824 } // namespace
    825 
    826 #endif // _GLIBCXX_HAS_GTHREADS && _GLIBCXX_USE_C99_STDINT_TR1
    827 
    828 #endif // __GXX_EXPERIMENTAL_CXX0X__
    829 
    830 #endif // _GLIBCXX_MUTEX
    831