// MT-optimized allocator -*- C++ -*-

// Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file ext/mt_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _MT_ALLOCATOR_H
#define _MT_ALLOCATOR_H 1

#include <new>
#include <cstdlib>
#include <bits/functexcept.h>
#include <ext/atomicity.h>
#include <bits/move.h>

namespace __gnu_cxx _GLIBCXX_VISIBILITY(default)
{
_GLIBCXX_BEGIN_NAMESPACE_VERSION

  using std::size_t;
  using std::ptrdiff_t;

  typedef void (*__destroy_handler)(void*);

  /// Base class for pool object.
  struct __pool_base
  {
    // Using short int as type for the binmap implies we are never
    // caching blocks larger than 32768 with this allocator.
    typedef unsigned short int _Binmap_type;

    // Variables used to configure the behavior of the allocator,
    // assigned and explained in detail below.
    struct _Tune
    {
      // Compile time constants for the default _Tune values.
      enum { _S_align = 8 };
      enum { _S_max_bytes = 128 };
      enum { _S_min_bin = 8 };
      enum { _S_chunk_size = 4096 - 4 * sizeof(void*) };
      enum { _S_max_threads = 4096 };
      enum { _S_freelist_headroom = 10 };

      // Alignment needed.
      // NB: In any case must be >= sizeof(_Block_record), that
      // is 4 on 32 bit machines and 8 on 64 bit machines.
      size_t	_M_align;
      // Allocation requests (after round-up to power of 2) below
      // this value will be handled by the allocator. A raw call to
      // new will be used for requests larger than this value.
      // NB: Must be much smaller than _M_chunk_size and in any
      // case <= 32768.
      size_t	_M_max_bytes;

      // Size in bytes of the smallest bin.
      // NB: Must be a power of 2 and >= _M_align (and of course
      // much smaller than _M_max_bytes).
      size_t	_M_min_bin;

      // In order to avoid fragmentation and minimize the number of
      // new() calls we always request new memory using this
      // value. Based on previous discussions on the libstdc++
      // mailing list we have chosen the value below.
      // See http://gcc.gnu.org/ml/libstdc++/2001-07/msg00077.html
      // NB: At least one order of magnitude > _M_max_bytes.
      size_t	_M_chunk_size;

      // The maximum number of supported threads. For
      // single-threaded operation, use one. Maximum values will
      // vary depending on details of the underlying system. (For
      // instance, Linux 2.4.18 reports 4070 in
      // /proc/sys/kernel/threads-max, while Linux 2.6.6 reports
      // 65534.)
      size_t 	_M_max_threads;

      // Each time a deallocation occurs in a threaded application
      // we make sure that there are no more than
      // _M_freelist_headroom % of used memory on the freelist. If
      // the number of additional records is more than
      // _M_freelist_headroom % of the freelist, we move these
      // records back to the global pool. For example, with the
      // default headroom of 10, a thread with 1000 blocks in use
      // keeps roughly at most 100 surplus blocks on its freelist
      // before the excess is returned to the global pool.
      size_t 	_M_freelist_headroom;

      // When set to true, forces all allocations to use new().
      bool 	_M_force_new;

      explicit
      _Tune()
      : _M_align(_S_align), _M_max_bytes(_S_max_bytes), _M_min_bin(_S_min_bin),
      _M_chunk_size(_S_chunk_size), _M_max_threads(_S_max_threads),
      _M_freelist_headroom(_S_freelist_headroom),
      _M_force_new(std::getenv("GLIBCXX_FORCE_NEW") ? true : false)
      { }

      explicit
      _Tune(size_t __align, size_t __maxb, size_t __minbin, size_t __chunk,
	    size_t __maxthreads, size_t __headroom, bool __force)
      : _M_align(__align), _M_max_bytes(__maxb), _M_min_bin(__minbin),
      _M_chunk_size(__chunk), _M_max_threads(__maxthreads),
      _M_freelist_headroom(__headroom), _M_force_new(__force)
      { }
    };
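
    // An illustrative sketch, not part of the original header: a custom
    // _Tune must respect the invariants documented above.  For example:
    //
    //   // align, max_bytes, min_bin, chunk_size, max_threads,
    //   // freelist_headroom, force_new
    //   __pool_base::_Tune __t(16, 128, 16, 4096 - 4 * sizeof(void*),
    //                          4096, 10, false);
    //
    // Here 16 >= sizeof(_Block_record), 16 is a power of 2 and >= the
    // alignment, 128 <= 32768, and the chunk size is well over an
    // order of magnitude larger than max_bytes.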

    struct _Block_address
    {
      void* 			_M_initial;
      _Block_address* 		_M_next;
    };

    const _Tune&
    _M_get_options() const
    { return _M_options; }

    void
    _M_set_options(_Tune __t)
    {
      if (!_M_init)
	_M_options = __t;
    }

    bool
    _M_check_threshold(size_t __bytes)
    { return __bytes > _M_options._M_max_bytes || _M_options._M_force_new; }

    size_t
    _M_get_binmap(size_t __bytes)
    { return _M_binmap[__bytes]; }

    size_t
    _M_get_align()
    { return _M_options._M_align; }

    explicit
    __pool_base()
    : _M_options(_Tune()), _M_binmap(0), _M_init(false) { }

    explicit
    __pool_base(const _Tune& __options)
    : _M_options(__options), _M_binmap(0), _M_init(false) { }

  private:
    explicit
    __pool_base(const __pool_base&);

    __pool_base&
    operator=(const __pool_base&);

  protected:
    // Configuration options.
    _Tune 	       		_M_options;

    _Binmap_type* 		_M_binmap;

    // Configuration of the pool object via _M_options can happen
    // after construction but before initialization. After
    // initialization is complete, this variable is set to true.
    bool 			_M_init;
  };
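
  // An illustrative sketch, not part of the original header: with the
  // default _Tune values (_M_min_bin = 8, _M_max_bytes = 128), the bins
  // hold blocks of 8, 16, 32, 64 and 128 bytes, and _M_binmap maps a
  // request size to its bin index directly:
  //
  //   _M_get_binmap(1)   == 0   // rounded up to the 8-byte bin
  //   _M_get_binmap(8)   == 0
  //   _M_get_binmap(9)   == 1   // the 16-byte bin
  //   _M_get_binmap(100) == 4   // the 128-byte bin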


  /**
   *  @brief  Data describing the underlying memory pool, parameterized on
   *  threading support.
   */
  template<bool _Thread>
    class __pool;

  /// Specialization for single thread.
  template<>
    class __pool<false> : public __pool_base
    {
    public:
      union _Block_record
      {
	// Points to the block_record of the next free block.
	_Block_record* 			_M_next;
      };

      struct _Bin_record
      {
	// An "array" of pointers to the first free block.
	_Block_record**			_M_first;

	// A list of the initial addresses of all allocated blocks.
	_Block_address*		     	_M_address;
      };

      void
      _M_initialize_once()
      {
	if (__builtin_expect(_M_init == false, false))
	  _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes) throw ();

      size_t
      _M_get_thread_id() { return 0; }

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record&, _Block_record*, size_t)
      { }

      explicit __pool()
      : _M_bin(0), _M_bin_size(1) { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(0), _M_bin_size(1) { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*		 _M_bin;

      // Actual value calculated in _M_initialize().
      size_t 	       	     	_M_bin_size;

      void
      _M_initialize();
    };
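
  // An illustrative sketch, not part of the original header: a free
  // block stores its freelist link in its first bytes (the
  // _Block_record union), and user data in a block handed out by
  // allocate() begins _M_align bytes past the start of the block:
  //
  //   _Block_record* __block = __bin._M_first[0];   // freelist head
  //   __bin._M_first[0] = __block->_M_next;         // pop it
  //   char* __data = reinterpret_cast<char*>(__block) + __align;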

#ifdef __GTHREADS
  /// Specialization for thread enabled, via gthreads.h.
  template<>
    class __pool<true> : public __pool_base
    {
    public:
      // Each requesting thread is assigned an id ranging from 1 to
      // _S_max_threads. Thread id 0 is used as a global memory pool.
      // In order to get constant performance on the thread assignment
      // routine, we keep a list of free ids. When a thread first
      // requests memory we remove the first record in this list and
      // store its address in a __gthread_key. When initializing the
      // __gthread_key we specify a destructor. When this destructor
      // is called (i.e. when the thread dies), we return the thread
      // id to the front of this list.
      struct _Thread_record
      {
	// Points to next free thread id record. NULL if last record in list.
	_Thread_record*			_M_next;

	// Thread id ranging from 1 to _S_max_threads.
	size_t                          _M_id;
      };

      union _Block_record
      {
	// Points to the block_record of the next free block.
	_Block_record*			_M_next;

	// The thread id of the thread which has requested this block.
	size_t                          _M_thread_id;
      };

      struct _Bin_record
      {
	// An "array" of pointers to the first free block for each
	// thread id. Memory to this "array" is allocated in
	// _S_initialize() for _S_max_threads + global pool 0.
	_Block_record**			_M_first;

	// A list of the initial addresses of all allocated blocks.
	_Block_address*		     	_M_address;

	// "Arrays" of counters used to keep track of the number of
	// blocks that are on the freelist or in use for each thread id.
	// - Note that the second part of the allocated _M_used "array"
	//   actually hosts (atomic) counters of reclaimed blocks: in
	//   _M_reserve_block and in _M_reclaim_block those numbers are
	//   subtracted from the first ones to obtain the actual size
	//   of the "working set" of the given thread.
	// - Memory to these "arrays" is allocated in _S_initialize()
	//   for _S_max_threads + global pool 0.
	size_t*				_M_free;
	size_t*			        _M_used;

	// Each bin has its own mutex which is used to ensure data
	// integrity while changing "ownership" on a block.  The mutex
	// is initialized in _S_initialize().
	__gthread_mutex_t*              _M_mutex;
      };

      // XXX GLIBCXX_ABI Deprecated
      void
      _M_initialize(__destroy_handler);

      void
      _M_initialize_once()
      {
	if (__builtin_expect(_M_init == false, false))
	  _M_initialize();
      }

      void
      _M_destroy() throw();

      char*
      _M_reserve_block(size_t __bytes, const size_t __thread_id);

      void
      _M_reclaim_block(char* __p, size_t __bytes) throw ();

      const _Bin_record&
      _M_get_bin(size_t __which)
      { return _M_bin[__which]; }

      void
      _M_adjust_freelist(const _Bin_record& __bin, _Block_record* __block,
			 size_t __thread_id)
      {
	if (__gthread_active_p())
	  {
	    __block->_M_thread_id = __thread_id;
	    --__bin._M_free[__thread_id];
	    ++__bin._M_used[__thread_id];
	  }
      }

      // XXX GLIBCXX_ABI Deprecated
      _GLIBCXX_CONST void
      _M_destroy_thread_key(void*) throw ();

      size_t
      _M_get_thread_id();

      explicit __pool()
      : _M_bin(0), _M_bin_size(1), _M_thread_freelist(0)
      { }

      explicit __pool(const __pool_base::_Tune& __tune)
      : __pool_base(__tune), _M_bin(0), _M_bin_size(1),
	_M_thread_freelist(0)
      { }

    private:
      // An "array" of bin_records each of which represents a specific
      // power of 2 size. Memory to this "array" is allocated in
      // _M_initialize().
      _Bin_record*		_M_bin;

      // Actual value calculated in _M_initialize().
      size_t 	       	     	_M_bin_size;

      _Thread_record* 		_M_thread_freelist;
      void*			_M_thread_freelist_initial;

      void
      _M_initialize();
    };
#endif
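
  // An illustrative sketch, not part of the original header: the
  // per-thread _M_free/_M_used counters drive the freelist-headroom
  // policy described in _Tune.  Roughly, for a thread __id:
  //
  //   size_t __in_use  = __bin._M_used[__id];   // blocks handed out
  //   size_t __on_list = __bin._M_free[__id];   // blocks cached
  //   if (__on_list * 100 > __in_use * __options._M_freelist_headroom)
  //     /* _M_reclaim_block returns the surplus to global pool 0 */;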

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool
    {
      typedef _PoolTp<_Thread> 		pool_type;

      static pool_type&
      _S_get_pool()
      {
	static pool_type _S_pool;
	return _S_pool;
      }
    };
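
  // An illustrative sketch, not part of the original header: because
  // _S_get_pool() returns a function-local static, every allocator
  // sharing the same __common_pool instantiation uses a single pool:
  //
  //   typedef __common_pool<__pool, true> __shared;
  //   __pool<true>& __a = __shared::_S_get_pool();
  //   __pool<true>& __b = __shared::_S_get_pool();  // same object as __a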

  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_base;

  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, false>
    : public __common_pool<_PoolTp, false>
    {
      using  __common_pool<_PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };

#ifdef __GTHREADS
  template<template <bool> class _PoolTp>
    struct __common_pool_base<_PoolTp, true>
    : public __common_pool<_PoolTp, true>
    {
      using  __common_pool<_PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    if (__gthread_active_p())
	      {
		// On some platforms, __gthread_once_t is an aggregate.
		static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
		__gthread_once(&__once, _S_initialize);
	      }

	    // Double check initialization. May be necessary on some
	    // systems for proper construction when not compiling with
	    // thread flags.
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };
#endif

  /// Policy for shared __pool objects.
  template<template <bool> class _PoolTp, bool _Thread>
    struct __common_pool_policy : public __common_pool_base<_PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
	       bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __common_pool_policy<_PoolTp1, _Thread1> other; };

      using  __common_pool_base<_PoolTp, _Thread>::_S_get_pool;
      using  __common_pool_base<_PoolTp, _Thread>::_S_initialize_once;
    };


  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool
    {
      typedef _Tp 			value_type;
      typedef _PoolTp<_Thread> 		pool_type;

      static pool_type&
      _S_get_pool()
      {
	// Sane defaults for the _PoolTp.
	typedef typename pool_type::_Block_record _Block_record;
	const static size_t __a = (__alignof__(_Tp) >= sizeof(_Block_record)
				   ? __alignof__(_Tp) : sizeof(_Block_record));

	typedef typename __pool_base::_Tune _Tune;
	static _Tune _S_tune(__a, sizeof(_Tp) * 64,
			     sizeof(_Tp) * 2 >= __a ? sizeof(_Tp) * 2 : __a,
			     sizeof(_Tp) * size_t(_Tune::_S_chunk_size),
			     _Tune::_S_max_threads,
			     _Tune::_S_freelist_headroom,
			     std::getenv("GLIBCXX_FORCE_NEW") ? true : false);
	static pool_type _S_pool(_S_tune);
	return _S_pool;
      }
    };
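
  // An illustrative sketch, not part of the original header: for a
  // hypothetical 8-byte _Tp with 8-byte alignment on a 64-bit target
  // (where sizeof(_Block_record) == 8), the defaults above work out to:
  //
  //   __a        = max(8, 8)            =     8
  //   max bytes  = 8 * 64               =   512
  //   min bin    = max(8 * 2, __a)      =    16
  //   chunk size = 8 * (4096 - 4 * 8)   = 32512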

  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_base;

  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, false>
    : public __per_type_pool<_Tp, _PoolTp, false>
    {
      using  __per_type_pool<_Tp, _PoolTp, false>::_S_get_pool;

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };

#ifdef __GTHREADS
  template<typename _Tp, template <bool> class _PoolTp>
    struct __per_type_pool_base<_Tp, _PoolTp, true>
    : public __per_type_pool<_Tp, _PoolTp, true>
    {
      using  __per_type_pool<_Tp, _PoolTp, true>::_S_get_pool;

      static void
      _S_initialize()
      { _S_get_pool()._M_initialize_once(); }

      static void
      _S_initialize_once()
      {
	static bool __init;
	if (__builtin_expect(__init == false, false))
	  {
	    if (__gthread_active_p())
	      {
		// On some platforms, __gthread_once_t is an aggregate.
		static __gthread_once_t __once = __GTHREAD_ONCE_INIT;
		__gthread_once(&__once, _S_initialize);
	      }

	    // Double check initialization. May be necessary on some
	    // systems for proper construction when not compiling with
	    // thread flags.
	    _S_get_pool()._M_initialize_once();
	    __init = true;
	  }
      }
    };
#endif

  /// Policy for individual __pool objects.
  template<typename _Tp, template <bool> class _PoolTp, bool _Thread>
    struct __per_type_pool_policy
    : public __per_type_pool_base<_Tp, _PoolTp, _Thread>
    {
      template<typename _Tp1, template <bool> class _PoolTp1 = _PoolTp,
	       bool _Thread1 = _Thread>
        struct _M_rebind
        { typedef __per_type_pool_policy<_Tp1, _PoolTp1, _Thread1> other; };

      using  __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_get_pool;
      using  __per_type_pool_base<_Tp, _PoolTp, _Thread>::_S_initialize_once;
    };
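
  // An illustrative sketch, not part of the original header: giving one
  // type its own pool instead of the shared default:
  //
  //   typedef __gnu_cxx::__per_type_pool_policy<int, __gnu_cxx::__pool,
  //                                             true> __pol;
  //   typedef __gnu_cxx::__mt_alloc<int, __pol> __int_alloc;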


  /// Base class for _Tp dependent member functions.
  template<typename _Tp>
    class __mt_alloc_base
    {
    public:
      typedef size_t                    size_type;
      typedef ptrdiff_t                 difference_type;
      typedef _Tp*                      pointer;
      typedef const _Tp*                const_pointer;
      typedef _Tp&                      reference;
      typedef const _Tp&                const_reference;
      typedef _Tp                       value_type;

      pointer
      address(reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      const_pointer
      address(const_reference __x) const _GLIBCXX_NOEXCEPT
      { return std::__addressof(__x); }

      size_type
      max_size() const _GLIBCXX_USE_NOEXCEPT
      { return size_t(-1) / sizeof(_Tp); }

#ifdef __GXX_EXPERIMENTAL_CXX0X__
      template<typename _Up, typename... _Args>
        void
        construct(_Up* __p, _Args&&... __args)
	{ ::new((void *)__p) _Up(std::forward<_Args>(__args)...); }

      template<typename _Up>
        void
        destroy(_Up* __p) { __p->~_Up(); }
#else
      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new((void *)__p) _Tp(__val); }

      void
      destroy(pointer __p) { __p->~_Tp(); }
#endif
    };

#ifdef __GTHREADS
#define __thread_default true
#else
#define __thread_default false
#endif

  /**
   *  @brief  This is a fixed size (power of 2) allocator which - when
   *  compiled with thread support - will maintain one freelist per
   *  size per thread plus a @a global one. Steps are taken to limit
   *  the per thread freelist sizes (by returning excess back to
   *  the @a global list).
   *  @ingroup allocators
   *
   *  Further details:
   *  http://gcc.gnu.org/onlinedocs/libstdc++/manual/bk01pt12ch32.html
   */
  template<typename _Tp,
	   typename _Poolp = __common_pool_policy<__pool, __thread_default> >
    class __mt_alloc : public __mt_alloc_base<_Tp>
    {
    public:
      typedef size_t                    	size_type;
      typedef ptrdiff_t                 	difference_type;
      typedef _Tp*                      	pointer;
      typedef const _Tp*                	const_pointer;
      typedef _Tp&                      	reference;
      typedef const _Tp&                	const_reference;
      typedef _Tp                       	value_type;
      typedef _Poolp      			__policy_type;
      typedef typename _Poolp::pool_type	__pool_type;

      template<typename _Tp1, typename _Poolp1 = _Poolp>
        struct rebind
        {
	  typedef typename _Poolp1::template _M_rebind<_Tp1>::other pol_type;
	  typedef __mt_alloc<_Tp1, pol_type> other;
	};

      __mt_alloc() _GLIBCXX_USE_NOEXCEPT { }

      __mt_alloc(const __mt_alloc&) _GLIBCXX_USE_NOEXCEPT { }

      template<typename _Tp1, typename _Poolp1>
        __mt_alloc(const __mt_alloc<_Tp1, _Poolp1>&) _GLIBCXX_USE_NOEXCEPT { }

      ~__mt_alloc() _GLIBCXX_USE_NOEXCEPT { }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);

      const __pool_base::_Tune
      _M_get_options()
      {
	// Return a copy, not a reference, for external consumption.
	return __policy_type::_S_get_pool()._M_get_options();
      }

      void
      _M_set_options(__pool_base::_Tune __t)
      { __policy_type::_S_get_pool()._M_set_options(__t); }
    };
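
  // An illustrative sketch, not part of the original header: typical
  // use with a standard container, with optional tuning applied before
  // the pool is first used (options are ignored once _M_init is set):
  //
  //   #include <vector>
  //   #include <ext/mt_allocator.h>
  //
  //   typedef __gnu_cxx::__mt_alloc<int> alloc_type;
  //   alloc_type __a;
  //   __gnu_cxx::__pool_base::_Tune __t(16, 128, 16, 4096, 20, 10, false);
  //   __a._M_set_options(__t);       // no effect after initialization
  //   std::vector<int, alloc_type> __v;
  //   __v.push_back(42);             // small request, served by the pool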

  template<typename _Tp, typename _Poolp>
    typename __mt_alloc<_Tp, _Poolp>::pointer
    __mt_alloc<_Tp, _Poolp>::
    allocate(size_type __n, const void*)
    {
      if (__n > this->max_size())
	std::__throw_bad_alloc();

      __policy_type::_S_initialize_once();

      // Requests larger than _M_max_bytes are handled by operator
      // new/delete directly.
      __pool_type& __pool = __policy_type::_S_get_pool();
      const size_t __bytes = __n * sizeof(_Tp);
      if (__pool._M_check_threshold(__bytes))
	{
	  void* __ret = ::operator new(__bytes);
	  return static_cast<_Tp*>(__ret);
	}

      // Round up to power of 2 and figure out which bin to use.
      const size_t __which = __pool._M_get_binmap(__bytes);
      const size_t __thread_id = __pool._M_get_thread_id();

      // Find out if we have blocks on our freelist.  If so, go ahead
      // and use them directly without having to lock anything.
      char* __c;
      typedef typename __pool_type::_Bin_record _Bin_record;
      const _Bin_record& __bin = __pool._M_get_bin(__which);
      if (__bin._M_first[__thread_id])
	{
	  // Already reserved.
	  typedef typename __pool_type::_Block_record _Block_record;
	  _Block_record* __block = __bin._M_first[__thread_id];
	  __bin._M_first[__thread_id] = __block->_M_next;

	  __pool._M_adjust_freelist(__bin, __block, __thread_id);
	  __c = reinterpret_cast<char*>(__block) + __pool._M_get_align();
	}
      else
	{
	  // Null, reserve.
	  __c = __pool._M_reserve_block(__bytes, __thread_id);
	}
      return static_cast<_Tp*>(static_cast<void*>(__c));
    }
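
  // An illustrative sketch, not part of the original header: with the
  // default _Tune, allocate(5) for int (20 bytes) stays below
  // _M_max_bytes (128), so the request is rounded up to the 32-byte
  // bin; allocate(64) for int (256 bytes) exceeds the threshold and
  // goes straight to ::operator new.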

  template<typename _Tp, typename _Poolp>
    void
    __mt_alloc<_Tp, _Poolp>::
    deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__p != 0, true))
	{
	  // Requests larger than _M_max_bytes are handled by
	  // operators new/delete directly.
	  __pool_type& __pool = __policy_type::_S_get_pool();
	  const size_t __bytes = __n * sizeof(_Tp);
	  if (__pool._M_check_threshold(__bytes))
	    ::operator delete(__p);
	  else
	    __pool._M_reclaim_block(reinterpret_cast<char*>(__p), __bytes);
	}
    }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator==(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return true; }

  template<typename _Tp, typename _Poolp>
    inline bool
    operator!=(const __mt_alloc<_Tp, _Poolp>&, const __mt_alloc<_Tp, _Poolp>&)
    { return false; }
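
  // __mt_alloc carries no per-instance state, so any two instances of
  // the same specialization compare equal and memory obtained from one
  // may be returned through another.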

#undef __thread_default

_GLIBCXX_END_NAMESPACE_VERSION
} // namespace

#endif
    759