// Allocators -*- C++ -*-

// Copyright (C) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009
// Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the
// terms of the GNU General Public License as published by the
// Free Software Foundation; either version 3, or (at your option)
// any later version.

// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
// GNU General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/*
 * Copyright (c) 1996-1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Permission to use, copy, modify, distribute and sell this software
 * and its documentation for any purpose is hereby granted without fee,
 * provided that the above copyright notice appear in all copies and
 * that both that copyright notice and this permission notice appear
 * in supporting documentation.  Silicon Graphics makes no
 * representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied warranty.
 */

/** @file ext/pool_allocator.h
 *  This file is a GNU extension to the Standard C++ Library.
 */

#ifndef _POOL_ALLOCATOR_H
#define _POOL_ALLOCATOR_H 1

#include <bits/c++config.h>
#include <cstdlib>
#include <new>
#include <bits/functexcept.h>
#include <ext/atomicity.h>
#include <ext/concurrence.h>
#include <bits/move.h>

_GLIBCXX_BEGIN_NAMESPACE(__gnu_cxx)

  using std::size_t;
  using std::ptrdiff_t;

  /**
   *  @brief  Base class for __pool_alloc.
   *
   *  Uses various allocators to fulfill underlying requests (and makes as
   *  few requests as possible when in default high-speed pool mode).
   *
   *  Important implementation properties:
   *  0. If globally mandated, then allocate objects from new
   *  1. If the clients request an object of size > _S_max_bytes, the resulting
   *     object will be obtained directly from new
   *  2. In all other cases, we allocate an object of size exactly
   *     _M_round_up(requested_size).  Thus the client has enough size
   *     information that we can return the object to the proper free list
   *     without permanently losing part of the object.
   */
    class __pool_alloc_base
    {
    protected:

      enum { _S_align = 8 };
      enum { _S_max_bytes = 128 };
      enum { _S_free_list_size = (size_t)_S_max_bytes / (size_t)_S_align };

      union _Obj
      {
	union _Obj* _M_free_list_link;
	char        _M_client_data[1];    // The client sees this.
      };

      static _Obj* volatile         _S_free_list[_S_free_list_size];

      // Chunk allocation state.
      static char*                  _S_start_free;
      static char*                  _S_end_free;
      static size_t                 _S_heap_size;

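      // Rounds __bytes up to the next multiple of _S_align; with the
      // default _S_align of 8, for example 1 -> 8, 9 -> 16, 128 -> 128.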
      size_t
      _M_round_up(size_t __bytes)
      { return ((__bytes + (size_t)_S_align - 1) & ~((size_t)_S_align - 1)); }

      _Obj* volatile*
      _M_get_free_list(size_t __bytes);

      __mutex&
      _M_get_mutex();

      // Returns an object of size __n, and optionally adds surplus
      // objects to the free list for size __n.
      void*
      _M_refill(size_t __n);

      // Allocates a chunk big enough for __nobjs objects of size __n.
      // __nobjs may be reduced if it is inconvenient to allocate the
      // requested number.
      char*
      _M_allocate_chunk(size_t __n, int& __nobjs);
    };
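
  // Illustrative sketch (an assumption about _M_get_free_list, whose
  // definition lives outside this header): with the defaults above, a
  // request is bucketed by rounding up to a multiple of _S_align, i.e.
  // bucket index = (__bytes + 7) / 8 - 1.  Requests of 1-8 bytes use
  // free list 0, 9-16 bytes use free list 1, ..., 121-128 bytes use
  // free list 15; anything larger bypasses the pool entirely.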


  /**
   * @brief  Allocator using a memory pool with a single lock.
   * @ingroup allocators
   */
  template<typename _Tp>
    class __pool_alloc : private __pool_alloc_base
    {
    private:
      static _Atomic_word	    _S_force_new;

    public:
      typedef size_t     size_type;
      typedef ptrdiff_t  difference_type;
      typedef _Tp*       pointer;
      typedef const _Tp* const_pointer;
      typedef _Tp&       reference;
      typedef const _Tp& const_reference;
      typedef _Tp        value_type;

      template<typename _Tp1>
        struct rebind
        { typedef __pool_alloc<_Tp1> other; };

      __pool_alloc() throw() { }

      __pool_alloc(const __pool_alloc&) throw() { }

      template<typename _Tp1>
        __pool_alloc(const __pool_alloc<_Tp1>&) throw() { }

      ~__pool_alloc() throw() { }

      pointer
      address(reference __x) const { return &__x; }

      const_pointer
      address(const_reference __x) const { return &__x; }

      size_type
      max_size() const throw()
      { return size_t(-1) / sizeof(_Tp); }

      // _GLIBCXX_RESOLVE_LIB_DEFECTS
      // 402. wrong new expression in [some_] allocator::construct
      void
      construct(pointer __p, const _Tp& __val)
      { ::new((void *)__p) _Tp(__val); }

#ifdef __GXX_EXPERIMENTAL_CXX0X__
      template<typename... _Args>
        void
        construct(pointer __p, _Args&&... __args)
	{ ::new((void *)__p) _Tp(std::forward<_Args>(__args)...); }
#endif

      void
      destroy(pointer __p) { __p->~_Tp(); }

      pointer
      allocate(size_type __n, const void* = 0);

      void
      deallocate(pointer __p, size_type __n);
    };
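
  // Usage sketch (not part of this header; any standard container works):
  //
  //   #include <vector>
  //   #include <ext/pool_allocator.h>
  //
  //   std::vector<int, __gnu_cxx::__pool_alloc<int> > v;
  //   v.push_back(42);   // element storage comes from the pool
  //
  // All instantiations share the static free lists of __pool_alloc_base,
  // so the pool (and its single lock) is global to the program.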

  template<typename _Tp>
    inline bool
    operator==(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
    { return true; }

  template<typename _Tp>
    inline bool
    operator!=(const __pool_alloc<_Tp>&, const __pool_alloc<_Tp>&)
    { return false; }

  template<typename _Tp>
    _Atomic_word
    __pool_alloc<_Tp>::_S_force_new;

  template<typename _Tp>
    _Tp*
    __pool_alloc<_Tp>::allocate(size_type __n, const void*)
    {
      pointer __ret = 0;
      if (__builtin_expect(__n != 0, true))
	{
	  if (__builtin_expect(__n > this->max_size(), false))
	    std::__throw_bad_alloc();

	  // If there is a race through here, assume answer from getenv
	  // will resolve in same direction.  Inspired by techniques
	  // to efficiently support threading found in basic_string.h.
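	  // Setting the GLIBCXX_FORCE_NEW environment variable (to any
	  // value) before the first allocation disables the pool and
	  // routes every request through operator new.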
	  if (_S_force_new == 0)
	    {
	      if (std::getenv("GLIBCXX_FORCE_NEW"))
		__atomic_add_dispatch(&_S_force_new, 1);
	      else
		__atomic_add_dispatch(&_S_force_new, -1);
	    }

	  const size_t __bytes = __n * sizeof(_Tp);
	  if (__bytes > size_t(_S_max_bytes) || _S_force_new > 0)
	    __ret = static_cast<_Tp*>(::operator new(__bytes));
	  else
	    {
	      _Obj* volatile* __free_list = _M_get_free_list(__bytes);

	      __scoped_lock sentry(_M_get_mutex());
	      _Obj* __restrict__ __result = *__free_list;
	      if (__builtin_expect(__result == 0, 0))
		__ret = static_cast<_Tp*>(_M_refill(_M_round_up(__bytes)));
	      else
		{
		  *__free_list = __result->_M_free_list_link;
		  __ret = reinterpret_cast<_Tp*>(__result);
		}
	      if (__builtin_expect(__ret == 0, 0))
		std::__throw_bad_alloc();
	    }
	}
      return __ret;
    }

  template<typename _Tp>
    void
    __pool_alloc<_Tp>::deallocate(pointer __p, size_type __n)
    {
      if (__builtin_expect(__n != 0 && __p != 0, true))
	{
	  const size_t __bytes = __n * sizeof(_Tp);
	  if (__bytes > static_cast<size_t>(_S_max_bytes) || _S_force_new > 0)
	    ::operator delete(__p);
	  else
	    {
	      _Obj* volatile* __free_list = _M_get_free_list(__bytes);
	      _Obj* __q = reinterpret_cast<_Obj*>(__p);

	      __scoped_lock sentry(_M_get_mutex());
	      __q->_M_free_list_link = *__free_list;
	      *__free_list = __q;
	    }
	}
    }

_GLIBCXX_END_NAMESPACE

#endif