#ifndef JEMALLOC_INTERNAL_H
#define	JEMALLOC_INTERNAL_H

#include "jemalloc_internal_defs.h"
#include "jemalloc/internal/jemalloc_internal_decls.h"

#ifdef JEMALLOC_UTRACE
#include <sys/ktrace.h>
#endif

#define	JEMALLOC_NO_DEMANGLE
#ifdef JEMALLOC_JET
#  define JEMALLOC_N(n) jet_##n
#  include "jemalloc/internal/public_namespace.h"
#  define JEMALLOC_NO_RENAME
#  include "../jemalloc.h"
#  undef JEMALLOC_NO_RENAME
#else
#  define JEMALLOC_N(n) je_##n
#  include "../jemalloc.h"
#endif
#include "jemalloc/internal/private_namespace.h"

static const bool config_debug =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
static const bool have_dss =
#ifdef JEMALLOC_DSS
    true
#else
    false
#endif
    ;
static const bool config_fill =
#ifdef JEMALLOC_FILL
    true
#else
    false
#endif
    ;
static const bool config_lazy_lock =
#ifdef JEMALLOC_LAZY_LOCK
    true
#else
    false
#endif
    ;
static const char * const config_malloc_conf = JEMALLOC_CONFIG_MALLOC_CONF;
static const bool config_prof =
#ifdef JEMALLOC_PROF
    true
#else
    false
#endif
    ;
static const bool config_prof_libgcc =
#ifdef JEMALLOC_PROF_LIBGCC
    true
#else
    false
#endif
    ;
static const bool config_prof_libunwind =
#ifdef JEMALLOC_PROF_LIBUNWIND
    true
#else
    false
#endif
    ;
static const bool maps_coalesce =
#ifdef JEMALLOC_MAPS_COALESCE
    true
#else
    false
#endif
    ;
static const bool config_munmap =
#ifdef JEMALLOC_MUNMAP
    true
#else
    false
#endif
    ;
static const bool config_stats =
#ifdef JEMALLOC_STATS
    true
#else
    false
#endif
    ;
static const bool config_tcache =
#ifdef JEMALLOC_TCACHE
    true
#else
    false
#endif
    ;
static const bool config_tls =
#ifdef JEMALLOC_TLS
    true
#else
    false
#endif
    ;
static const bool config_utrace =
#ifdef JEMALLOC_UTRACE
    true
#else
    false
#endif
    ;
static const bool config_valgrind =
#ifdef JEMALLOC_VALGRIND
    true
#else
    false
#endif
    ;
static const bool config_xmalloc =
#ifdef JEMALLOC_XMALLOC
    true
#else
    false
#endif
    ;
static const bool config_ivsalloc =
#ifdef JEMALLOC_IVSALLOC
    true
#else
    false
#endif
    ;
static const bool config_cache_oblivious =
#ifdef JEMALLOC_CACHE_OBLIVIOUS
    true
#else
    false
#endif
    ;

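/*
 * These static const booleans let configuration checks be written as ordinary
 * C conditionals rather than #ifdef blocks, e.g. (illustrative, echoing a use
 * that appears later in this header):
 *
 *   if (config_stats && is_metadata)
 *       arena_metadata_allocated_add(...);
 *
 * The compiler folds the constants and eliminates dead branches, which is
 * equivalent to #ifdef but keeps the disabled code visible to the compiler.
 */
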
#ifdef JEMALLOC_C11ATOMICS
#include <stdatomic.h>
#endif

#ifdef JEMALLOC_ATOMIC9
#include <machine/atomic.h>
#endif

#if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
#include <libkern/OSAtomic.h>
#endif

#ifdef JEMALLOC_ZONE
#include <mach/mach_error.h>
#include <mach/mach_init.h>
#include <mach/vm_map.h>
#include <malloc/malloc.h>
#endif

#define	RB_COMPACT
#include "jemalloc/internal/rb.h"
#include "jemalloc/internal/qr.h"
#include "jemalloc/internal/ql.h"

/*
 * jemalloc can conceptually be broken into components (arena, tcache, etc.),
 * but there are circular dependencies that cannot be broken without
 * substantial performance degradation.  In order to reduce the effect on
 * visual code flow, read the header files in multiple passes, with one of the
 * following cpp variables defined during each pass:
 *
 *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
 *                        types.
 *   JEMALLOC_H_STRUCTS : Data structures.
 *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
 *   JEMALLOC_H_INLINES : Inline functions.
 */
/******************************************************************************/
#define	JEMALLOC_H_TYPES

#include "jemalloc/internal/jemalloc_internal_macros.h"

/* Size class index type. */
typedef unsigned szind_t;

/*
 * Flags bits:
 *
 * a: arena
 * t: tcache
 * 0: unused
 * z: zero
 * n: alignment
 *
 * aaaaaaaa aaaatttt tttttttt 0znnnnnn
 */
#define	MALLOCX_ARENA_MASK	((int)~0xfffff)
#define	MALLOCX_ARENA_MAX	0xffe
#define	MALLOCX_TCACHE_MASK	((int)~0xfff000ffU)
#define	MALLOCX_TCACHE_MAX	0xffd
#define	MALLOCX_LG_ALIGN_MASK	((int)0x3f)
/* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
#define	MALLOCX_ALIGN_GET_SPECIFIED(flags)				\
    (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
#define	MALLOCX_ALIGN_GET(flags)					\
    (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
#define	MALLOCX_ZERO_GET(flags)						\
    ((bool)(flags & MALLOCX_ZERO))

#define	MALLOCX_TCACHE_GET(flags)					\
    (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
#define	MALLOCX_ARENA_GET(flags)					\
    (((unsigned)(((unsigned)flags) >> 20)) - 1)
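
/*
 * Decoding examples (illustrative; the matching encoders, e.g.
 * MALLOCX_ARENA() and MALLOCX_TCACHE(), live in the public jemalloc.h):
 *
 *   MALLOCX_ARENA(3) stores 3+1 in the upper 12 bits (0x00400000), and
 *   MALLOCX_ARENA_GET() recovers (0x00400000 >> 20) - 1 == 3.
 *
 *   MALLOCX_TCACHE(5) stores 5+2 in the tcache bits (0x00000700), and
 *   MALLOCX_TCACHE_GET() recovers 7 - 2 == 5; the +2 bias reserves raw
 *   values 0 (unspecified) and 1 (MALLOCX_TCACHE_NONE).
 *
 *   With no alignment specified (lg == 0), MALLOCX_ALIGN_GET_SPECIFIED()
 *   yields 1, which the & (SIZE_T_MAX-1) in MALLOCX_ALIGN_GET() turns into 0.
 */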

/* Smallest size class to support. */
#define	TINY_MIN		(1U << LG_TINY_MIN)

/*
 * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
 * classes).
 */
#ifndef LG_QUANTUM
#  if (defined(__i386__) || defined(_M_IX86))
#    define LG_QUANTUM		4
#  endif
#  ifdef __ia64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __alpha__
#    define LG_QUANTUM		4
#  endif
#  if (defined(__sparc64__) || defined(__sparcv9))
#    define LG_QUANTUM		4
#  endif
#  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
#    define LG_QUANTUM		4
#  endif
#  ifdef __arm__
#    define LG_QUANTUM		3
#  endif
#  ifdef __aarch64__
#    define LG_QUANTUM		4
#  endif
#  ifdef __hppa__
#    define LG_QUANTUM		4
#  endif
#  ifdef __mips__
#    define LG_QUANTUM		3
#  endif
#  ifdef __or1k__
#    define LG_QUANTUM		3
#  endif
#  ifdef __powerpc__
#    define LG_QUANTUM		4
#  endif
#  ifdef __s390__
#    define LG_QUANTUM		4
#  endif
#  ifdef __SH4__
#    define LG_QUANTUM		4
#  endif
#  ifdef __tile__
#    define LG_QUANTUM		4
#  endif
#  ifdef __le32__
#    define LG_QUANTUM		4
#  endif
#  ifndef LG_QUANTUM
#    error "Unknown minimum alignment for architecture; specify via --with-lg-quantum"
#  endif
#endif

#define	QUANTUM			((size_t)(1U << LG_QUANTUM))
#define	QUANTUM_MASK		(QUANTUM - 1)

/* Return the smallest quantum multiple that is >= a. */
#define	QUANTUM_CEILING(a)						\
	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
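
/*
 * Example (illustrative): with LG_QUANTUM == 4, QUANTUM is 16 and
 * QUANTUM_MASK is 15, so QUANTUM_CEILING(20) == (20 + 15) & ~15 == 32.  The
 * LONG_, PTR_, CACHELINE_, and PAGE_CEILING macros below use the same
 * add-then-mask idiom, which is correct for any power-of-two multiple.
 */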

#define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
#define	LONG_MASK		(LONG - 1)

/* Return the smallest long multiple that is >= a. */
#define	LONG_CEILING(a)							\
	(((a) + LONG_MASK) & ~LONG_MASK)

#define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
#define	PTR_MASK		(SIZEOF_PTR - 1)

/* Return the smallest (void *) multiple that is >= a. */
#define	PTR_CEILING(a)							\
	(((a) + PTR_MASK) & ~PTR_MASK)

/*
 * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
 * In addition, this controls the spacing of cacheline-spaced size classes.
 *
 * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
 * only handle raw constants.
 */
#define	LG_CACHELINE		6
#define	CACHELINE		64
#define	CACHELINE_MASK		(CACHELINE - 1)

/* Return the smallest cacheline multiple that is >= s. */
#define	CACHELINE_CEILING(s)						\
	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)

/* Page size.  LG_PAGE is determined by the configure script. */
#ifdef PAGE_MASK
#  undef PAGE_MASK
#endif
#define	PAGE		((size_t)(1U << LG_PAGE))
#define	PAGE_MASK	((size_t)(PAGE - 1))

/* Return the page base address for the page containing address a. */
#define	PAGE_ADDR2BASE(a)						\
	((void *)((uintptr_t)(a) & ~PAGE_MASK))

/* Return the smallest pagesize multiple that is >= s. */
#define	PAGE_CEILING(s)							\
	(((s) + PAGE_MASK) & ~PAGE_MASK)

/* Return the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2BASE(a, alignment)				\
	((void *)((uintptr_t)(a) & (-(alignment))))

/* Return the offset between a and the nearest aligned address at or below a. */
#define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
	((size_t)((uintptr_t)(a) & (alignment - 1)))

/* Return the smallest alignment multiple that is >= s. */
#define	ALIGNMENT_CEILING(s, alignment)					\
	(((s) + (alignment - 1)) & (-(alignment)))
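
/*
 * Example (illustrative): with alignment == 64, ALIGNMENT_ADDR2BASE(0x1234,
 * 64) masks with -64 (~0x3f) to yield 0x1200, ALIGNMENT_ADDR2OFFSET(0x1234,
 * 64) yields 0x34, and ALIGNMENT_CEILING(0x34, 64) rounds up to 0x40.
 * alignment must be a power of two for these masks to be meaningful.
 */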

/* Declare a variable-length array. */
#if __STDC_VERSION__ < 199901L
#  ifdef _MSC_VER
#    include <malloc.h>
#    define alloca _alloca
#  else
#    ifdef JEMALLOC_HAS_ALLOCA_H
#      include <alloca.h>
#    else
#      include <stdlib.h>
#    endif
#  endif
#  define VARIABLE_ARRAY(type, name, count) \
	type *name = alloca(sizeof(type) * (count))
#else
#  define VARIABLE_ARRAY(type, name, count) type name[(count)]
#endif
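
/*
 * Usage sketch (illustrative): VARIABLE_ARRAY(bool, touched, nbins) declares
 * either a true C99 VLA or, pre-C99/MSVC, an alloca()-backed pointer; in both
 * cases the storage lives on the stack and is released when the enclosing
 * function returns, so it must not be passed to any dalloc path.
 */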

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_TYPES
/******************************************************************************/
#define	JEMALLOC_H_STRUCTS

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#define	JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_A
#include "jemalloc/internal/extent.h"
#define	JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_STRUCTS_B
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"

#include "jemalloc/internal/tsd.h"

#undef JEMALLOC_H_STRUCTS
/******************************************************************************/
#define	JEMALLOC_H_EXTERNS

extern bool	opt_abort;
extern const char	*opt_junk;
extern bool	opt_junk_alloc;
extern bool	opt_junk_free;
extern size_t	opt_quarantine;
extern bool	opt_redzone;
extern bool	opt_utrace;
extern bool	opt_xmalloc;
extern bool	opt_zero;
extern unsigned	opt_narenas;

extern bool	in_valgrind;

/* Number of CPUs. */
extern unsigned	ncpus;

/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 */
extern arena_t	**arenas;

/*
 * index2size_tab encodes the same information as could be computed (at
 * unacceptable cost in some code paths) by index2size_compute().
 */
extern size_t const	index2size_tab[NSIZES+1];
/*
 * size2index_tab is a compact lookup table that rounds request sizes up to
 * size classes.  In order to reduce cache footprint, the table is compressed,
 * and all accesses are via size2index().
 */
extern uint8_t const	size2index_tab[];

void	*a0malloc(size_t size);
void	a0dalloc(void *ptr);
void	*bootstrap_malloc(size_t size);
void	*bootstrap_calloc(size_t num, size_t size);
void	bootstrap_free(void *ptr);
arena_t	*arenas_extend(unsigned ind);
unsigned	narenas_total_get(void);
arena_t	*arena_init(unsigned ind);
arena_tdata_t	*arena_tdata_get_hard(tsd_t *tsd, unsigned ind);
arena_t	*arena_choose_hard(tsd_t *tsd);
void	arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
void	thread_allocated_cleanup(tsd_t *tsd);
void	thread_deallocated_cleanup(tsd_t *tsd);
void	arena_cleanup(tsd_t *tsd);
void	arenas_tdata_cleanup(tsd_t *tsd);
void	narenas_tdata_cleanup(tsd_t *tsd);
void	arenas_tdata_bypass_cleanup(tsd_t *tsd);
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/arena.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"
#include "jemalloc/internal/tcache.h"
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"
#include "jemalloc/internal/prof.h"
#include "jemalloc/internal/tsd.h"

#undef JEMALLOC_H_EXTERNS
/******************************************************************************/
#define	JEMALLOC_H_INLINES

#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/valgrind.h"
#include "jemalloc/internal/util.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/prng.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/ckh.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/smoothstep.h"
#include "jemalloc/internal/stats.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/tsd.h"
#include "jemalloc/internal/mb.h"
#include "jemalloc/internal/extent.h"
#include "jemalloc/internal/base.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/chunk.h"
#include "jemalloc/internal/huge.h"

#ifndef JEMALLOC_ENABLE_INLINE
szind_t	size2index_compute(size_t size);
szind_t	size2index_lookup(size_t size);
szind_t	size2index(size_t size);
size_t	index2size_compute(szind_t index);
size_t	index2size_lookup(szind_t index);
size_t	index2size(szind_t index);
size_t	s2u_compute(size_t size);
size_t	s2u_lookup(size_t size);
size_t	s2u(size_t size);
size_t	sa2u(size_t size, size_t alignment);
arena_t	*arena_choose(tsd_t *tsd, arena_t *arena);
arena_tdata_t	*arena_tdata_get(tsd_t *tsd, unsigned ind,
    bool refresh_if_missing);
arena_t	*arena_get(unsigned ind, bool init_if_missing);
ticker_t	*decay_ticker_get(tsd_t *tsd, unsigned ind);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_INLINE szind_t
size2index_compute(size_t size)
{

#if (NTBINS != 0)
	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
		szind_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
		szind_t lg_ceil = lg_floor(pow2_ceil_zu(size));
		return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
	}
#endif
	{
		szind_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
		    (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
		    : lg_floor((size<<1)-1);
		szind_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
		    x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
		szind_t grp = shift << LG_SIZE_CLASS_GROUP;

		szind_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;

		size_t delta_inverse_mask = ZI(-1) << lg_delta;
		szind_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
		    ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);

		szind_t index = NTBINS + grp + mod;
		return (index);
	}
}
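
/*
 * Worked example (illustrative; assumes LG_QUANTUM == 4, LG_SIZE_CLASS_GROUP
 * == 2, and NTBINS == 0): for size == 80, x == lg_floor(159) == 7, so shift
 * == 1 and grp == 4; lg_delta == 4, so mod == ((79 & ~15) >> 4) & 3 == 0.
 * The result is index 4, i.e. the first class (80) of the group after
 * 16/32/48/64.
 */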

JEMALLOC_ALWAYS_INLINE szind_t
size2index_lookup(size_t size)
{

	assert(size <= LOOKUP_MAXCLASS);
	{
		szind_t ret = (size2index_tab[(size-1) >> LG_TINY_MIN]);
		assert(ret == size2index_compute(size));
		return (ret);
	}
}

JEMALLOC_ALWAYS_INLINE szind_t
size2index(size_t size)
{

	assert(size > 0);
	if (likely(size <= LOOKUP_MAXCLASS))
		return (size2index_lookup(size));
	return (size2index_compute(size));
}

JEMALLOC_INLINE size_t
index2size_compute(szind_t index)
{

#if (NTBINS > 0)
	if (index < NTBINS)
		return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
#endif
	{
		size_t reduced_index = index - NTBINS;
		size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
		size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
		    1);

		size_t grp_size_mask = ~((!!grp)-1);
		size_t grp_size = ((ZU(1) << (LG_QUANTUM +
		    (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;

		size_t shift = (grp == 0) ? 1 : grp;
		size_t lg_delta = shift + (LG_QUANTUM-1);
		size_t mod_size = (mod+1) << lg_delta;

		size_t usize = grp_size + mod_size;
		return (usize);
	}
}
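
/*
 * Inverse of the example above (illustrative, same assumptions): for index ==
 * 4, grp == 1 and mod == 0, so grp_size == (1 << 5) << 1 == 64, lg_delta ==
 * 4, and mod_size == 16, giving usize == 80.
 */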

JEMALLOC_ALWAYS_INLINE size_t
index2size_lookup(szind_t index)
{
	size_t ret = (size_t)index2size_tab[index];
	assert(ret == index2size_compute(index));
	return (ret);
}

JEMALLOC_ALWAYS_INLINE size_t
index2size(szind_t index)
{

	assert(index < NSIZES);
	return (index2size_lookup(index));
}

JEMALLOC_ALWAYS_INLINE size_t
s2u_compute(size_t size)
{

#if (NTBINS > 0)
	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
		size_t lg_ceil = lg_floor(pow2_ceil_zu(size));
		return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
		    (ZU(1) << lg_ceil));
	}
#endif
	{
		size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
		    (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
		    : lg_floor((size<<1)-1);
		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
		size_t delta = ZU(1) << lg_delta;
		size_t delta_mask = delta - 1;
		size_t usize = (size + delta_mask) & ~delta_mask;
		return (usize);
	}
}

JEMALLOC_ALWAYS_INLINE size_t
s2u_lookup(size_t size)
{
	size_t ret = index2size_lookup(size2index_lookup(size));

	assert(ret == s2u_compute(size));
	return (ret);
}

/*
 * Compute usable size that would result from allocating an object with the
 * specified size.
 */
JEMALLOC_ALWAYS_INLINE size_t
s2u(size_t size)
{

	assert(size > 0);
	if (likely(size <= LOOKUP_MAXCLASS))
		return (s2u_lookup(size));
	return (s2u_compute(size));
}
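
/*
 * Example (illustrative, same assumptions as above): s2u(70) computes
 * lg_delta == 4, so usize == (70 + 15) & ~15 == 80, the 80-byte size class.
 */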

/*
 * Compute usable size that would result from allocating an object with the
 * specified size and alignment.
 */
JEMALLOC_ALWAYS_INLINE size_t
sa2u(size_t size, size_t alignment)
{
	size_t usize;

	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);

	/* Try for a small size class. */
	if (size <= SMALL_MAXCLASS && alignment < PAGE) {
		/*
		 * Round size up to the nearest multiple of alignment.
		 *
		 * This done, we can take advantage of the fact that for each
		 * small size class, every object is aligned at the smallest
		 * power of two that is non-zero in the base two representation
		 * of the size.  For example:
		 *
		 *   Size |   Base 2 | Minimum alignment
		 *   -----+----------+------------------
		 *     96 |  1100000 |  32
		 *    144 | 10010000 |  16
		 *    192 | 11000000 |  64
		 */
		usize = s2u(ALIGNMENT_CEILING(size, alignment));
		if (usize < LARGE_MINCLASS)
			return (usize);
	}

	/* Try for a large size class. */
	if (likely(size <= large_maxclass) && likely(alignment < chunksize)) {
		/*
		 * We can't achieve subpage alignment, so round up alignment
		 * to the minimum that can actually be supported.
		 */
		alignment = PAGE_CEILING(alignment);

		/* Make sure result is a large size class. */
		usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);

		/*
		 * Calculate the size of the over-size run that arena_palloc()
		 * would need to allocate in order to guarantee the alignment.
		 */
		if (usize + large_pad + alignment - PAGE <= arena_maxrun)
			return (usize);
	}

	/* Huge size class.  Beware of overflow. */

	if (unlikely(alignment > HUGE_MAXCLASS))
		return (0);

	/*
	 * We can't achieve subchunk alignment, so round up alignment to the
	 * minimum that can actually be supported.
	 */
	alignment = CHUNK_CEILING(alignment);

	/* Make sure result is a huge size class. */
	if (size <= chunksize)
		usize = chunksize;
	else {
		usize = s2u(size);
		if (usize < size) {
			/* size_t overflow. */
			return (0);
		}
	}

	/*
	 * Calculate the multi-chunk mapping that huge_palloc() would need in
	 * order to guarantee the alignment.
	 */
	if (usize + alignment - PAGE < usize) {
		/* size_t overflow. */
		return (0);
	}
	return (usize);
}
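
/*
 * Example (illustrative): sa2u(50, 32) takes the small-class path:
 * ALIGNMENT_CEILING(50, 32) == 64 and s2u(64) == 64; since the lowest set
 * bit of 64 is 64, every object of that class is at least 64-byte aligned,
 * which satisfies the requested 32-byte alignment.
 */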

/* Choose an arena based on a per-thread value. */
JEMALLOC_INLINE arena_t *
arena_choose(tsd_t *tsd, arena_t *arena)
{
	arena_t *ret;

	if (arena != NULL)
		return (arena);

	if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
		ret = arena_choose_hard(tsd);

	return (ret);
}

JEMALLOC_INLINE arena_tdata_t *
arena_tdata_get(tsd_t *tsd, unsigned ind, bool refresh_if_missing)
{
	arena_tdata_t *tdata;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);

	if (unlikely(arenas_tdata == NULL)) {
		/* arenas_tdata hasn't been initialized yet. */
		return (arena_tdata_get_hard(tsd, ind));
	}
	if (unlikely(ind >= tsd_narenas_tdata_get(tsd))) {
		/*
		 * ind is invalid, the cache is stale (too small), or the
		 * tdata has yet to be initialized.
		 */
		return (refresh_if_missing ? arena_tdata_get_hard(tsd, ind) :
		    NULL);
	}

	tdata = &arenas_tdata[ind];
	if (likely(tdata != NULL) || !refresh_if_missing)
		return (tdata);
	return (arena_tdata_get_hard(tsd, ind));
}

JEMALLOC_INLINE arena_t *
arena_get(unsigned ind, bool init_if_missing)
{
	arena_t *ret;

	assert(ind <= MALLOCX_ARENA_MAX);

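	/*
	 * The unsynchronized read below is a fast-path optimization: arena
	 * pointers transition once from NULL to non-NULL, so a stale NULL is
	 * the only inconsistency this read can observe, and the atomic
	 * re-read resolves it.
	 */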
	ret = arenas[ind];
	if (unlikely(ret == NULL)) {
		ret = atomic_read_p((void *)&arenas[ind]);
		if (init_if_missing && unlikely(ret == NULL))
			ret = arena_init(ind);
	}
	return (ret);
}

JEMALLOC_INLINE ticker_t *
decay_ticker_get(tsd_t *tsd, unsigned ind)
{
	arena_tdata_t *tdata;

	tdata = arena_tdata_get(tsd, ind, true);
	if (unlikely(tdata == NULL))
		return (NULL);
	return (&tdata->decay_ticker);
}
#endif

#include "jemalloc/internal/bitmap.h"
/*
 * Include portions of arena.h interleaved with tcache.h in order to resolve
 * circular dependencies.
 */
#define	JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_A
#include "jemalloc/internal/tcache.h"
#define	JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/arena.h"
#undef JEMALLOC_ARENA_INLINE_B
#include "jemalloc/internal/hash.h"
#include "jemalloc/internal/quarantine.h"

#ifndef JEMALLOC_ENABLE_INLINE
arena_t	*iaalloc(const void *ptr);
size_t	isalloc(const void *ptr, bool demote);
void	*iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena, bool slow_path);
void	*imalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache,
    arena_t *arena);
void	*imalloc(tsd_t *tsd, size_t size, szind_t ind, bool slow_path);
void	*icalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache,
    arena_t *arena);
void	*icalloc(tsd_t *tsd, size_t size, szind_t ind);
void	*ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena);
void	*ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena);
void	*ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
size_t	ivsalloc(const void *ptr, bool demote);
size_t	u2rz(size_t usize);
size_t	p2rz(const void *ptr);
void	idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
    bool slow_path);
void	idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
void	idalloc(tsd_t *tsd, void *ptr);
void	iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path);
void	isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void	isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
void	*iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, tcache_t *tcache,
    arena_t *arena);
void	*iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
void	*iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t alignment, bool zero);
bool	ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
JEMALLOC_ALWAYS_INLINE arena_t *
iaalloc(const void *ptr)
{

	assert(ptr != NULL);

	return (arena_aalloc(ptr));
}

/*
 * Typical usage:
 *   void *ptr = [...]
 *   size_t sz = isalloc(ptr, config_prof);
 */
JEMALLOC_ALWAYS_INLINE size_t
isalloc(const void *ptr, bool demote)
{

	assert(ptr != NULL);
	/* Demotion only makes sense if config_prof is true. */
	assert(config_prof || !demote);

	return (arena_salloc(ptr, demote));
}

JEMALLOC_ALWAYS_INLINE void *
iallocztm(tsd_t *tsd, size_t size, szind_t ind, bool zero, tcache_t *tcache,
    bool is_metadata, arena_t *arena, bool slow_path)
{
	void *ret;

	assert(size != 0);

	ret = arena_malloc(tsd, arena, size, ind, zero, tcache, slow_path);
	if (config_stats && is_metadata && likely(ret != NULL)) {
		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
		    config_prof));
	}
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
imalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, arena_t *arena)
{

	return (iallocztm(tsd, size, ind, false, tcache, false, arena, true));
}

JEMALLOC_ALWAYS_INLINE void *
imalloc(tsd_t *tsd, size_t size, szind_t ind, bool slow_path)
{

	return (iallocztm(tsd, size, ind, false, tcache_get(tsd, true), false,
	    NULL, slow_path));
}

JEMALLOC_ALWAYS_INLINE void *
icalloct(tsd_t *tsd, size_t size, szind_t ind, tcache_t *tcache, arena_t *arena)
{

	return (iallocztm(tsd, size, ind, true, tcache, false, arena, true));
}

JEMALLOC_ALWAYS_INLINE void *
icalloc(tsd_t *tsd, size_t size, szind_t ind)
{

	return (iallocztm(tsd, size, ind, true, tcache_get(tsd, true), false,
	    NULL, true));
}

JEMALLOC_ALWAYS_INLINE void *
ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, bool is_metadata, arena_t *arena)
{
	void *ret;

	assert(usize != 0);
	assert(usize == sa2u(usize, alignment));

	ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
	if (config_stats && is_metadata && likely(ret != NULL)) {
		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
		    config_prof));
	}
	return (ret);
}

JEMALLOC_ALWAYS_INLINE void *
ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena)
{

	return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
}

JEMALLOC_ALWAYS_INLINE void *
ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
{

	return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd, true),
	    false, NULL));
}

JEMALLOC_ALWAYS_INLINE size_t
ivsalloc(const void *ptr, bool demote)
{
	extent_node_t *node;

	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
	node = chunk_lookup(ptr, false);
	if (node == NULL)
		return (0);
	/* Only arena chunks should be looked up via interior pointers. */
	assert(extent_node_addr_get(node) == ptr ||
	    extent_node_achunk_get(node));

	return (isalloc(ptr, demote));
}

JEMALLOC_INLINE size_t
u2rz(size_t usize)
{
	size_t ret;

	if (usize <= SMALL_MAXCLASS) {
		szind_t binind = size2index(usize);
		ret = arena_bin_info[binind].redzone_size;
	} else
		ret = 0;

	return (ret);
}

JEMALLOC_INLINE size_t
p2rz(const void *ptr)
{
	size_t usize = isalloc(ptr, false);

	return (u2rz(usize));
}

JEMALLOC_ALWAYS_INLINE void
idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata,
    bool slow_path)
{

	assert(ptr != NULL);
	if (config_stats && is_metadata) {
		arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
		    config_prof));
	}

	arena_dalloc(tsd, ptr, tcache, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
{

	idalloctm(tsd, ptr, tcache, false, true);
}

JEMALLOC_ALWAYS_INLINE void
idalloc(tsd_t *tsd, void *ptr)
{

	idalloctm(tsd, ptr, tcache_get(tsd, false), false, true);
}

JEMALLOC_ALWAYS_INLINE void
iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{

	if (slow_path && config_fill && unlikely(opt_quarantine))
		quarantine(tsd, ptr);
	else
		idalloctm(tsd, ptr, tcache, false, slow_path);
}

JEMALLOC_ALWAYS_INLINE void
isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{

	arena_sdalloc(tsd, ptr, size, tcache);
}

JEMALLOC_ALWAYS_INLINE void
isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
{

	if (config_fill && unlikely(opt_quarantine))
		quarantine(tsd, ptr);
	else
		isdalloct(tsd, ptr, size, tcache);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
{
	void *p;
	size_t usize, copysize;

	usize = sa2u(size + extra, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
		return (NULL);
	p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
	if (p == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, without extra this time. */
		usize = sa2u(size, alignment);
		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
			return (NULL);
		p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
		if (p == NULL)
			return (NULL);
	}
	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(p, ptr, copysize);
	isqalloc(tsd, ptr, oldsize, tcache);
	return (p);
}

JEMALLOC_ALWAYS_INLINE void *
iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero, tcache_t *tcache, arena_t *arena)
{

	assert(ptr != NULL);
	assert(size != 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/*
		 * Existing object alignment is inadequate; allocate new space
		 * and copy.
		 */
		return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
		    zero, tcache, arena));
	}

	return (arena_ralloc(tsd, arena, ptr, oldsize, size, alignment, zero,
	    tcache));
}

JEMALLOC_ALWAYS_INLINE void *
iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
    bool zero)
{

	return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
	    tcache_get(tsd, true), NULL));
}

JEMALLOC_ALWAYS_INLINE bool
ixalloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t extra,
    size_t alignment, bool zero)
{

	assert(ptr != NULL);
	assert(size != 0);

	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
	    != 0) {
		/* Existing object alignment is inadequate. */
		return (true);
	}

	return (arena_ralloc_no_move(tsd, ptr, oldsize, size, extra, zero));
}
#endif

#include "jemalloc/internal/prof.h"

#undef JEMALLOC_H_INLINES
/******************************************************************************/
#endif /* JEMALLOC_INTERNAL_H */