      1 #ifndef JEMALLOC_INTERNAL_H
      2 #define	JEMALLOC_INTERNAL_H
      3 
      4 #include "jemalloc_internal_defs.h"
      5 #include "jemalloc/internal/jemalloc_internal_decls.h"
      6 
      7 #ifdef JEMALLOC_UTRACE
      8 #include <sys/ktrace.h>
      9 #endif
     10 
     11 #define	JEMALLOC_NO_DEMANGLE
     12 #ifdef JEMALLOC_JET
     13 #  define JEMALLOC_N(n) jet_##n
     14 #  include "jemalloc/internal/public_namespace.h"
     15 #  define JEMALLOC_NO_RENAME
     16 #  include "../jemalloc.h"
     17 #  undef JEMALLOC_NO_RENAME
     18 #else
     19 #  define JEMALLOC_N(n) je_##n
     20 #  include "../jemalloc.h"
     21 #endif
     22 #include "jemalloc/internal/private_namespace.h"
     23 
     24 static const bool config_debug =
     25 #ifdef JEMALLOC_DEBUG
     26     true
     27 #else
     28     false
     29 #endif
     30     ;
     31 static const bool have_dss =
     32 #ifdef JEMALLOC_DSS
     33     true
     34 #else
     35     false
     36 #endif
     37     ;
     38 static const bool config_fill =
     39 #ifdef JEMALLOC_FILL
     40     true
     41 #else
     42     false
     43 #endif
     44     ;
     45 static const bool config_lazy_lock =
     46 #ifdef JEMALLOC_LAZY_LOCK
     47     true
     48 #else
     49     false
     50 #endif
     51     ;
     52 static const bool config_prof =
     53 #ifdef JEMALLOC_PROF
     54     true
     55 #else
     56     false
     57 #endif
     58     ;
     59 static const bool config_prof_libgcc =
     60 #ifdef JEMALLOC_PROF_LIBGCC
     61     true
     62 #else
     63     false
     64 #endif
     65     ;
     66 static const bool config_prof_libunwind =
     67 #ifdef JEMALLOC_PROF_LIBUNWIND
     68     true
     69 #else
     70     false
     71 #endif
     72     ;
     73 static const bool config_munmap =
     74 #ifdef JEMALLOC_MUNMAP
     75     true
     76 #else
     77     false
     78 #endif
     79     ;
     80 static const bool config_stats =
     81 #ifdef JEMALLOC_STATS
     82     true
     83 #else
     84     false
     85 #endif
     86     ;
     87 static const bool config_tcache =
     88 #ifdef JEMALLOC_TCACHE
     89     true
     90 #else
     91     false
     92 #endif
     93     ;
     94 static const bool config_tls =
     95 #ifdef JEMALLOC_TLS
     96     true
     97 #else
     98     false
     99 #endif
    100     ;
    101 static const bool config_utrace =
    102 #ifdef JEMALLOC_UTRACE
    103     true
    104 #else
    105     false
    106 #endif
    107     ;
    108 static const bool config_valgrind =
    109 #ifdef JEMALLOC_VALGRIND
    110     true
    111 #else
    112     false
    113 #endif
    114     ;
    115 static const bool config_xmalloc =
    116 #ifdef JEMALLOC_XMALLOC
    117     true
    118 #else
    119     false
    120 #endif
    121     ;
    122 static const bool config_ivsalloc =
    123 #ifdef JEMALLOC_IVSALLOC
    124     true
    125 #else
    126     false
    127 #endif
    128     ;
    129 static const bool config_cache_oblivious =
    130 #ifdef JEMALLOC_CACHE_OBLIVIOUS
    131     true
    132 #else
    133     false
    134 #endif
    135     ;
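        /*
         * Note: the config_* constants above exist so that feature checks can
         * be written as plain conditionals, e.g.
         *
         *     if (config_debug)
         *         assert(...);
         *
         * rather than as #ifdef blocks; when a feature is disabled the
         * compiler sees a constant false and eliminates the dead branch, so
         * the test has no runtime cost.
         */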
    136 
    137 #ifdef JEMALLOC_C11ATOMICS
    138 #include <stdatomic.h>
    139 #endif
    140 
    141 #ifdef JEMALLOC_ATOMIC9
    142 #include <machine/atomic.h>
    143 #endif
    144 
    145 #if (defined(JEMALLOC_OSATOMIC) || defined(JEMALLOC_OSSPIN))
    146 #include <libkern/OSAtomic.h>
    147 #endif
    148 
    149 #ifdef JEMALLOC_ZONE
    150 #include <mach/mach_error.h>
    151 #include <mach/mach_init.h>
    152 #include <mach/vm_map.h>
    153 #include <malloc/malloc.h>
    154 #endif
    155 
    156 #define	RB_COMPACT
    157 #include "jemalloc/internal/rb.h"
    158 #include "jemalloc/internal/qr.h"
    159 #include "jemalloc/internal/ql.h"
    160 
    161 /*
    162  * jemalloc can conceptually be broken into components (arena, tcache, etc.),
    163  * but there are circular dependencies that cannot be broken without
    164  * substantial performance degradation.  In order to reduce the effect on
    165  * visual code flow, read the header files in multiple passes, with one of the
    166  * following cpp variables defined during each pass:
    167  *
    168  *   JEMALLOC_H_TYPES   : Preprocessor-defined constants and pseudo-opaque data
    169  *                        types.
    170  *   JEMALLOC_H_STRUCTS : Data structures.
    171  *   JEMALLOC_H_EXTERNS : Extern data declarations and function prototypes.
    172  *   JEMALLOC_H_INLINES : Inline functions.
    173  */
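        /*
         * For example, each internal header contains sections of the form:
         *
         *     #ifdef JEMALLOC_H_TYPES
         *     ...typedefs and constants...
         *     #endif
         *     #ifdef JEMALLOC_H_STRUCTS
         *     ...struct definitions...
         *     #endif
         *
         * and likewise for JEMALLOC_H_EXTERNS and JEMALLOC_H_INLINES, so only
         * the section matching the current pass is read.
         */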
    174 /******************************************************************************/
    175 #define	JEMALLOC_H_TYPES
    176 
    177 #include "jemalloc/internal/jemalloc_internal_macros.h"
    178 
    179 /* Size class index type. */
    180 typedef unsigned index_t;
    181 
    182 /*
    183  * Flags bits:
    184  *
    185  * a: arena
    186  * t: tcache
    187  * 0: unused
    188  * z: zero
    189  * n: alignment
    190  *
    191  * aaaaaaaa aaaatttt tttttttt 0znnnnnn
    192  */
    193 #define	MALLOCX_ARENA_MASK	((int)~0xfffff)
    194 #define	MALLOCX_ARENA_MAX	0xffe
    195 #define	MALLOCX_TCACHE_MASK	((int)~0xfff000ffU)
    196 #define	MALLOCX_TCACHE_MAX	0xffd
    197 #define	MALLOCX_LG_ALIGN_MASK	((int)0x3f)
    198 /* Use MALLOCX_ALIGN_GET() if alignment may not be specified in flags. */
    199 #define	MALLOCX_ALIGN_GET_SPECIFIED(flags)				\
    200     (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK))
    201 #define	MALLOCX_ALIGN_GET(flags)					\
    202     (MALLOCX_ALIGN_GET_SPECIFIED(flags) & (SIZE_T_MAX-1))
    203 #define	MALLOCX_ZERO_GET(flags)						\
    204     ((bool)(flags & MALLOCX_ZERO))
    205 
    206 #define	MALLOCX_TCACHE_GET(flags)					\
    207     (((unsigned)((flags & MALLOCX_TCACHE_MASK) >> 8)) - 2)
    208 #define	MALLOCX_ARENA_GET(flags)					\
    209     (((unsigned)(((unsigned)flags) >> 20)) - 1)
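        /*
         * Illustrative decoding example: if the low six bits of flags hold 6
         * (e.g. via MALLOCX_LG_ALIGN(6)), MALLOCX_ALIGN_GET_SPECIFIED() yields
         * ZU(1) << 6 == 64.  The arena and tcache fields are stored biased (by
         * 1 and 2 respectively) so that a zero field can be treated as
         * unspecified, which is why MALLOCX_ARENA_GET() and
         * MALLOCX_TCACHE_GET() subtract those constants.
         */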
    210 
    211 /* Smallest size class to support. */
    212 #define	TINY_MIN		(1U << LG_TINY_MIN)
    213 
    214 /*
    215  * Minimum allocation alignment is 2^LG_QUANTUM bytes (ignoring tiny size
    216  * classes).
    217  */
    218 #ifndef LG_QUANTUM
    219 #  if (defined(__i386__) || defined(_M_IX86))
    220 #    define LG_QUANTUM		4
    221 #  endif
    222 #  ifdef __ia64__
    223 #    define LG_QUANTUM		4
    224 #  endif
    225 #  ifdef __alpha__
    226 #    define LG_QUANTUM		4
    227 #  endif
    228 #  ifdef __sparc64__
    229 #    define LG_QUANTUM		4
    230 #  endif
    231 #  if (defined(__amd64__) || defined(__x86_64__) || defined(_M_X64))
    232 #    define LG_QUANTUM		4
    233 #  endif
    234 #  ifdef __arm__
    235 #    define LG_QUANTUM		3
    236 #  endif
    237 #  ifdef __aarch64__
    238 #    define LG_QUANTUM		4
    239 #  endif
    240 #  ifdef __hppa__
    241 #    define LG_QUANTUM		4
    242 #  endif
    243 #  ifdef __mips__
    244 #    define LG_QUANTUM		3
    245 #  endif
    246 #  ifdef __or1k__
    247 #    define LG_QUANTUM		3
    248 #  endif
    249 #  ifdef __powerpc__
    250 #    define LG_QUANTUM		4
    251 #  endif
    252 #  ifdef __s390__
    253 #    define LG_QUANTUM		4
    254 #  endif
    255 #  ifdef __SH4__
    256 #    define LG_QUANTUM		4
    257 #  endif
    258 #  ifdef __tile__
    259 #    define LG_QUANTUM		4
    260 #  endif
    261 #  ifdef __le32__
    262 #    define LG_QUANTUM		4
    263 #  endif
    264 #  ifndef LG_QUANTUM
    265 #    error "Unknown minimum alignment for architecture; specify via "
    266 	 "--with-lg-quantum"
    267 #  endif
    268 #endif
    269 
    270 #define	QUANTUM			((size_t)(1U << LG_QUANTUM))
    271 #define	QUANTUM_MASK		(QUANTUM - 1)
    272 
    273 /* Return the smallest quantum multiple that is >= a. */
    274 #define	QUANTUM_CEILING(a)						\
    275 	(((a) + QUANTUM_MASK) & ~QUANTUM_MASK)
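        /* E.g., with LG_QUANTUM == 4: QUANTUM_CEILING(9) == 16 and
         * QUANTUM_CEILING(16) == 16. */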
    276 
    277 #define	LONG			((size_t)(1U << LG_SIZEOF_LONG))
    278 #define	LONG_MASK		(LONG - 1)
    279 
    280 /* Return the smallest long multiple that is >= a. */
    281 #define	LONG_CEILING(a)							\
    282 	(((a) + LONG_MASK) & ~LONG_MASK)
    283 
    284 #define	SIZEOF_PTR		(1U << LG_SIZEOF_PTR)
    285 #define	PTR_MASK		(SIZEOF_PTR - 1)
    286 
    287 /* Return the smallest (void *) multiple that is >= a. */
    288 #define	PTR_CEILING(a)							\
    289 	(((a) + PTR_MASK) & ~PTR_MASK)
    290 
    291 /*
    292  * Maximum size of L1 cache line.  This is used to avoid cache line aliasing.
    293  * In addition, this controls the spacing of cacheline-spaced size classes.
    294  *
    295  * CACHELINE cannot be based on LG_CACHELINE because __declspec(align()) can
    296  * only handle raw constants.
    297  */
    298 #define	LG_CACHELINE		6
    299 #define	CACHELINE		64
    300 #define	CACHELINE_MASK		(CACHELINE - 1)
    301 
    302 /* Return the smallest cacheline multiple that is >= s. */
    303 #define	CACHELINE_CEILING(s)						\
    304 	(((s) + CACHELINE_MASK) & ~CACHELINE_MASK)
    305 
    306 /* Page size.  LG_PAGE is determined by the configure script. */
    307 #ifdef PAGE_MASK
    308 #  undef PAGE_MASK
    309 #endif
    310 #define	PAGE		((size_t)(1U << LG_PAGE))
    311 #define	PAGE_MASK	((size_t)(PAGE - 1))
    312 
    313 /* Return the smallest pagesize multiple that is >= s. */
    314 #define	PAGE_CEILING(s)							\
    315 	(((s) + PAGE_MASK) & ~PAGE_MASK)
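        /* E.g., with LG_PAGE == 12 (4 KiB pages), PAGE_CEILING(5000) == 8192. */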
    316 
    317 /* Return the nearest aligned address at or below a. */
    318 #define	ALIGNMENT_ADDR2BASE(a, alignment)				\
    319 	((void *)((uintptr_t)(a) & (-(alignment))))
    320 
    321 /* Return the offset between a and the nearest aligned address at or below a. */
    322 #define	ALIGNMENT_ADDR2OFFSET(a, alignment)				\
    323 	((size_t)((uintptr_t)(a) & (alignment - 1)))
    324 
    325 /* Return the smallest alignment multiple that is >= s. */
    326 #define	ALIGNMENT_CEILING(s, alignment)					\
    327 	(((s) + (alignment - 1)) & (-(alignment)))
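        /*
         * E.g., for a power-of-two alignment of 64: ALIGNMENT_ADDR2BASE(0x1234,
         * 64) == 0x1200, ALIGNMENT_ADDR2OFFSET(0x1234, 64) == 0x34, and
         * ALIGNMENT_CEILING(100, 64) == 128.
         */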
    328 
    329 /* Declare a variable-length array. */
    330 #if __STDC_VERSION__ < 199901L
    331 #  ifdef _MSC_VER
    332 #    include <malloc.h>
    333 #    define alloca _alloca
    334 #  else
    335 #    ifdef JEMALLOC_HAS_ALLOCA_H
    336 #      include <alloca.h>
    337 #    else
    338 #      include <stdlib.h>
    339 #    endif
    340 #  endif
    341 #  define VARIABLE_ARRAY(type, name, count) \
    342 	type *name = alloca(sizeof(type) * (count))
    343 #else
    344 #  define VARIABLE_ARRAY(type, name, count) type name[(count)]
    345 #endif
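        /*
         * Usage sketch (hypothetical): VARIABLE_ARRAY(bool, touched, nbins)
         * declares an array of nbins bools with automatic storage -- a C99 VLA
         * where available, and alloca() otherwise.
         */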
    346 
    347 #include "jemalloc/internal/valgrind.h"
    348 #include "jemalloc/internal/util.h"
    349 #include "jemalloc/internal/atomic.h"
    350 #include "jemalloc/internal/prng.h"
    351 #include "jemalloc/internal/ckh.h"
    352 #include "jemalloc/internal/size_classes.h"
    353 #include "jemalloc/internal/stats.h"
    354 #include "jemalloc/internal/ctl.h"
    355 #include "jemalloc/internal/mutex.h"
    356 #include "jemalloc/internal/tsd.h"
    357 #include "jemalloc/internal/mb.h"
    358 #include "jemalloc/internal/extent.h"
    359 #include "jemalloc/internal/arena.h"
    360 #include "jemalloc/internal/bitmap.h"
    361 #include "jemalloc/internal/base.h"
    362 #include "jemalloc/internal/rtree.h"
    363 #include "jemalloc/internal/chunk.h"
    364 #include "jemalloc/internal/huge.h"
    365 #include "jemalloc/internal/tcache.h"
    366 #include "jemalloc/internal/hash.h"
    367 #include "jemalloc/internal/quarantine.h"
    368 #include "jemalloc/internal/prof.h"
    369 
    370 #undef JEMALLOC_H_TYPES
    371 /******************************************************************************/
    372 #define	JEMALLOC_H_STRUCTS
    373 
    374 #include "jemalloc/internal/valgrind.h"
    375 #include "jemalloc/internal/util.h"
    376 #include "jemalloc/internal/atomic.h"
    377 #include "jemalloc/internal/prng.h"
    378 #include "jemalloc/internal/ckh.h"
    379 #include "jemalloc/internal/size_classes.h"
    380 #include "jemalloc/internal/stats.h"
    381 #include "jemalloc/internal/ctl.h"
    382 #include "jemalloc/internal/mutex.h"
    383 #include "jemalloc/internal/mb.h"
    384 #include "jemalloc/internal/bitmap.h"
    385 #define	JEMALLOC_ARENA_STRUCTS_A
    386 #include "jemalloc/internal/arena.h"
    387 #undef JEMALLOC_ARENA_STRUCTS_A
    388 #include "jemalloc/internal/extent.h"
    389 #define	JEMALLOC_ARENA_STRUCTS_B
    390 #include "jemalloc/internal/arena.h"
    391 #undef JEMALLOC_ARENA_STRUCTS_B
    392 #include "jemalloc/internal/base.h"
    393 #include "jemalloc/internal/rtree.h"
    394 #include "jemalloc/internal/chunk.h"
    395 #include "jemalloc/internal/huge.h"
    396 #include "jemalloc/internal/tcache.h"
    397 #include "jemalloc/internal/hash.h"
    398 #include "jemalloc/internal/quarantine.h"
    399 #include "jemalloc/internal/prof.h"
    400 
    401 #include "jemalloc/internal/tsd.h"
    402 
    403 #undef JEMALLOC_H_STRUCTS
    404 /******************************************************************************/
    405 #define	JEMALLOC_H_EXTERNS
    406 
    407 extern bool	opt_abort;
    408 extern const char	*opt_junk;
    409 extern bool	opt_junk_alloc;
    410 extern bool	opt_junk_free;
    411 extern size_t	opt_quarantine;
    412 extern bool	opt_redzone;
    413 extern bool	opt_utrace;
    414 extern bool	opt_xmalloc;
    415 extern bool	opt_zero;
    416 extern size_t	opt_narenas;
    417 
    418 extern bool	in_valgrind;
    419 
    420 /* Number of CPUs. */
    421 extern unsigned		ncpus;
    422 
    423 /*
    424  * index2size_tab encodes the same information as could be computed (at
    425  * unacceptable cost in some code paths) by index2size_compute().
    426  */
    427 extern size_t const	index2size_tab[NSIZES];
    428 /*
    429  * size2index_tab is a compact lookup table that rounds request sizes up to
    430  * size classes.  In order to reduce cache footprint, the table is compressed,
    431  * and all accesses are via size2index().
    432  */
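        /*
         * For example, assuming LG_TINY_MIN == 3, size2index_lookup() (below)
         * maps a 100 byte request to size2index_tab[(100-1) >> 3], i.e. entry
         * 12; each table entry covers an 8-byte span of request sizes.
         */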
    433 extern uint8_t const	size2index_tab[];
    434 
    435 arena_t	*a0get(void);
    436 void	*a0malloc(size_t size);
    437 void	a0dalloc(void *ptr);
    438 void	*bootstrap_malloc(size_t size);
    439 void	*bootstrap_calloc(size_t num, size_t size);
    440 void	bootstrap_free(void *ptr);
    441 arena_t	*arenas_extend(unsigned ind);
    442 arena_t	*arena_init(unsigned ind);
    443 unsigned	narenas_total_get(void);
    444 arena_t	*arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing);
    445 arena_t	*arena_choose_hard(tsd_t *tsd);
    446 void	arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind);
    447 unsigned	arena_nbound(unsigned ind);
    448 void	thread_allocated_cleanup(tsd_t *tsd);
    449 void	thread_deallocated_cleanup(tsd_t *tsd);
    450 void	arena_cleanup(tsd_t *tsd);
    451 void	arenas_cache_cleanup(tsd_t *tsd);
    452 void	narenas_cache_cleanup(tsd_t *tsd);
    453 void	arenas_cache_bypass_cleanup(tsd_t *tsd);
    454 void	jemalloc_prefork(void);
    455 void	jemalloc_postfork_parent(void);
    456 void	jemalloc_postfork_child(void);
    457 
    458 #include "jemalloc/internal/valgrind.h"
    459 #include "jemalloc/internal/util.h"
    460 #include "jemalloc/internal/atomic.h"
    461 #include "jemalloc/internal/prng.h"
    462 #include "jemalloc/internal/ckh.h"
    463 #include "jemalloc/internal/size_classes.h"
    464 #include "jemalloc/internal/stats.h"
    465 #include "jemalloc/internal/ctl.h"
    466 #include "jemalloc/internal/mutex.h"
    467 #include "jemalloc/internal/mb.h"
    468 #include "jemalloc/internal/bitmap.h"
    469 #include "jemalloc/internal/extent.h"
    470 #include "jemalloc/internal/arena.h"
    471 #include "jemalloc/internal/base.h"
    472 #include "jemalloc/internal/rtree.h"
    473 #include "jemalloc/internal/chunk.h"
    474 #include "jemalloc/internal/huge.h"
    475 #include "jemalloc/internal/tcache.h"
    476 #include "jemalloc/internal/hash.h"
    477 #include "jemalloc/internal/quarantine.h"
    478 #include "jemalloc/internal/prof.h"
    479 #include "jemalloc/internal/tsd.h"
    480 
    481 #undef JEMALLOC_H_EXTERNS
    482 /******************************************************************************/
    483 #define	JEMALLOC_H_INLINES
    484 
    485 #include "jemalloc/internal/valgrind.h"
    486 #include "jemalloc/internal/util.h"
    487 #include "jemalloc/internal/atomic.h"
    488 #include "jemalloc/internal/prng.h"
    489 #include "jemalloc/internal/ckh.h"
    490 #include "jemalloc/internal/size_classes.h"
    491 #include "jemalloc/internal/stats.h"
    492 #include "jemalloc/internal/ctl.h"
    493 #include "jemalloc/internal/mutex.h"
    494 #include "jemalloc/internal/tsd.h"
    495 #include "jemalloc/internal/mb.h"
    496 #include "jemalloc/internal/extent.h"
    497 #include "jemalloc/internal/base.h"
    498 #include "jemalloc/internal/rtree.h"
    499 #include "jemalloc/internal/chunk.h"
    500 #include "jemalloc/internal/huge.h"
    501 
    502 #ifndef JEMALLOC_ENABLE_INLINE
    503 index_t	size2index_compute(size_t size);
    504 index_t	size2index_lookup(size_t size);
    505 index_t	size2index(size_t size);
    506 size_t	index2size_compute(index_t index);
    507 size_t	index2size_lookup(index_t index);
    508 size_t	index2size(index_t index);
    509 size_t	s2u_compute(size_t size);
    510 size_t	s2u_lookup(size_t size);
    511 size_t	s2u(size_t size);
    512 size_t	sa2u(size_t size, size_t alignment);
    513 arena_t	*arena_choose(tsd_t *tsd, arena_t *arena);
    514 arena_t	*arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
    515     bool refresh_if_missing);
    516 #endif
    517 
    518 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
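        /*
         * Worked example for size2index_compute(), assuming the common
         * configuration LG_QUANTUM == 4, LG_SIZE_CLASS_GROUP == 2, NTBINS == 1:
         * for size == 100, x = lg_floor(199) == 7, shift == 1, grp == 4,
         * lg_delta == 4, mod == 2, so the result is NTBINS + 6 == 7, the index
         * of the 112-byte size class.
         */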
    519 JEMALLOC_INLINE index_t
    520 size2index_compute(size_t size)
    521 {
    522 
    523 #if (NTBINS != 0)
    524 	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
    525 		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
    526 		size_t lg_ceil = lg_floor(pow2_ceil(size));
    527 		return (lg_ceil < lg_tmin ? 0 : lg_ceil - lg_tmin);
    528 	} else
    529 #endif
    530 	{
    531 		size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
    532 		    (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
    533 		    : lg_floor((size<<1)-1);
    534 		size_t shift = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM) ? 0 :
    535 		    x - (LG_SIZE_CLASS_GROUP + LG_QUANTUM);
    536 		size_t grp = shift << LG_SIZE_CLASS_GROUP;
    537 
    538 		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
    539 		    ? LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
    540 
    541 		size_t delta_inverse_mask = ZI(-1) << lg_delta;
    542 		size_t mod = ((((size-1) & delta_inverse_mask) >> lg_delta)) &
    543 		    ((ZU(1) << LG_SIZE_CLASS_GROUP) - 1);
    544 
    545 		size_t index = NTBINS + grp + mod;
    546 		return (index);
    547 	}
    548 }
    549 
    550 JEMALLOC_ALWAYS_INLINE index_t
    551 size2index_lookup(size_t size)
    552 {
    553 
    554 	assert(size <= LOOKUP_MAXCLASS);
    555 	{
    556 		size_t ret = ((size_t)(size2index_tab[(size-1) >>
    557 		    LG_TINY_MIN]));
    558 		assert(ret == size2index_compute(size));
    559 		return (ret);
    560 	}
    561 }
    562 
    563 JEMALLOC_ALWAYS_INLINE index_t
    564 size2index(size_t size)
    565 {
    566 
    567 	assert(size > 0);
    568 	if (likely(size <= LOOKUP_MAXCLASS))
    569 		return (size2index_lookup(size));
    570 	else
    571 		return (size2index_compute(size));
    572 }
    573 
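        /*
         * Worked example for index2size_compute(), under the same assumptions
         * as above: for index == 7, reduced_index == 6, grp == 1, mod == 2,
         * grp_size == 64, lg_delta == 4, mod_size == 48, so the result is
         * 64 + 48 == 112, inverting the size2index_compute() example.
         */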
    574 JEMALLOC_INLINE size_t
    575 index2size_compute(index_t index)
    576 {
    577 
    578 #if (NTBINS > 0)
    579 	if (index < NTBINS)
    580 		return (ZU(1) << (LG_TINY_MAXCLASS - NTBINS + 1 + index));
    581 	else
    582 #endif
    583 	{
    584 		size_t reduced_index = index - NTBINS;
    585 		size_t grp = reduced_index >> LG_SIZE_CLASS_GROUP;
    586 		size_t mod = reduced_index & ((ZU(1) << LG_SIZE_CLASS_GROUP) -
    587 		    1);
    588 
    589 		size_t grp_size_mask = ~((!!grp)-1);
    590 		size_t grp_size = ((ZU(1) << (LG_QUANTUM +
    591 		    (LG_SIZE_CLASS_GROUP-1))) << grp) & grp_size_mask;
    592 
    593 		size_t shift = (grp == 0) ? 1 : grp;
    594 		size_t lg_delta = shift + (LG_QUANTUM-1);
    595 		size_t mod_size = (mod+1) << lg_delta;
    596 
    597 		size_t usize = grp_size + mod_size;
    598 		return (usize);
    599 	}
    600 }
    601 
    602 JEMALLOC_ALWAYS_INLINE size_t
    603 index2size_lookup(index_t index)
    604 {
    605 	size_t ret = (size_t)index2size_tab[index];
    606 	assert(ret == index2size_compute(index));
    607 	return (ret);
    608 }
    609 
    610 JEMALLOC_ALWAYS_INLINE size_t
    611 index2size(index_t index)
    612 {
    613 
    614 	assert(index < NSIZES);
    615 	return (index2size_lookup(index));
    616 }
    617 
    618 JEMALLOC_ALWAYS_INLINE size_t
    619 s2u_compute(size_t size)
    620 {
    621 
    622 #if (NTBINS > 0)
    623 	if (size <= (ZU(1) << LG_TINY_MAXCLASS)) {
    624 		size_t lg_tmin = LG_TINY_MAXCLASS - NTBINS + 1;
    625 		size_t lg_ceil = lg_floor(pow2_ceil(size));
    626 		return (lg_ceil < lg_tmin ? (ZU(1) << lg_tmin) :
    627 		    (ZU(1) << lg_ceil));
    628 	} else
    629 #endif
    630 	{
    631 		size_t x = unlikely(ZI(size) < 0) ? ((size<<1) ?
    632 		    (ZU(1)<<(LG_SIZEOF_PTR+3)) : ((ZU(1)<<(LG_SIZEOF_PTR+3))-1))
    633 		    : lg_floor((size<<1)-1);
    634 		size_t lg_delta = (x < LG_SIZE_CLASS_GROUP + LG_QUANTUM + 1)
    635 		    ?  LG_QUANTUM : x - LG_SIZE_CLASS_GROUP - 1;
    636 		size_t delta = ZU(1) << lg_delta;
    637 		size_t delta_mask = delta - 1;
    638 		size_t usize = (size + delta_mask) & ~delta_mask;
    639 		return (usize);
    640 	}
    641 }
    642 
    643 JEMALLOC_ALWAYS_INLINE size_t
    644 s2u_lookup(size_t size)
    645 {
    646 	size_t ret = index2size_lookup(size2index_lookup(size));
    647 
    648 	assert(ret == s2u_compute(size));
    649 	return (ret);
    650 }
    651 
    652 /*
    653  * Compute usable size that would result from allocating an object with the
    654  * specified size.
    655  */
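        /* E.g., assuming the usual 16-byte quantum, s2u(17) == 32 and
         * s2u(100) == 112. */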
    656 JEMALLOC_ALWAYS_INLINE size_t
    657 s2u(size_t size)
    658 {
    659 
    660 	assert(size > 0);
    661 	if (likely(size <= LOOKUP_MAXCLASS))
    662 		return (s2u_lookup(size));
    663 	else
    664 		return (s2u_compute(size));
    665 }
    666 
    667 /*
    668  * Compute usable size that would result from allocating an object with the
    669  * specified size and alignment.
    670  */
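        /*
         * E.g., sa2u(1, 64) takes the small-size path below: the size is
         * rounded up to the 64-byte alignment, s2u(64) == 64, and 64 is
         * returned.  Requests whose alignment is >= PAGE fall through to the
         * large and huge paths.
         */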
    671 JEMALLOC_ALWAYS_INLINE size_t
    672 sa2u(size_t size, size_t alignment)
    673 {
    674 	size_t usize;
    675 
    676 	assert(alignment != 0 && ((alignment - 1) & alignment) == 0);
    677 
    678 	/* Try for a small size class. */
    679 	if (size <= SMALL_MAXCLASS && alignment < PAGE) {
    680 		/*
    681 		 * Round size up to the nearest multiple of alignment.
    682 		 *
    683 		 * This done, we can take advantage of the fact that for each
    684 		 * small size class, every object is aligned at the smallest
    685 		 * power of two that is non-zero in the base two representation
    686 		 * of the size.  For example:
    687 		 *
    688 		 *   Size |   Base 2 | Minimum alignment
    689 		 *   -----+----------+------------------
    690 		 *     96 |  1100000 |  32
    691 		 *    144 | 10100000 |  32
    692 		 *    192 | 11000000 |  64
    693 		 */
    694 		usize = s2u(ALIGNMENT_CEILING(size, alignment));
    695 		if (usize < LARGE_MINCLASS)
    696 			return (usize);
    697 	}
    698 
    699 	/* Try for a large size class. */
    700 	if (likely(size <= arena_maxclass) && likely(alignment < chunksize)) {
    701 		/*
    702 		 * We can't achieve subpage alignment, so round up alignment
    703 		 * to the minimum that can actually be supported.
    704 		 */
    705 		alignment = PAGE_CEILING(alignment);
    706 
    707 		/* Make sure result is a large size class. */
    708 		usize = (size <= LARGE_MINCLASS) ? LARGE_MINCLASS : s2u(size);
    709 
    710 		/*
    711 		 * Calculate the size of the over-size run that arena_palloc()
    712 		 * would need to allocate in order to guarantee the alignment.
    713 		 */
    714 		if (usize + alignment - PAGE <= arena_maxrun)
    715 			return (usize);
    716 	}
    717 
    718 	/* Huge size class.  Beware of size_t overflow. */
    719 
    720 	/*
    721 	 * We can't achieve subchunk alignment, so round up alignment to the
    722 	 * minimum that can actually be supported.
    723 	 */
    724 	alignment = CHUNK_CEILING(alignment);
    725 	if (alignment == 0) {
    726 		/* size_t overflow. */
    727 		return (0);
    728 	}
    729 
    730 	/* Make sure result is a huge size class. */
    731 	if (size <= chunksize)
    732 		usize = chunksize;
    733 	else {
    734 		usize = s2u(size);
    735 		if (usize < size) {
    736 			/* size_t overflow. */
    737 			return (0);
    738 		}
    739 	}
    740 
    741 	/*
    742 	 * Calculate the multi-chunk mapping that huge_palloc() would need in
    743 	 * order to guarantee the alignment.
    744 	 */
    745 	if (usize + alignment - PAGE < usize) {
    746 		/* size_t overflow. */
    747 		return (0);
    748 	}
    749 	return (usize);
    750 }
    751 
    752 /* Choose an arena based on a per-thread value. */
    753 JEMALLOC_INLINE arena_t *
    754 arena_choose(tsd_t *tsd, arena_t *arena)
    755 {
    756 	arena_t *ret;
    757 
    758 	if (arena != NULL)
    759 		return (arena);
    760 
    761 	if (unlikely((ret = tsd_arena_get(tsd)) == NULL))
    762 		ret = arena_choose_hard(tsd);
    763 
    764 	return (ret);
    765 }
    766 
    767 JEMALLOC_INLINE arena_t *
    768 arena_get(tsd_t *tsd, unsigned ind, bool init_if_missing,
    769     bool refresh_if_missing)
    770 {
    771 	arena_t *arena;
    772 	arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
    773 
    774 	/* init_if_missing requires refresh_if_missing. */
    775 	assert(!init_if_missing || refresh_if_missing);
    776 
    777 	if (unlikely(arenas_cache == NULL)) {
    778 		/* arenas_cache hasn't been initialized yet. */
    779 		return (arena_get_hard(tsd, ind, init_if_missing));
    780 	}
    781 	if (unlikely(ind >= tsd_narenas_cache_get(tsd))) {
    782 		/*
    783 		 * ind is invalid, the cache is stale (too small), or the arena
    784 		 * at ind has not been initialized yet.
    785 		 */
    786 		return (refresh_if_missing ? arena_get_hard(tsd, ind,
    787 		    init_if_missing) : NULL);
    788 	}
    789 	arena = arenas_cache[ind];
    790 	if (likely(arena != NULL) || !refresh_if_missing)
    791 		return (arena);
    792 	return (arena_get_hard(tsd, ind, init_if_missing));
    793 }
    794 #endif
    795 
    796 #include "jemalloc/internal/bitmap.h"
    797 /*
    798  * Include portions of arena.h interleaved with tcache.h in order to resolve
    799  * circular dependencies.
    800  */
    801 #define	JEMALLOC_ARENA_INLINE_A
    802 #include "jemalloc/internal/arena.h"
    803 #undef JEMALLOC_ARENA_INLINE_A
    804 #include "jemalloc/internal/tcache.h"
    805 #define	JEMALLOC_ARENA_INLINE_B
    806 #include "jemalloc/internal/arena.h"
    807 #undef JEMALLOC_ARENA_INLINE_B
    808 #include "jemalloc/internal/hash.h"
    809 #include "jemalloc/internal/quarantine.h"
    810 
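        /*
         * Naming note for the allocation wrappers declared below: trailing
         * letters correspond to extra parameters -- "z" takes an explicit zero
         * flag, "t" an explicit tcache, and "m" an is_metadata flag -- while
         * the shorter names supply defaults (e.g. imalloc() calls iallocztm()
         * with zero == false, the thread's tcache, and a NULL arena).
         */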
    811 #ifndef JEMALLOC_ENABLE_INLINE
    812 arena_t	*iaalloc(const void *ptr);
    813 size_t	isalloc(const void *ptr, bool demote);
    814 void	*iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache,
    815     bool is_metadata, arena_t *arena);
    816 void	*imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
    817 void	*imalloc(tsd_t *tsd, size_t size);
    818 void	*icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena);
    819 void	*icalloc(tsd_t *tsd, size_t size);
    820 void	*ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    821     tcache_t *tcache, bool is_metadata, arena_t *arena);
    822 void	*ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    823     tcache_t *tcache, arena_t *arena);
    824 void	*ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero);
    825 size_t	ivsalloc(const void *ptr, bool demote);
    826 size_t	u2rz(size_t usize);
    827 size_t	p2rz(const void *ptr);
    828 void	idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata);
    829 void	idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache);
    830 void	idalloc(tsd_t *tsd, void *ptr);
    831 void	iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache);
    832 void	isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
    833 void	isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache);
    834 void	*iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    835     size_t extra, size_t alignment, bool zero, tcache_t *tcache,
    836     arena_t *arena);
    837 void	*iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    838     size_t alignment, bool zero, tcache_t *tcache, arena_t *arena);
    839 void	*iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
    840     size_t alignment, bool zero);
    841 bool	ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra,
    842     size_t alignment, bool zero);
    843 #endif
    844 
    845 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_C_))
    846 JEMALLOC_ALWAYS_INLINE arena_t *
    847 iaalloc(const void *ptr)
    848 {
    849 
    850 	assert(ptr != NULL);
    851 
    852 	return (arena_aalloc(ptr));
    853 }
    854 
    855 /*
    856  * Typical usage:
    857  *   void *ptr = [...]
    858  *   size_t sz = isalloc(ptr, config_prof);
    859  */
    860 JEMALLOC_ALWAYS_INLINE size_t
    861 isalloc(const void *ptr, bool demote)
    862 {
    863 
    864 	assert(ptr != NULL);
    865 	/* Demotion only makes sense if config_prof is true. */
    866 	assert(config_prof || !demote);
    867 
    868 	return (arena_salloc(ptr, demote));
    869 }
    870 
    871 JEMALLOC_ALWAYS_INLINE void *
    872 iallocztm(tsd_t *tsd, size_t size, bool zero, tcache_t *tcache, bool is_metadata,
    873     arena_t *arena)
    874 {
    875 	void *ret;
    876 
    877 	assert(size != 0);
    878 
    879 	ret = arena_malloc(tsd, arena, size, zero, tcache);
    880 	if (config_stats && is_metadata && likely(ret != NULL)) {
    881 		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
    882 		    config_prof));
    883 	}
    884 	return (ret);
    885 }
    886 
    887 JEMALLOC_ALWAYS_INLINE void *
    888 imalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
    889 {
    890 
    891 	return (iallocztm(tsd, size, false, tcache, false, arena));
    892 }
    893 
    894 JEMALLOC_ALWAYS_INLINE void *
    895 imalloc(tsd_t *tsd, size_t size)
    896 {
    897 
    898 	return (iallocztm(tsd, size, false, tcache_get(tsd, true), false, NULL));
    899 }
    900 
    901 JEMALLOC_ALWAYS_INLINE void *
    902 icalloct(tsd_t *tsd, size_t size, tcache_t *tcache, arena_t *arena)
    903 {
    904 
    905 	return (iallocztm(tsd, size, true, tcache, false, arena));
    906 }
    907 
    908 JEMALLOC_ALWAYS_INLINE void *
    909 icalloc(tsd_t *tsd, size_t size)
    910 {
    911 
    912 	return (iallocztm(tsd, size, true, tcache_get(tsd, true), false, NULL));
    913 }
    914 
    915 JEMALLOC_ALWAYS_INLINE void *
    916 ipallocztm(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    917     tcache_t *tcache, bool is_metadata, arena_t *arena)
    918 {
    919 	void *ret;
    920 
    921 	assert(usize != 0);
    922 	assert(usize == sa2u(usize, alignment));
    923 
    924 	ret = arena_palloc(tsd, arena, usize, alignment, zero, tcache);
    925 	assert(ALIGNMENT_ADDR2BASE(ret, alignment) == ret);
    926 	if (config_stats && is_metadata && likely(ret != NULL)) {
    927 		arena_metadata_allocated_add(iaalloc(ret), isalloc(ret,
    928 		    config_prof));
    929 	}
    930 	return (ret);
    931 }
    932 
    933 JEMALLOC_ALWAYS_INLINE void *
    934 ipalloct(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    935     tcache_t *tcache, arena_t *arena)
    936 {
    937 
    938 	return (ipallocztm(tsd, usize, alignment, zero, tcache, false, arena));
    939 }
    940 
    941 JEMALLOC_ALWAYS_INLINE void *
    942 ipalloc(tsd_t *tsd, size_t usize, size_t alignment, bool zero)
    943 {
    944 
    945 	return (ipallocztm(tsd, usize, alignment, zero, tcache_get(tsd,
    946 	    true), false, NULL));
    947 }
    948 
    949 JEMALLOC_ALWAYS_INLINE size_t
    950 ivsalloc(const void *ptr, bool demote)
    951 {
    952 	extent_node_t *node;
    953 
    954 	/* Return 0 if ptr is not within a chunk managed by jemalloc. */
    955 	node = chunk_lookup(ptr, false);
    956 	if (node == NULL)
    957 		return (0);
    958 	/* Only arena chunks should be looked up via interior pointers. */
    959 	assert(extent_node_addr_get(node) == ptr ||
    960 	    extent_node_achunk_get(node));
    961 
    962 	return (isalloc(ptr, demote));
    963 }
    964 
    965 JEMALLOC_INLINE size_t
    966 u2rz(size_t usize)
    967 {
    968 	size_t ret;
    969 
    970 	if (usize <= SMALL_MAXCLASS) {
    971 		index_t binind = size2index(usize);
    972 		ret = arena_bin_info[binind].redzone_size;
    973 	} else
    974 		ret = 0;
    975 
    976 	return (ret);
    977 }
    978 
    979 JEMALLOC_INLINE size_t
    980 p2rz(const void *ptr)
    981 {
    982 	size_t usize = isalloc(ptr, false);
    983 
    984 	return (u2rz(usize));
    985 }
    986 
    987 JEMALLOC_ALWAYS_INLINE void
    988 idalloctm(tsd_t *tsd, void *ptr, tcache_t *tcache, bool is_metadata)
    989 {
    990 
    991 	assert(ptr != NULL);
    992 	if (config_stats && is_metadata) {
    993 		arena_metadata_allocated_sub(iaalloc(ptr), isalloc(ptr,
    994 		    config_prof));
    995 	}
    996 
    997 	arena_dalloc(tsd, ptr, tcache);
    998 }
    999 
   1000 JEMALLOC_ALWAYS_INLINE void
   1001 idalloct(tsd_t *tsd, void *ptr, tcache_t *tcache)
   1002 {
   1003 
   1004 	idalloctm(tsd, ptr, tcache, false);
   1005 }
   1006 
   1007 JEMALLOC_ALWAYS_INLINE void
   1008 idalloc(tsd_t *tsd, void *ptr)
   1009 {
   1010 
   1011 	idalloctm(tsd, ptr, tcache_get(tsd, false), false);
   1012 }
   1013 
   1014 JEMALLOC_ALWAYS_INLINE void
   1015 iqalloc(tsd_t *tsd, void *ptr, tcache_t *tcache)
   1016 {
   1017 
   1018 	if (config_fill && unlikely(opt_quarantine))
   1019 		quarantine(tsd, ptr);
   1020 	else
   1021 		idalloctm(tsd, ptr, tcache, false);
   1022 }
   1023 
   1024 JEMALLOC_ALWAYS_INLINE void
   1025 isdalloct(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
   1026 {
   1027 
   1028 	arena_sdalloc(tsd, ptr, size, tcache);
   1029 }
   1030 
   1031 JEMALLOC_ALWAYS_INLINE void
   1032 isqalloc(tsd_t *tsd, void *ptr, size_t size, tcache_t *tcache)
   1033 {
   1034 
   1035 	if (config_fill && unlikely(opt_quarantine))
   1036 		quarantine(tsd, ptr);
   1037 	else
   1038 		isdalloct(tsd, ptr, size, tcache);
   1039 }
   1040 
   1041 JEMALLOC_ALWAYS_INLINE void *
   1042 iralloct_realign(tsd_t *tsd, void *ptr, size_t oldsize, size_t size,
   1043     size_t extra, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
   1044 {
   1045 	void *p;
   1046 	size_t usize, copysize;
   1047 
   1048 	usize = sa2u(size + extra, alignment);
   1049 	if (usize == 0)
   1050 		return (NULL);
   1051 	p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
   1052 	if (p == NULL) {
   1053 		if (extra == 0)
   1054 			return (NULL);
   1055 		/* Try again, without extra this time. */
   1056 		usize = sa2u(size, alignment);
   1057 		if (usize == 0)
   1058 			return (NULL);
   1059 		p = ipalloct(tsd, usize, alignment, zero, tcache, arena);
   1060 		if (p == NULL)
   1061 			return (NULL);
   1062 	}
   1063 	/*
   1064 	 * Copy at most size bytes (not size+extra), since the caller has no
   1065 	 * expectation that the extra bytes will be reliably preserved.
   1066 	 */
   1067 	copysize = (size < oldsize) ? size : oldsize;
   1068 	memcpy(p, ptr, copysize);
   1069 	isqalloc(tsd, ptr, oldsize, tcache);
   1070 	return (p);
   1071 }
   1072 
   1073 JEMALLOC_ALWAYS_INLINE void *
   1074 iralloct(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
   1075     bool zero, tcache_t *tcache, arena_t *arena)
   1076 {
   1077 
   1078 	assert(ptr != NULL);
   1079 	assert(size != 0);
   1080 
   1081 	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
   1082 	    != 0) {
   1083 		/*
   1084 		 * Existing object alignment is inadequate; allocate new space
   1085 		 * and copy.
   1086 		 */
   1087 		return (iralloct_realign(tsd, ptr, oldsize, size, 0, alignment,
   1088 		    zero, tcache, arena));
   1089 	}
   1090 
   1091 	return (arena_ralloc(tsd, arena, ptr, oldsize, size, 0, alignment, zero,
   1092 	    tcache));
   1093 }
   1094 
   1095 JEMALLOC_ALWAYS_INLINE void *
   1096 iralloc(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, size_t alignment,
   1097     bool zero)
   1098 {
   1099 
   1100 	return (iralloct(tsd, ptr, oldsize, size, alignment, zero,
   1101 	    tcache_get(tsd, true), NULL));
   1102 }
   1103 
   1104 JEMALLOC_ALWAYS_INLINE bool
   1105 ixalloc(void *ptr, size_t oldsize, size_t size, size_t extra, size_t alignment,
   1106     bool zero)
   1107 {
   1108 
   1109 	assert(ptr != NULL);
   1110 	assert(size != 0);
   1111 
   1112 	if (alignment != 0 && ((uintptr_t)ptr & ((uintptr_t)alignment-1))
   1113 	    != 0) {
   1114 		/* Existing object alignment is inadequate. */
   1115 		return (true);
   1116 	}
   1117 
   1118 	return (arena_ralloc_no_move(ptr, oldsize, size, extra, zero));
   1119 }
   1120 #endif
   1121 
   1122 #include "jemalloc/internal/prof.h"
   1123 
   1124 #undef JEMALLOC_H_INLINES
   1125 /******************************************************************************/
   1126 #endif /* JEMALLOC_INTERNAL_H */
   1127