#define	JEMALLOC_PROF_C_
#include "jemalloc/internal/jemalloc_internal.h"
/******************************************************************************/

#ifdef JEMALLOC_PROF_LIBUNWIND
#define	UNW_LOCAL_ONLY
#include <libunwind.h>
#endif

#ifdef JEMALLOC_PROF_LIBGCC
#include <unwind.h>
#endif

/******************************************************************************/
/* Data. */

bool		opt_prof = false;
bool		opt_prof_active = true;
bool		opt_prof_thread_active_init = true;
size_t		opt_lg_prof_sample = LG_PROF_SAMPLE_DEFAULT;
ssize_t		opt_lg_prof_interval = LG_PROF_INTERVAL_DEFAULT;
bool		opt_prof_gdump = false;
bool		opt_prof_final = false;
bool		opt_prof_leak = false;
bool		opt_prof_accum = false;
char		opt_prof_prefix[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PATH_MAX +
#endif
    1];

/*
 * Initialized as opt_prof_active, and accessed via
 * prof_active_[gs]et{_unlocked,}().
 */
bool			prof_active;
static malloc_mutex_t	prof_active_mtx;

/*
 * Initialized as opt_prof_thread_active_init, and accessed via
 * prof_thread_active_init_[gs]et().
 */
static bool		prof_thread_active_init;
static malloc_mutex_t	prof_thread_active_init_mtx;

/*
 * Initialized as opt_prof_gdump, and accessed via
 * prof_gdump_[gs]et{_unlocked,}().
 */
bool			prof_gdump_val;
static malloc_mutex_t	prof_gdump_mtx;

uint64_t	prof_interval = 0;

size_t		lg_prof_sample;

/*
 * Table of mutexes that are shared among gctx's.  These are leaf locks, so
 * there is no problem with using them for more than one gctx at the same time.
 * The primary motivation for this sharing though is that gctx's are ephemeral,
 * and destroying mutexes causes complications for systems that allocate when
 * creating/destroying mutexes.
 */
static malloc_mutex_t	*gctx_locks;
static unsigned		cum_gctxs; /* Atomic counter. */

/*
 * Table of mutexes that are shared among tdata's.  No operations require
 * holding multiple tdata locks, so there is no problem with using them for more
 * than one tdata at the same time, even though a gctx lock may be acquired
 * while holding a tdata lock.
 */
static malloc_mutex_t	*tdata_locks;

/*
 * Global hash of (prof_bt_t *)-->(prof_gctx_t *).  This is the master data
 * structure that knows about all backtraces currently captured.
 */
static ckh_t		bt2gctx;
static malloc_mutex_t	bt2gctx_mtx;

/*
 * Tree of all extant prof_tdata_t structures, regardless of state,
 * {attached,detached,expired}.
 */
static prof_tdata_tree_t	tdatas;
static malloc_mutex_t	tdatas_mtx;

static uint64_t		next_thr_uid;
static malloc_mutex_t	next_thr_uid_mtx;

static malloc_mutex_t	prof_dump_seq_mtx;
static uint64_t		prof_dump_seq;
static uint64_t		prof_dump_iseq;
static uint64_t		prof_dump_mseq;
static uint64_t		prof_dump_useq;

/*
 * This buffer is rather large for stack allocation, so use a single buffer for
 * all profile dumps.
 */
static malloc_mutex_t	prof_dump_mtx;
static char		prof_dump_buf[
    /* Minimize memory bloat for non-prof builds. */
#ifdef JEMALLOC_PROF
    PROF_DUMP_BUFSIZE
#else
    1
#endif
];
static unsigned		prof_dump_buf_end;
static int		prof_dump_fd;

/* Do not dump any profiles until bootstrapping is complete. */
static bool		prof_booted = false;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool	prof_tctx_should_destroy(prof_tctx_t *tctx);
static void	prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx);
static bool	prof_tdata_should_destroy(prof_tdata_t *tdata,
    bool even_if_attached);
static void	prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata,
    bool even_if_attached);
static char	*prof_thread_name_alloc(tsd_t *tsd, const char *thread_name);

/******************************************************************************/
/* Red-black trees. */
JEMALLOC_INLINE_C int
prof_tctx_comp(const prof_tctx_t *a, const prof_tctx_t *b)
{
	uint64_t a_thr_uid = a->thr_uid;
	uint64_t b_thr_uid = b->thr_uid;
	int ret = (a_thr_uid > b_thr_uid) - (a_thr_uid < b_thr_uid);
	if (ret == 0) {
		uint64_t a_tctx_uid = a->tctx_uid;
		uint64_t b_tctx_uid = b->tctx_uid;
		ret = (a_tctx_uid > b_tctx_uid) - (a_tctx_uid < b_tctx_uid);
	}
	return (ret);
}

rb_gen(static UNUSED, tctx_tree_, prof_tctx_tree_t, prof_tctx_t,
    tctx_link, prof_tctx_comp)

JEMALLOC_INLINE_C int
prof_gctx_comp(const prof_gctx_t *a, const prof_gctx_t *b)
{
	unsigned a_len = a->bt.len;
	unsigned b_len = b->bt.len;
	unsigned comp_len = (a_len < b_len) ? a_len : b_len;
	int ret = memcmp(a->bt.vec, b->bt.vec, comp_len * sizeof(void *));
	if (ret == 0)
		ret = (a_len > b_len) - (a_len < b_len);
	return (ret);
}

rb_gen(static UNUSED, gctx_tree_, prof_gctx_tree_t, prof_gctx_t, dump_link,
    prof_gctx_comp)

JEMALLOC_INLINE_C int
prof_tdata_comp(const prof_tdata_t *a, const prof_tdata_t *b)
{
	int ret;
	uint64_t a_uid = a->thr_uid;
	uint64_t b_uid = b->thr_uid;

	ret = ((a_uid > b_uid) - (a_uid < b_uid));
	if (ret == 0) {
		uint64_t a_discrim = a->thr_discrim;
		uint64_t b_discrim = b->thr_discrim;

		ret = ((a_discrim > b_discrim) - (a_discrim < b_discrim));
	}
	return (ret);
}

rb_gen(static UNUSED, tdata_tree_, prof_tdata_tree_t, prof_tdata_t, tdata_link,
    prof_tdata_comp)

/******************************************************************************/

void
prof_alloc_rollback(tsd_t *tsd, prof_tctx_t *tctx, bool updated)
{
	prof_tdata_t *tdata;

	cassert(config_prof);

	if (updated) {
		/*
		 * Compute a new sample threshold.  This isn't very important in
		 * practice, because this function is rarely executed, so the
		 * potential for sample bias is minimal except in contrived
		 * programs.
		 */
		tdata = prof_tdata_get(tsd, true);
		if (tdata != NULL)
			prof_sample_threshold_update(tdata);
	}

	if ((uintptr_t)tctx > (uintptr_t)1U) {
		malloc_mutex_lock(tctx->tdata->lock);
		tctx->prepared = false;
		if (prof_tctx_should_destroy(tctx))
			prof_tctx_destroy(tsd, tctx);
		else
			malloc_mutex_unlock(tctx->tdata->lock);
	}
}

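/*
 * Counter semantics: curobjs/curbytes track currently live sampled objects
 * and are decremented again in prof_free_sampled_object(), whereas
 * accumobjs/accumbytes are cumulative totals that only grow, and are
 * maintained only when opt_prof_accum is enabled.
 */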
void
prof_malloc_sample_object(const void *ptr, size_t usize, prof_tctx_t *tctx)
{

	prof_tctx_set(ptr, tctx);

	malloc_mutex_lock(tctx->tdata->lock);
	tctx->cnts.curobjs++;
	tctx->cnts.curbytes += usize;
	if (opt_prof_accum) {
		tctx->cnts.accumobjs++;
		tctx->cnts.accumbytes += usize;
	}
	tctx->prepared = false;
	malloc_mutex_unlock(tctx->tdata->lock);
}

void
prof_free_sampled_object(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
{

	malloc_mutex_lock(tctx->tdata->lock);
	assert(tctx->cnts.curobjs > 0);
	assert(tctx->cnts.curbytes >= usize);
	tctx->cnts.curobjs--;
	tctx->cnts.curbytes -= usize;

	if (prof_tctx_should_destroy(tctx))
		prof_tctx_destroy(tsd, tctx);
	else
		malloc_mutex_unlock(tctx->tdata->lock);
}

void
bt_init(prof_bt_t *bt, void **vec)
{

	cassert(config_prof);

	bt->vec = vec;
	bt->len = 0;
}

JEMALLOC_INLINE_C void
prof_enter(tsd_t *tsd, prof_tdata_t *tdata)
{

	cassert(config_prof);
	assert(tdata == prof_tdata_get(tsd, false));

	if (tdata != NULL) {
		assert(!tdata->enq);
		tdata->enq = true;
	}

	malloc_mutex_lock(&bt2gctx_mtx);
}

JEMALLOC_INLINE_C void
prof_leave(tsd_t *tsd, prof_tdata_t *tdata)
{

	cassert(config_prof);
	assert(tdata == prof_tdata_get(tsd, false));

	malloc_mutex_unlock(&bt2gctx_mtx);

	if (tdata != NULL) {
		bool idump, gdump;

		assert(tdata->enq);
		tdata->enq = false;
		idump = tdata->enq_idump;
		tdata->enq_idump = false;
		gdump = tdata->enq_gdump;
		tdata->enq_gdump = false;

		if (idump)
			prof_idump();
		if (gdump)
			prof_gdump();
	}
}
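
/*
 * Note: prof_enter()/prof_leave() bracket bt2gctx_mtx critical sections.
 * Dump requests that this thread makes while it holds the mutex (see the
 * tdata->enq checks in prof_idump()/prof_gdump() below) are deferred via
 * tdata->enq_idump/enq_gdump and replayed in prof_leave(), rather than
 * dumping reentrantly from within a lookup.
 */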

#ifdef JEMALLOC_PROF_LIBUNWIND
void
prof_backtrace(prof_bt_t *bt)
{
	int nframes;

	cassert(config_prof);
	assert(bt->len == 0);
	assert(bt->vec != NULL);

	nframes = unw_backtrace(bt->vec, PROF_BT_MAX);
	if (nframes <= 0)
		return;
	bt->len = nframes;
}
#elif (defined(JEMALLOC_PROF_LIBGCC))
static _Unwind_Reason_Code
prof_unwind_init_callback(struct _Unwind_Context *context, void *arg)
{

	cassert(config_prof);

	return (_URC_NO_REASON);
}

static _Unwind_Reason_Code
prof_unwind_callback(struct _Unwind_Context *context, void *arg)
{
	prof_unwind_data_t *data = (prof_unwind_data_t *)arg;
	void *ip;

	cassert(config_prof);

	ip = (void *)_Unwind_GetIP(context);
	if (ip == NULL)
		return (_URC_END_OF_STACK);
	data->bt->vec[data->bt->len] = ip;
	data->bt->len++;
	if (data->bt->len == data->max)
		return (_URC_END_OF_STACK);

	return (_URC_NO_REASON);
}

void
prof_backtrace(prof_bt_t *bt)
{
	prof_unwind_data_t data = {bt, PROF_BT_MAX};

	cassert(config_prof);

	_Unwind_Backtrace(prof_unwind_callback, &data);
}
#elif (defined(JEMALLOC_PROF_GCC))
void
prof_backtrace(prof_bt_t *bt)
{
#define	BT_FRAME(i)							\
	if ((i) < PROF_BT_MAX) {					\
		void *p;						\
		if (__builtin_frame_address(i) == 0)			\
			return;						\
		p = __builtin_return_address(i);			\
		if (p == NULL)						\
			return;						\
		bt->vec[(i)] = p;					\
		bt->len = (i) + 1;					\
	} else								\
		return;

	cassert(config_prof);

	BT_FRAME(0)
	BT_FRAME(1)
	BT_FRAME(2)
	BT_FRAME(3)
	BT_FRAME(4)
	BT_FRAME(5)
	BT_FRAME(6)
	BT_FRAME(7)
	BT_FRAME(8)
	BT_FRAME(9)

	BT_FRAME(10)
	BT_FRAME(11)
	BT_FRAME(12)
	BT_FRAME(13)
	BT_FRAME(14)
	BT_FRAME(15)
	BT_FRAME(16)
	BT_FRAME(17)
	BT_FRAME(18)
	BT_FRAME(19)

	BT_FRAME(20)
	BT_FRAME(21)
	BT_FRAME(22)
	BT_FRAME(23)
	BT_FRAME(24)
	BT_FRAME(25)
	BT_FRAME(26)
	BT_FRAME(27)
	BT_FRAME(28)
	BT_FRAME(29)

	BT_FRAME(30)
	BT_FRAME(31)
	BT_FRAME(32)
	BT_FRAME(33)
	BT_FRAME(34)
	BT_FRAME(35)
	BT_FRAME(36)
	BT_FRAME(37)
	BT_FRAME(38)
	BT_FRAME(39)

	BT_FRAME(40)
	BT_FRAME(41)
	BT_FRAME(42)
	BT_FRAME(43)
	BT_FRAME(44)
	BT_FRAME(45)
	BT_FRAME(46)
	BT_FRAME(47)
	BT_FRAME(48)
	BT_FRAME(49)

	BT_FRAME(50)
	BT_FRAME(51)
	BT_FRAME(52)
	BT_FRAME(53)
	BT_FRAME(54)
	BT_FRAME(55)
	BT_FRAME(56)
	BT_FRAME(57)
	BT_FRAME(58)
	BT_FRAME(59)

	BT_FRAME(60)
	BT_FRAME(61)
	BT_FRAME(62)
	BT_FRAME(63)
	BT_FRAME(64)
	BT_FRAME(65)
	BT_FRAME(66)
	BT_FRAME(67)
	BT_FRAME(68)
	BT_FRAME(69)

	BT_FRAME(70)
	BT_FRAME(71)
	BT_FRAME(72)
	BT_FRAME(73)
	BT_FRAME(74)
	BT_FRAME(75)
	BT_FRAME(76)
	BT_FRAME(77)
	BT_FRAME(78)
	BT_FRAME(79)

	BT_FRAME(80)
	BT_FRAME(81)
	BT_FRAME(82)
	BT_FRAME(83)
	BT_FRAME(84)
	BT_FRAME(85)
	BT_FRAME(86)
	BT_FRAME(87)
	BT_FRAME(88)
	BT_FRAME(89)

	BT_FRAME(90)
	BT_FRAME(91)
	BT_FRAME(92)
	BT_FRAME(93)
	BT_FRAME(94)
	BT_FRAME(95)
	BT_FRAME(96)
	BT_FRAME(97)
	BT_FRAME(98)
	BT_FRAME(99)

	BT_FRAME(100)
	BT_FRAME(101)
	BT_FRAME(102)
	BT_FRAME(103)
	BT_FRAME(104)
	BT_FRAME(105)
	BT_FRAME(106)
	BT_FRAME(107)
	BT_FRAME(108)
	BT_FRAME(109)

	BT_FRAME(110)
	BT_FRAME(111)
	BT_FRAME(112)
	BT_FRAME(113)
	BT_FRAME(114)
	BT_FRAME(115)
	BT_FRAME(116)
	BT_FRAME(117)
	BT_FRAME(118)
	BT_FRAME(119)

	BT_FRAME(120)
	BT_FRAME(121)
	BT_FRAME(122)
	BT_FRAME(123)
	BT_FRAME(124)
	BT_FRAME(125)
	BT_FRAME(126)
	BT_FRAME(127)
#undef BT_FRAME
}
#else
void
prof_backtrace(prof_bt_t *bt)
{

	cassert(config_prof);
	not_reached();
}
#endif
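
/*
 * Summary of the variants above: backtraces are captured via libunwind when
 * available, else via libgcc's _Unwind_Backtrace(), else via the
 * __builtin_frame_address()/__builtin_return_address() macro ladder, and in
 * all cases are truncated to at most PROF_BT_MAX frames.
 */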

static malloc_mutex_t *
prof_gctx_mutex_choose(void)
{
	unsigned ngctxs = atomic_add_u(&cum_gctxs, 1);

	return (&gctx_locks[(ngctxs - 1) % PROF_NCTX_LOCKS]);
}

static malloc_mutex_t *
prof_tdata_mutex_choose(uint64_t thr_uid)
{

	return (&tdata_locks[thr_uid % PROF_NTDATA_LOCKS]);
}
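
/*
 * Illustrative example (assuming the default PROF_NTDATA_LOCKS of 256):
 * thr_uid values 5, 261, and 517 all map to tdata_locks[5].  Striping over a
 * small fixed pool keeps mutex memory bounded no matter how many gctx's and
 * tdata's are created, which is the motivation described at the table
 * declarations above.
 */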

static prof_gctx_t *
prof_gctx_create(tsd_t *tsd, prof_bt_t *bt)
{
	/*
	 * Create a single allocation that has space for vec of length bt->len.
	 */
	prof_gctx_t *gctx = (prof_gctx_t *)iallocztm(tsd, offsetof(prof_gctx_t,
	    vec) + (bt->len * sizeof(void *)), false, tcache_get(tsd, true),
	    true, NULL);
	if (gctx == NULL)
		return (NULL);
	gctx->lock = prof_gctx_mutex_choose();
	/*
	 * Set nlimbo to 1, in order to avoid a race condition with
	 * prof_tctx_destroy()/prof_gctx_try_destroy().
	 */
	gctx->nlimbo = 1;
	tctx_tree_new(&gctx->tctxs);
	/* Duplicate bt. */
	memcpy(gctx->vec, bt->vec, bt->len * sizeof(void *));
	gctx->bt.vec = gctx->vec;
	gctx->bt.len = bt->len;
	return (gctx);
}
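
/*
 * Layout sketch for the allocation above (assuming prof_gctx_t ends in a
 * trailing vec array member): a single iallocztm() of
 * offsetof(prof_gctx_t, vec) + len * sizeof(void *) covers both the header
 * fields and the backtrace copy, so gctx->bt.vec can simply alias gctx->vec
 * with no second allocation to manage.
 */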

static void
prof_gctx_try_destroy(tsd_t *tsd, prof_tdata_t *tdata_self, prof_gctx_t *gctx,
    prof_tdata_t *tdata)
{

	cassert(config_prof);

	/*
	 * Check that gctx is still unused by any thread cache before destroying
	 * it.  prof_lookup() increments gctx->nlimbo in order to avoid a race
	 * condition with this function, as does prof_tctx_destroy() in order to
	 * avoid a race between the main body of prof_tctx_destroy() and entry
	 * into this function.
	 */
	prof_enter(tsd, tdata_self);
	malloc_mutex_lock(gctx->lock);
	assert(gctx->nlimbo != 0);
	if (tctx_tree_empty(&gctx->tctxs) && gctx->nlimbo == 1) {
		/* Remove gctx from bt2gctx. */
		if (ckh_remove(tsd, &bt2gctx, &gctx->bt, NULL, NULL))
			not_reached();
		prof_leave(tsd, tdata_self);
		/* Destroy gctx. */
		malloc_mutex_unlock(gctx->lock);
		idalloctm(tsd, gctx, tcache_get(tsd, false), true);
	} else {
		/*
		 * Compensate for increment in prof_tctx_destroy() or
		 * prof_lookup().
		 */
		gctx->nlimbo--;
		malloc_mutex_unlock(gctx->lock);
		prof_leave(tsd, tdata_self);
	}
}

/* tctx->tdata->lock must be held. */
static bool
prof_tctx_should_destroy(prof_tctx_t *tctx)
{

	if (opt_prof_accum)
		return (false);
	if (tctx->cnts.curobjs != 0)
		return (false);
	if (tctx->prepared)
		return (false);
	return (true);
}

static bool
prof_gctx_should_destroy(prof_gctx_t *gctx)
{

	if (opt_prof_accum)
		return (false);
	if (!tctx_tree_empty(&gctx->tctxs))
		return (false);
	if (gctx->nlimbo != 0)
		return (false);
	return (true);
}

/* tctx->tdata->lock is held upon entry, and released before return. */
static void
prof_tctx_destroy(tsd_t *tsd, prof_tctx_t *tctx)
{
	prof_tdata_t *tdata = tctx->tdata;
	prof_gctx_t *gctx = tctx->gctx;
	bool destroy_tdata, destroy_tctx, destroy_gctx;

	assert(tctx->cnts.curobjs == 0);
	assert(tctx->cnts.curbytes == 0);
	assert(!opt_prof_accum);
	assert(tctx->cnts.accumobjs == 0);
	assert(tctx->cnts.accumbytes == 0);

	ckh_remove(tsd, &tdata->bt2tctx, &gctx->bt, NULL, NULL);
	destroy_tdata = prof_tdata_should_destroy(tdata, false);
	malloc_mutex_unlock(tdata->lock);

	malloc_mutex_lock(gctx->lock);
	switch (tctx->state) {
	case prof_tctx_state_nominal:
		tctx_tree_remove(&gctx->tctxs, tctx);
		destroy_tctx = true;
		if (prof_gctx_should_destroy(gctx)) {
			/*
			 * Increment gctx->nlimbo in order to keep another
			 * thread from winning the race to destroy gctx while
			 * this one has gctx->lock dropped.  Without this, it
			 * would be possible for another thread to:
			 *
			 * 1) Sample an allocation associated with gctx.
			 * 2) Deallocate the sampled object.
			 * 3) Successfully prof_gctx_try_destroy(gctx).
			 *
			 * The result would be that gctx no longer exists by the
			 * time this thread accesses it in
			 * prof_gctx_try_destroy().
			 */
			gctx->nlimbo++;
			destroy_gctx = true;
		} else
			destroy_gctx = false;
		break;
	case prof_tctx_state_dumping:
		/*
		 * A dumping thread needs tctx to remain valid until dumping
		 * has finished.  Change state such that the dumping thread will
		 * complete destruction during a late dump iteration phase.
		 */
		tctx->state = prof_tctx_state_purgatory;
		destroy_tctx = false;
		destroy_gctx = false;
		break;
	default:
		not_reached();
		destroy_tctx = false;
		destroy_gctx = false;
	}
	malloc_mutex_unlock(gctx->lock);
	if (destroy_gctx) {
		prof_gctx_try_destroy(tsd, prof_tdata_get(tsd, false), gctx,
		    tdata);
	}

	if (destroy_tdata)
		prof_tdata_destroy(tsd, tdata, false);

	if (destroy_tctx)
		idalloctm(tsd, tctx, tcache_get(tsd, false), true);
}

static bool
prof_lookup_global(tsd_t *tsd, prof_bt_t *bt, prof_tdata_t *tdata,
    void **p_btkey, prof_gctx_t **p_gctx, bool *p_new_gctx)
{
	union {
		prof_gctx_t	*p;
		void		*v;
	} gctx;
	union {
		prof_bt_t	*p;
		void		*v;
	} btkey;
	bool new_gctx;

	prof_enter(tsd, tdata);
	if (ckh_search(&bt2gctx, bt, &btkey.v, &gctx.v)) {
		/* bt has never been seen before.  Insert it. */
		gctx.p = prof_gctx_create(tsd, bt);
		if (gctx.v == NULL) {
			prof_leave(tsd, tdata);
			return (true);
		}
		btkey.p = &gctx.p->bt;
		if (ckh_insert(tsd, &bt2gctx, btkey.v, gctx.v)) {
			/* OOM. */
			prof_leave(tsd, tdata);
			idalloctm(tsd, gctx.v, tcache_get(tsd, false), true);
			return (true);
		}
		new_gctx = true;
	} else {
		/*
		 * Increment nlimbo, in order to avoid a race condition with
		 * prof_tctx_destroy()/prof_gctx_try_destroy().
		 */
		malloc_mutex_lock(gctx.p->lock);
		gctx.p->nlimbo++;
		malloc_mutex_unlock(gctx.p->lock);
		new_gctx = false;
	}
	prof_leave(tsd, tdata);

	*p_btkey = btkey.v;
	*p_gctx = gctx.p;
	*p_new_gctx = new_gctx;
	return (false);
}

prof_tctx_t *
prof_lookup(tsd_t *tsd, prof_bt_t *bt)
{
	union {
		prof_tctx_t	*p;
		void		*v;
	} ret;
	prof_tdata_t *tdata;
	bool not_found;

	cassert(config_prof);

	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL)
		return (NULL);

	malloc_mutex_lock(tdata->lock);
	not_found = ckh_search(&tdata->bt2tctx, bt, NULL, &ret.v);
	if (!not_found) /* Note double negative! */
		ret.p->prepared = true;
	malloc_mutex_unlock(tdata->lock);
	if (not_found) {
		tcache_t *tcache;
		void *btkey;
		prof_gctx_t *gctx;
		bool new_gctx, error;

		/*
		 * This thread's cache lacks bt.  Look for it in the global
		 * cache.
		 */
		if (prof_lookup_global(tsd, bt, tdata, &btkey, &gctx,
		    &new_gctx))
			return (NULL);

		/* Link a prof_tctx_t into gctx for this thread. */
		tcache = tcache_get(tsd, true);
		ret.v = iallocztm(tsd, sizeof(prof_tctx_t), false, tcache, true,
		    NULL);
		if (ret.p == NULL) {
			if (new_gctx)
				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
			return (NULL);
		}
		ret.p->tdata = tdata;
		ret.p->thr_uid = tdata->thr_uid;
		memset(&ret.p->cnts, 0, sizeof(prof_cnt_t));
		ret.p->gctx = gctx;
		ret.p->tctx_uid = tdata->tctx_uid_next++;
		ret.p->prepared = true;
		ret.p->state = prof_tctx_state_initializing;
		malloc_mutex_lock(tdata->lock);
		error = ckh_insert(tsd, &tdata->bt2tctx, btkey, ret.v);
		malloc_mutex_unlock(tdata->lock);
		if (error) {
			if (new_gctx)
				prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
			idalloctm(tsd, ret.v, tcache, true);
			return (NULL);
		}
		malloc_mutex_lock(gctx->lock);
		ret.p->state = prof_tctx_state_nominal;
		tctx_tree_insert(&gctx->tctxs, ret.p);
		gctx->nlimbo--;
		malloc_mutex_unlock(gctx->lock);
	}

	return (ret.p);
}

void
prof_sample_threshold_update(prof_tdata_t *tdata)
{
	/*
	 * The body of this function is compiled out unless heap profiling is
	 * enabled, so that it is possible to compile jemalloc with floating
	 * point support completely disabled.  Avoiding floating point code is
	 * important on memory-constrained systems, but it also enables a
	 * workaround for versions of glibc that don't properly save/restore
	 * floating point registers during dynamic lazy symbol loading (which
	 * internally calls into whatever malloc implementation happens to be
	 * integrated into the application).  Note that some compilers (e.g.
	 * gcc 4.8) may use floating point registers for fast memory moves, so
	 * jemalloc must be compiled with such optimizations disabled (e.g.
	 * -mno-sse) in order for the workaround to be complete.
	 */
#ifdef JEMALLOC_PROF
	uint64_t r;
	double u;

	if (!config_prof)
		return;

	if (lg_prof_sample == 0) {
		tdata->bytes_until_sample = 0;
		return;
	}

	/*
	 * Compute sample interval as a geometrically distributed random
	 * variable with mean (2^lg_prof_sample).
	 *
	 *                             __        __
	 *                             |  log(u)  |                     1
	 * tdata->bytes_until_sample = | -------- |, where p = ---------------
	 *                             | log(1-p) |             lg_prof_sample
	 *                                                     2
	 *
	 * For more information on the math, see:
	 *
	 *   Non-Uniform Random Variate Generation
	 *   Luc Devroye
	 *   Springer-Verlag, New York, 1986
	 *   pp 500
	 *   (http://luc.devroye.org/rnbookindex.html)
	 */
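	/*
	 * Worked example (illustrative, assuming the default lg_prof_sample
	 * of 19, i.e. a mean interval of 2^19 = 512 KiB): for u == 0.5 the
	 * expression below evaluates to
	 * floor(log(0.5) / log(1 - 2^-19)) + 1 ~= 363409 bytes until the
	 * next sampled allocation.
	 */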
	prng64(r, 53, tdata->prng_state, UINT64_C(6364136223846793005),
	    UINT64_C(1442695040888963407));
	u = (double)r * (1.0/9007199254740992.0L);
	tdata->bytes_until_sample = (uint64_t)(log(u) /
	    log(1.0 - (1.0 / (double)((uint64_t)1U << lg_prof_sample))))
	    + (uint64_t)1U;
#endif
}

#ifdef JEMALLOC_JET
static prof_tdata_t *
prof_tdata_count_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
	size_t *tdata_count = (size_t *)arg;

	(*tdata_count)++;

	return (NULL);
}

size_t
prof_tdata_count(void)
{
	size_t tdata_count = 0;

	malloc_mutex_lock(&tdatas_mtx);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_count_iter,
	    (void *)&tdata_count);
	malloc_mutex_unlock(&tdatas_mtx);

	return (tdata_count);
}
#endif

#ifdef JEMALLOC_JET
size_t
prof_bt_count(void)
{
	size_t bt_count;
	tsd_t *tsd;
	prof_tdata_t *tdata;

	tsd = tsd_fetch();
	tdata = prof_tdata_get(tsd, false);
	if (tdata == NULL)
		return (0);

	malloc_mutex_lock(&bt2gctx_mtx);
	bt_count = ckh_count(&bt2gctx);
	malloc_mutex_unlock(&bt2gctx_mtx);

	return (bt_count);
}
#endif

#ifdef JEMALLOC_JET
#undef prof_dump_open
#define	prof_dump_open JEMALLOC_N(prof_dump_open_impl)
#endif
static int
prof_dump_open(bool propagate_err, const char *filename)
{
	int fd;

	fd = creat(filename, 0644);
	if (fd == -1 && !propagate_err) {
		malloc_printf("<jemalloc>: creat(\"%s\", 0644) failed\n",
		    filename);
		if (opt_abort)
			abort();
	}

	return (fd);
}
#ifdef JEMALLOC_JET
#undef prof_dump_open
#define	prof_dump_open JEMALLOC_N(prof_dump_open)
prof_dump_open_t *prof_dump_open = JEMALLOC_N(prof_dump_open_impl);
#endif

static bool
prof_dump_flush(bool propagate_err)
{
	bool ret = false;
	ssize_t err;

	cassert(config_prof);

	err = write(prof_dump_fd, prof_dump_buf, prof_dump_buf_end);
	if (err == -1) {
		if (!propagate_err) {
			malloc_write("<jemalloc>: write() failed during heap "
			    "profile flush\n");
			if (opt_abort)
				abort();
		}
		ret = true;
	}
	prof_dump_buf_end = 0;

	return (ret);
}

static bool
prof_dump_close(bool propagate_err)
{
	bool ret;

	assert(prof_dump_fd != -1);
	ret = prof_dump_flush(propagate_err);
	close(prof_dump_fd);
	prof_dump_fd = -1;

	return (ret);
}

static bool
prof_dump_write(bool propagate_err, const char *s)
{
	unsigned i, slen, n;

	cassert(config_prof);

	i = 0;
	slen = strlen(s);
	while (i < slen) {
		/* Flush the buffer if it is full. */
		if (prof_dump_buf_end == PROF_DUMP_BUFSIZE)
			if (prof_dump_flush(propagate_err) && propagate_err)
				return (true);

		if (prof_dump_buf_end + slen <= PROF_DUMP_BUFSIZE) {
			/* Finish writing. */
			n = slen - i;
		} else {
			/* Write as much of s as will fit. */
			n = PROF_DUMP_BUFSIZE - prof_dump_buf_end;
		}
		memcpy(&prof_dump_buf[prof_dump_buf_end], &s[i], n);
		prof_dump_buf_end += n;
		i += n;
	}

	return (false);
}

JEMALLOC_ATTR(format(printf, 2, 3))
static bool
prof_dump_printf(bool propagate_err, const char *format, ...)
{
	bool ret;
	va_list ap;
	char buf[PROF_PRINTF_BUFSIZE];

	va_start(ap, format);
	malloc_vsnprintf(buf, sizeof(buf), format, ap);
	va_end(ap);
	ret = prof_dump_write(propagate_err, buf);

	return (ret);
}

/* tctx->tdata->lock is held. */
static void
prof_tctx_merge_tdata(prof_tctx_t *tctx, prof_tdata_t *tdata)
{

	malloc_mutex_lock(tctx->gctx->lock);

	switch (tctx->state) {
	case prof_tctx_state_initializing:
		malloc_mutex_unlock(tctx->gctx->lock);
		return;
	case prof_tctx_state_nominal:
		tctx->state = prof_tctx_state_dumping;
		malloc_mutex_unlock(tctx->gctx->lock);

		memcpy(&tctx->dump_cnts, &tctx->cnts, sizeof(prof_cnt_t));

		tdata->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
		tdata->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
		if (opt_prof_accum) {
			tdata->cnt_summed.accumobjs +=
			    tctx->dump_cnts.accumobjs;
			tdata->cnt_summed.accumbytes +=
			    tctx->dump_cnts.accumbytes;
		}
		break;
	case prof_tctx_state_dumping:
	case prof_tctx_state_purgatory:
		not_reached();
	}
}

/* gctx->lock is held. */
static void
prof_tctx_merge_gctx(prof_tctx_t *tctx, prof_gctx_t *gctx)
{

	gctx->cnt_summed.curobjs += tctx->dump_cnts.curobjs;
	gctx->cnt_summed.curbytes += tctx->dump_cnts.curbytes;
	if (opt_prof_accum) {
		gctx->cnt_summed.accumobjs += tctx->dump_cnts.accumobjs;
		gctx->cnt_summed.accumbytes += tctx->dump_cnts.accumbytes;
	}
}

/* tctx->gctx->lock is held. */
static prof_tctx_t *
prof_tctx_merge_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{

	switch (tctx->state) {
	case prof_tctx_state_nominal:
		/* New since dumping started; ignore. */
		break;
	case prof_tctx_state_dumping:
	case prof_tctx_state_purgatory:
		prof_tctx_merge_gctx(tctx, tctx->gctx);
		break;
	default:
		not_reached();
	}

	return (NULL);
}

/* gctx->lock is held. */
static prof_tctx_t *
prof_tctx_dump_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
	bool propagate_err = *(bool *)arg;

	if (prof_dump_printf(propagate_err,
	    "  t%"PRIu64": %"PRIu64": %"PRIu64" [%"PRIu64": %"PRIu64"]\n",
	    tctx->thr_uid, tctx->dump_cnts.curobjs, tctx->dump_cnts.curbytes,
	    tctx->dump_cnts.accumobjs, tctx->dump_cnts.accumbytes))
		return (tctx);
	return (NULL);
}

/* tctx->gctx->lock is held. */
static prof_tctx_t *
prof_tctx_finish_iter(prof_tctx_tree_t *tctxs, prof_tctx_t *tctx, void *arg)
{
	prof_tctx_t *ret;

	switch (tctx->state) {
	case prof_tctx_state_nominal:
		/* New since dumping started; ignore. */
		break;
	case prof_tctx_state_dumping:
		tctx->state = prof_tctx_state_nominal;
		break;
	case prof_tctx_state_purgatory:
		ret = tctx;
		goto label_return;
	default:
		not_reached();
	}

	ret = NULL;
label_return:
	return (ret);
}

static void
prof_dump_gctx_prep(prof_gctx_t *gctx, prof_gctx_tree_t *gctxs)
{

	cassert(config_prof);

	malloc_mutex_lock(gctx->lock);

	/*
	 * Increment nlimbo so that gctx won't go away before dump.
	 * Additionally, link gctx into the dump list so that it is included in
	 * prof_dump()'s second pass.
	 */
	gctx->nlimbo++;
	gctx_tree_insert(gctxs, gctx);

	memset(&gctx->cnt_summed, 0, sizeof(prof_cnt_t));

	malloc_mutex_unlock(gctx->lock);
}

static prof_gctx_t *
prof_gctx_merge_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{
	size_t *leak_ngctx = (size_t *)arg;

	malloc_mutex_lock(gctx->lock);
	tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_merge_iter, NULL);
	if (gctx->cnt_summed.curobjs != 0)
		(*leak_ngctx)++;
	malloc_mutex_unlock(gctx->lock);

	return (NULL);
}

static void
prof_gctx_finish(tsd_t *tsd, prof_gctx_tree_t *gctxs)
{
	prof_tdata_t *tdata = prof_tdata_get(tsd, false);
	prof_gctx_t *gctx;

	/*
	 * Standard tree iteration won't work here, because as soon as we
	 * decrement gctx->nlimbo and unlock gctx, another thread can
	 * concurrently destroy it, which will corrupt the tree.  Therefore,
	 * tear down the tree one node at a time during iteration.
	 */
	while ((gctx = gctx_tree_first(gctxs)) != NULL) {
		gctx_tree_remove(gctxs, gctx);
		malloc_mutex_lock(gctx->lock);
		{
			prof_tctx_t *next;

			next = NULL;
			do {
				prof_tctx_t *to_destroy =
				    tctx_tree_iter(&gctx->tctxs, next,
				    prof_tctx_finish_iter, NULL);
				if (to_destroy != NULL) {
					next = tctx_tree_next(&gctx->tctxs,
					    to_destroy);
					tctx_tree_remove(&gctx->tctxs,
					    to_destroy);
					idalloctm(tsd, to_destroy,
					    tcache_get(tsd, false), true);
				} else
					next = NULL;
			} while (next != NULL);
		}
		gctx->nlimbo--;
		if (prof_gctx_should_destroy(gctx)) {
			gctx->nlimbo++;
			malloc_mutex_unlock(gctx->lock);
			prof_gctx_try_destroy(tsd, tdata, gctx, tdata);
		} else
			malloc_mutex_unlock(gctx->lock);
	}
}

static prof_tdata_t *
prof_tdata_merge_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
	prof_cnt_t *cnt_all = (prof_cnt_t *)arg;

	malloc_mutex_lock(tdata->lock);
	if (!tdata->expired) {
		size_t tabind;
		union {
			prof_tctx_t	*p;
			void		*v;
		} tctx;

		tdata->dumping = true;
		memset(&tdata->cnt_summed, 0, sizeof(prof_cnt_t));
		for (tabind = 0; !ckh_iter(&tdata->bt2tctx, &tabind, NULL,
		    &tctx.v);)
			prof_tctx_merge_tdata(tctx.p, tdata);

		cnt_all->curobjs += tdata->cnt_summed.curobjs;
		cnt_all->curbytes += tdata->cnt_summed.curbytes;
		if (opt_prof_accum) {
			cnt_all->accumobjs += tdata->cnt_summed.accumobjs;
			cnt_all->accumbytes += tdata->cnt_summed.accumbytes;
		}
	} else
		tdata->dumping = false;
	malloc_mutex_unlock(tdata->lock);

	return (NULL);
}

static prof_tdata_t *
prof_tdata_dump_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
{
	bool propagate_err = *(bool *)arg;

	if (!tdata->dumping)
		return (NULL);

	if (prof_dump_printf(propagate_err,
	    "  t%"PRIu64": %"PRIu64": %"PRIu64" [%"PRIu64": %"PRIu64"]%s%s\n",
	    tdata->thr_uid, tdata->cnt_summed.curobjs,
	    tdata->cnt_summed.curbytes, tdata->cnt_summed.accumobjs,
	    tdata->cnt_summed.accumbytes,
	    (tdata->thread_name != NULL) ? " " : "",
	    (tdata->thread_name != NULL) ? tdata->thread_name : ""))
		return (tdata);
	return (NULL);
}

#ifdef JEMALLOC_JET
#undef prof_dump_header
#define	prof_dump_header JEMALLOC_N(prof_dump_header_impl)
#endif
static bool
prof_dump_header(bool propagate_err, const prof_cnt_t *cnt_all)
{
	bool ret;

	if (prof_dump_printf(propagate_err,
	    "heap_v2/%"PRIu64"\n"
	    "  t*: %"PRIu64": %"PRIu64" [%"PRIu64": %"PRIu64"]\n",
	    ((uint64_t)1U << lg_prof_sample), cnt_all->curobjs,
	    cnt_all->curbytes, cnt_all->accumobjs, cnt_all->accumbytes))
		return (true);

	malloc_mutex_lock(&tdatas_mtx);
	ret = (tdata_tree_iter(&tdatas, NULL, prof_tdata_dump_iter,
	    (void *)&propagate_err) != NULL);
	malloc_mutex_unlock(&tdatas_mtx);
	return (ret);
}
#ifdef JEMALLOC_JET
#undef prof_dump_header
#define	prof_dump_header JEMALLOC_N(prof_dump_header)
prof_dump_header_t *prof_dump_header = JEMALLOC_N(prof_dump_header_impl);
#endif
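
/*
 * Illustrative header output (counter values invented for the example):
 *
 *   heap_v2/524288
 *     t*: 462: 2774400 [0: 0]
 *     t3: 12: 73728 [0: 0] web_worker
 *
 * 524288 is the mean sample interval (2^lg_prof_sample), the "t*" line is
 * the cross-thread total, and each "t<thr_uid>" line is one thread's
 * subtotal, optionally followed by its thread name.
 */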

/* gctx->lock is held. */
static bool
prof_dump_gctx(bool propagate_err, prof_gctx_t *gctx, const prof_bt_t *bt,
    prof_gctx_tree_t *gctxs)
{
	bool ret;
	unsigned i;

	cassert(config_prof);

	/* Avoid dumping gctx's that have no useful data. */
	if ((!opt_prof_accum && gctx->cnt_summed.curobjs == 0) ||
	    (opt_prof_accum && gctx->cnt_summed.accumobjs == 0)) {
		assert(gctx->cnt_summed.curobjs == 0);
		assert(gctx->cnt_summed.curbytes == 0);
		assert(gctx->cnt_summed.accumobjs == 0);
		assert(gctx->cnt_summed.accumbytes == 0);
		ret = false;
		goto label_return;
	}

	if (prof_dump_printf(propagate_err, "@")) {
		ret = true;
		goto label_return;
	}
	for (i = 0; i < bt->len; i++) {
		if (prof_dump_printf(propagate_err, " %#"PRIxPTR,
		    (uintptr_t)bt->vec[i])) {
			ret = true;
			goto label_return;
		}
	}

	if (prof_dump_printf(propagate_err,
	    "\n"
	    "  t*: %"PRIu64": %"PRIu64" [%"PRIu64": %"PRIu64"]\n",
	    gctx->cnt_summed.curobjs, gctx->cnt_summed.curbytes,
	    gctx->cnt_summed.accumobjs, gctx->cnt_summed.accumbytes)) {
		ret = true;
		goto label_return;
	}

	if (tctx_tree_iter(&gctx->tctxs, NULL, prof_tctx_dump_iter,
	    (void *)&propagate_err) != NULL) {
		ret = true;
		goto label_return;
	}

	ret = false;
label_return:
	return (ret);
}

JEMALLOC_ATTR(format(printf, 1, 2))
static int
prof_open_maps(const char *format, ...)
{
	int mfd;
	va_list ap;
	char filename[PATH_MAX + 1];

	va_start(ap, format);
	malloc_vsnprintf(filename, sizeof(filename), format, ap);
	va_end(ap);
	mfd = open(filename, O_RDONLY);

	return (mfd);
}

static bool
prof_dump_maps(bool propagate_err)
{
	bool ret;
	int mfd;

	cassert(config_prof);
#ifdef __FreeBSD__
	mfd = prof_open_maps("/proc/curproc/map");
#else
	{
		int pid = getpid();

		mfd = prof_open_maps("/proc/%d/task/%d/maps", pid, pid);
		if (mfd == -1)
			mfd = prof_open_maps("/proc/%d/maps", pid);
	}
#endif
	if (mfd != -1) {
		ssize_t nread;

		if (prof_dump_write(propagate_err, "\nMAPPED_LIBRARIES:\n") &&
		    propagate_err) {
			ret = true;
			goto label_return;
		}
		nread = 0;
		do {
			prof_dump_buf_end += nread;
			if (prof_dump_buf_end == PROF_DUMP_BUFSIZE) {
				/* Make space in prof_dump_buf before read(). */
				if (prof_dump_flush(propagate_err) &&
				    propagate_err) {
					ret = true;
					goto label_return;
				}
			}
			nread = read(mfd, &prof_dump_buf[prof_dump_buf_end],
			    PROF_DUMP_BUFSIZE - prof_dump_buf_end);
		} while (nread > 0);
	} else {
		ret = true;
		goto label_return;
	}

	ret = false;
label_return:
	if (mfd != -1)
		close(mfd);
	return (ret);
}

static void
prof_leakcheck(const prof_cnt_t *cnt_all, size_t leak_ngctx,
    const char *filename)
{

	if (cnt_all->curbytes != 0) {
		malloc_printf("<jemalloc>: Leak summary: %"PRIu64" byte%s, %"
		    PRIu64" object%s, %zu context%s\n",
		    cnt_all->curbytes, (cnt_all->curbytes != 1) ? "s" : "",
		    cnt_all->curobjs, (cnt_all->curobjs != 1) ? "s" : "",
		    leak_ngctx, (leak_ngctx != 1) ? "s" : "");
		malloc_printf(
		    "<jemalloc>: Run jeprof on \"%s\" for leak detail\n",
		    filename);
	}
}

static prof_gctx_t *
prof_gctx_dump_iter(prof_gctx_tree_t *gctxs, prof_gctx_t *gctx, void *arg)
{
	prof_gctx_t *ret;
	bool propagate_err = *(bool *)arg;

	malloc_mutex_lock(gctx->lock);

	if (prof_dump_gctx(propagate_err, gctx, &gctx->bt, gctxs)) {
		ret = gctx;
		goto label_return;
	}

	ret = NULL;
label_return:
	malloc_mutex_unlock(gctx->lock);
	return (ret);
}

static bool
prof_dump(tsd_t *tsd, bool propagate_err, const char *filename, bool leakcheck)
{
	prof_tdata_t *tdata;
	prof_cnt_t cnt_all;
	size_t tabind;
	union {
		prof_gctx_t	*p;
		void		*v;
	} gctx;
	size_t leak_ngctx;
	prof_gctx_tree_t gctxs;

	cassert(config_prof);

	tdata = prof_tdata_get(tsd, true);
	if (tdata == NULL)
		return (true);

	malloc_mutex_lock(&prof_dump_mtx);
	prof_enter(tsd, tdata);

	/*
	 * Put gctx's in limbo and clear their counters in preparation for
	 * summing.
	 */
	gctx_tree_new(&gctxs);
	for (tabind = 0; !ckh_iter(&bt2gctx, &tabind, NULL, &gctx.v);)
		prof_dump_gctx_prep(gctx.p, &gctxs);

	/*
	 * Iterate over tdatas, and for the non-expired ones snapshot their tctx
	 * stats and merge them into the associated gctx's.
	 */
	memset(&cnt_all, 0, sizeof(prof_cnt_t));
	malloc_mutex_lock(&tdatas_mtx);
	tdata_tree_iter(&tdatas, NULL, prof_tdata_merge_iter, (void *)&cnt_all);
	malloc_mutex_unlock(&tdatas_mtx);

	/* Merge tctx stats into gctx's. */
	leak_ngctx = 0;
	gctx_tree_iter(&gctxs, NULL, prof_gctx_merge_iter, (void *)&leak_ngctx);

	prof_leave(tsd, tdata);

	/* Create dump file. */
	if ((prof_dump_fd = prof_dump_open(propagate_err, filename)) == -1)
		goto label_open_close_error;

	/* Dump profile header. */
	if (prof_dump_header(propagate_err, &cnt_all))
		goto label_write_error;

	/* Dump per gctx profile stats. */
	if (gctx_tree_iter(&gctxs, NULL, prof_gctx_dump_iter,
	    (void *)&propagate_err) != NULL)
		goto label_write_error;

	/* Dump /proc/<pid>/maps if possible. */
	if (prof_dump_maps(propagate_err))
		goto label_write_error;

	if (prof_dump_close(propagate_err))
		goto label_open_close_error;

	prof_gctx_finish(tsd, &gctxs);
	malloc_mutex_unlock(&prof_dump_mtx);

	if (leakcheck)
		prof_leakcheck(&cnt_all, leak_ngctx, filename);

	return (false);
label_write_error:
	prof_dump_close(propagate_err);
label_open_close_error:
	prof_gctx_finish(tsd, &gctxs);
	malloc_mutex_unlock(&prof_dump_mtx);
	return (true);
}

#define	DUMP_FILENAME_BUFSIZE	(PATH_MAX + 1)
#define	VSEQ_INVALID		UINT64_C(0xffffffffffffffff)
static void
prof_dump_filename(char *filename, char v, uint64_t vseq)
{

	cassert(config_prof);

	if (vseq != VSEQ_INVALID) {
		/* "<prefix>.<pid>.<seq>.v<vseq>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"PRIu64".%c%"PRIu64".heap",
		    opt_prof_prefix, (int)getpid(), prof_dump_seq, v, vseq);
	} else {
		/* "<prefix>.<pid>.<seq>.<v>.heap" */
		malloc_snprintf(filename, DUMP_FILENAME_BUFSIZE,
		    "%s.%d.%"PRIu64".%c.heap",
		    opt_prof_prefix, (int)getpid(), prof_dump_seq, v);
	}
	prof_dump_seq++;
}
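
/*
 * For example (values illustrative), with the default "jeprof" prefix, pid
 * 2342, and prof_dump_seq 17, an interval dump (v == 'i', vseq == 3) is
 * written to "jeprof.2342.17.i3.heap", while the final dump (v == 'f',
 * vseq == VSEQ_INVALID) is written to "jeprof.2342.17.f.heap".
 */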
   1546 
   1547 static void
   1548 prof_fdump(void)
   1549 {
   1550 	tsd_t *tsd;
   1551 	char filename[DUMP_FILENAME_BUFSIZE];
   1552 
   1553 	cassert(config_prof);
   1554 	assert(opt_prof_final);
   1555 	assert(opt_prof_prefix[0] != '\0');
   1556 
   1557 	if (!prof_booted)
   1558 		return;
   1559 	tsd = tsd_fetch();
   1560 
   1561 	malloc_mutex_lock(&prof_dump_seq_mtx);
   1562 	prof_dump_filename(filename, 'f', VSEQ_INVALID);
   1563 	malloc_mutex_unlock(&prof_dump_seq_mtx);
   1564 	prof_dump(tsd, false, filename, opt_prof_leak);
   1565 }
   1566 
   1567 void
   1568 prof_idump(void)
   1569 {
   1570 	tsd_t *tsd;
   1571 	prof_tdata_t *tdata;
   1572 	char filename[PATH_MAX + 1];
   1573 
   1574 	cassert(config_prof);
   1575 
   1576 	if (!prof_booted)
   1577 		return;
   1578 	tsd = tsd_fetch();
   1579 	tdata = prof_tdata_get(tsd, false);
   1580 	if (tdata == NULL)
   1581 		return;
   1582 	if (tdata->enq) {
   1583 		tdata->enq_idump = true;
   1584 		return;
   1585 	}
   1586 
   1587 	if (opt_prof_prefix[0] != '\0') {
   1588 		malloc_mutex_lock(&prof_dump_seq_mtx);
   1589 		prof_dump_filename(filename, 'i', prof_dump_iseq);
   1590 		prof_dump_iseq++;
   1591 		malloc_mutex_unlock(&prof_dump_seq_mtx);
   1592 		prof_dump(tsd, false, filename, false);
   1593 	}
   1594 }
   1595 
   1596 bool
   1597 prof_mdump(const char *filename)
   1598 {
   1599 	tsd_t *tsd;
   1600 	char filename_buf[DUMP_FILENAME_BUFSIZE];
   1601 
   1602 	cassert(config_prof);
   1603 
   1604 	if (!opt_prof || !prof_booted)
   1605 		return (true);
   1606 	tsd = tsd_fetch();
   1607 
   1608 	if (filename == NULL) {
   1609 		/* No filename specified, so automatically generate one. */
   1610 		if (opt_prof_prefix[0] == '\0')
   1611 			return (true);
   1612 		malloc_mutex_lock(&prof_dump_seq_mtx);
   1613 		prof_dump_filename(filename_buf, 'm', prof_dump_mseq);
   1614 		prof_dump_mseq++;
   1615 		malloc_mutex_unlock(&prof_dump_seq_mtx);
   1616 		filename = filename_buf;
   1617 	}
   1618 	return (prof_dump(tsd, true, filename, false));
   1619 }
   1620 
   1621 void
   1622 prof_gdump(void)
   1623 {
   1624 	tsd_t *tsd;
   1625 	prof_tdata_t *tdata;
   1626 	char filename[DUMP_FILENAME_BUFSIZE];
   1627 
   1628 	cassert(config_prof);
   1629 
   1630 	if (!prof_booted)
   1631 		return;
   1632 	tsd = tsd_fetch();
   1633 	tdata = prof_tdata_get(tsd, false);
   1634 	if (tdata == NULL)
   1635 		return;
   1636 	if (tdata->enq) {
   1637 		tdata->enq_gdump = true;
   1638 		return;
   1639 	}
   1640 
   1641 	if (opt_prof_prefix[0] != '\0') {
   1642 		malloc_mutex_lock(&prof_dump_seq_mtx);
   1643 		prof_dump_filename(filename, 'u', prof_dump_useq);
   1644 		prof_dump_useq++;
   1645 		malloc_mutex_unlock(&prof_dump_seq_mtx);
   1646 		prof_dump(tsd, false, filename, false);
   1647 	}
   1648 }
   1649 
   1650 static void
   1651 prof_bt_hash(const void *key, size_t r_hash[2])
   1652 {
   1653 	prof_bt_t *bt = (prof_bt_t *)key;
   1654 
   1655 	cassert(config_prof);
   1656 
   1657 	hash(bt->vec, bt->len * sizeof(void *), 0x94122f33U, r_hash);
   1658 }
   1659 
   1660 static bool
   1661 prof_bt_keycomp(const void *k1, const void *k2)
   1662 {
   1663 	const prof_bt_t *bt1 = (prof_bt_t *)k1;
   1664 	const prof_bt_t *bt2 = (prof_bt_t *)k2;
   1665 
   1666 	cassert(config_prof);
   1667 
   1668 	if (bt1->len != bt2->len)
   1669 		return (false);
   1670 	return (memcmp(bt1->vec, bt2->vec, bt1->len * sizeof(void *)) == 0);
   1671 }
   1672 
   1673 JEMALLOC_INLINE_C uint64_t
   1674 prof_thr_uid_alloc(void)
   1675 {
   1676 	uint64_t thr_uid;
   1677 
   1678 	malloc_mutex_lock(&next_thr_uid_mtx);
   1679 	thr_uid = next_thr_uid;
   1680 	next_thr_uid++;
   1681 	malloc_mutex_unlock(&next_thr_uid_mtx);
   1682 
   1683 	return (thr_uid);
   1684 }
   1685 
   1686 static prof_tdata_t *
   1687 prof_tdata_init_impl(tsd_t *tsd, uint64_t thr_uid, uint64_t thr_discrim,
   1688     char *thread_name, bool active)
   1689 {
   1690 	prof_tdata_t *tdata;
   1691 	tcache_t *tcache;
   1692 
   1693 	cassert(config_prof);
   1694 
   1695 	/* Initialize an empty cache for this thread. */
   1696 	tcache = tcache_get(tsd, true);
   1697 	tdata = (prof_tdata_t *)iallocztm(tsd, sizeof(prof_tdata_t), false,
   1698 	    tcache, true, NULL);
   1699 	if (tdata == NULL)
   1700 		return (NULL);
   1701 
   1702 	tdata->lock = prof_tdata_mutex_choose(thr_uid);
   1703 	tdata->thr_uid = thr_uid;
   1704 	tdata->thr_discrim = thr_discrim;
   1705 	tdata->thread_name = thread_name;
   1706 	tdata->attached = true;
   1707 	tdata->expired = false;
   1708 	tdata->tctx_uid_next = 0;
   1709 
   1710 	if (ckh_new(tsd, &tdata->bt2tctx, PROF_CKH_MINITEMS,
   1711 	    prof_bt_hash, prof_bt_keycomp)) {
   1712 		idalloctm(tsd, tdata, tcache, true);
   1713 		return (NULL);
   1714 	}
   1715 
   1716 	tdata->prng_state = (uint64_t)(uintptr_t)tdata;
   1717 	prof_sample_threshold_update(tdata);
   1718 
   1719 	tdata->enq = false;
   1720 	tdata->enq_idump = false;
   1721 	tdata->enq_gdump = false;
   1722 
   1723 	tdata->dumping = false;
   1724 	tdata->active = active;
   1725 
   1726 	malloc_mutex_lock(&tdatas_mtx);
   1727 	tdata_tree_insert(&tdatas, tdata);
   1728 	malloc_mutex_unlock(&tdatas_mtx);
   1729 
   1730 	return (tdata);
   1731 }
   1732 
   1733 prof_tdata_t *
   1734 prof_tdata_init(tsd_t *tsd)
   1735 {
   1736 
   1737 	return (prof_tdata_init_impl(tsd, prof_thr_uid_alloc(), 0, NULL,
   1738 	    prof_thread_active_init_get()));
   1739 }
   1740 
   1741 /* tdata->lock must be held. */
   1742 static bool
   1743 prof_tdata_should_destroy(prof_tdata_t *tdata, bool even_if_attached)
   1744 {
   1745 
   1746 	if (tdata->attached && !even_if_attached)
   1747 		return (false);
   1748 	if (ckh_count(&tdata->bt2tctx) != 0)
   1749 		return (false);
   1750 	return (true);
   1751 }
   1752 
   1753 /* tdatas_mtx must be held. */
   1754 static void
   1755 prof_tdata_destroy_locked(tsd_t *tsd, prof_tdata_t *tdata,
   1756     bool even_if_attached)
   1757 {
   1758 	tcache_t *tcache;
   1759 
   1760 	assert(prof_tdata_should_destroy(tdata, even_if_attached));
   1761 	assert(tsd_prof_tdata_get(tsd) != tdata);
   1762 
   1763 	tdata_tree_remove(&tdatas, tdata);
   1764 
   1765 	tcache = tcache_get(tsd, false);
   1766 	if (tdata->thread_name != NULL)
   1767 		idalloctm(tsd, tdata->thread_name, tcache, true);
   1768 	ckh_delete(tsd, &tdata->bt2tctx);
   1769 	idalloctm(tsd, tdata, tcache, true);
   1770 }
   1771 
   1772 static void
   1773 prof_tdata_destroy(tsd_t *tsd, prof_tdata_t *tdata, bool even_if_attached)
   1774 {
   1775 
   1776 	malloc_mutex_lock(&tdatas_mtx);
   1777 	prof_tdata_destroy_locked(tsd, tdata, even_if_attached);
   1778 	malloc_mutex_unlock(&tdatas_mtx);
   1779 }
   1780 
   1781 static void
   1782 prof_tdata_detach(tsd_t *tsd, prof_tdata_t *tdata)
   1783 {
   1784 	bool destroy_tdata;
   1785 
   1786 	malloc_mutex_lock(tdata->lock);
   1787 	if (tdata->attached) {
   1788 		destroy_tdata = prof_tdata_should_destroy(tdata, true);
   1789 		/*
   1790 		 * Only detach if !destroy_tdata, because detaching would allow
   1791 		 * another thread to win the race to destroy tdata.
   1792 		 */
   1793 		if (!destroy_tdata)
   1794 			tdata->attached = false;
   1795 		tsd_prof_tdata_set(tsd, NULL);
   1796 	} else
   1797 		destroy_tdata = false;
   1798 	malloc_mutex_unlock(tdata->lock);
   1799 	if (destroy_tdata)
   1800 		prof_tdata_destroy(tsd, tdata, true);
   1801 }
   1802 
   1803 prof_tdata_t *
   1804 prof_tdata_reinit(tsd_t *tsd, prof_tdata_t *tdata)
   1805 {
   1806 	uint64_t thr_uid = tdata->thr_uid;
   1807 	uint64_t thr_discrim = tdata->thr_discrim + 1;
   1808 	char *thread_name = (tdata->thread_name != NULL) ?
   1809 	    prof_thread_name_alloc(tsd, tdata->thread_name) : NULL;
   1810 	bool active = tdata->active;
   1811 
   1812 	prof_tdata_detach(tsd, tdata);
   1813 	return (prof_tdata_init_impl(tsd, thr_uid, thr_discrim, thread_name,
   1814 	    active));
   1815 }
   1816 
   1817 static bool
   1818 prof_tdata_expire(prof_tdata_t *tdata)
   1819 {
   1820 	bool destroy_tdata;
   1821 
   1822 	malloc_mutex_lock(tdata->lock);
   1823 	if (!tdata->expired) {
   1824 		tdata->expired = true;
   1825 		destroy_tdata = tdata->attached ? false :
   1826 		    prof_tdata_should_destroy(tdata, false);
   1827 	} else
   1828 		destroy_tdata = false;
   1829 	malloc_mutex_unlock(tdata->lock);
   1830 
   1831 	return (destroy_tdata);
   1832 }
   1833 
   1834 static prof_tdata_t *
   1835 prof_tdata_reset_iter(prof_tdata_tree_t *tdatas, prof_tdata_t *tdata, void *arg)
   1836 {
   1837 
   1838 	return (prof_tdata_expire(tdata) ? tdata : NULL);
   1839 }
   1840 
   1841 void
   1842 prof_reset(tsd_t *tsd, size_t lg_sample)
   1843 {
   1844 	prof_tdata_t *next;
   1845 
   1846 	assert(lg_sample < (sizeof(uint64_t) << 3));
   1847 
   1848 	malloc_mutex_lock(&prof_dump_mtx);
   1849 	malloc_mutex_lock(&tdatas_mtx);
   1850 
   1851 	lg_prof_sample = lg_sample;
   1852 
   1853 	next = NULL;
   1854 	do {
   1855 		prof_tdata_t *to_destroy = tdata_tree_iter(&tdatas, next,
   1856 		    prof_tdata_reset_iter, NULL);
   1857 		if (to_destroy != NULL) {
   1858 			next = tdata_tree_next(&tdatas, to_destroy);
   1859 			prof_tdata_destroy_locked(tsd, to_destroy, false);
   1860 		} else
   1861 			next = NULL;
   1862 	} while (next != NULL);
   1863 
   1864 	malloc_mutex_unlock(&tdatas_mtx);
   1865 	malloc_mutex_unlock(&prof_dump_mtx);
   1866 }
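
         /*
          * The loop above uses a stop-and-restart pattern: the iterator
          * callback returns the first expired tdata (which halts
          * tdata_tree_iter()), the successor is captured, the victim is
          * destroyed, and iteration resumes from the successor.  Destroying
          * nodes from within a single uninterrupted traversal would
          * invalidate the iterator.  prof_reset() is reachable via the
          * "prof.reset" mallctl; a hedged usage sketch:
          *
          *   size_t lg_sample = 19;
          *   mallctl("prof.reset", NULL, NULL, &lg_sample,
          *       sizeof(lg_sample));
          */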
   1867 
   1868 void
   1869 prof_tdata_cleanup(tsd_t *tsd)
   1870 {
   1871 	prof_tdata_t *tdata;
   1872 
   1873 	if (!config_prof)
   1874 		return;
   1875 
   1876 	tdata = tsd_prof_tdata_get(tsd);
   1877 	if (tdata != NULL)
   1878 		prof_tdata_detach(tsd, tdata);
   1879 }
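
         /*
          * prof_tdata_cleanup() runs as a thread-specific-data cleanup hook
          * at thread exit (an assumption about the surrounding tsd
          * machinery), so a dying thread detaches its tdata automatically;
          * the tdata itself survives until its recorded backtraces are no
          * longer referenced.
          */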
   1880 
   1881 bool
   1882 prof_active_get(void)
   1883 {
   1884 	bool prof_active_current;
   1885 
   1886 	malloc_mutex_lock(&prof_active_mtx);
   1887 	prof_active_current = prof_active;
   1888 	malloc_mutex_unlock(&prof_active_mtx);
   1889 	return (prof_active_current);
   1890 }
   1891 
   1892 bool
   1893 prof_active_set(bool active)
   1894 {
   1895 	bool prof_active_old;
   1896 
   1897 	malloc_mutex_lock(&prof_active_mtx);
   1898 	prof_active_old = prof_active;
   1899 	prof_active = active;
   1900 	malloc_mutex_unlock(&prof_active_mtx);
   1901 	return (prof_active_old);
   1902 }
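
         /*
          * prof_active_{get,set}() back the "prof.active" mallctl; a minimal
          * sketch of toggling profiling from application code:
          *
          *   #include <jemalloc/jemalloc.h>
          *
          *   bool active = false;
          *   mallctl("prof.active", NULL, NULL, &active, sizeof(active));
          */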
   1903 
   1904 const char *
   1905 prof_thread_name_get(void)
   1906 {
   1907 	tsd_t *tsd;
   1908 	prof_tdata_t *tdata;
   1909 
   1910 	tsd = tsd_fetch();
   1911 	tdata = prof_tdata_get(tsd, true);
   1912 	if (tdata == NULL)
   1913 		return ("");
   1914 	return (tdata->thread_name != NULL ? tdata->thread_name : "");
   1915 }
   1916 
   1917 static char *
   1918 prof_thread_name_alloc(tsd_t *tsd, const char *thread_name)
   1919 {
   1920 	char *ret;
   1921 	size_t size;
   1922 
   1923 	if (thread_name == NULL)
   1924 		return (NULL);
   1925 
   1926 	size = strlen(thread_name) + 1;
   1927 	if (size == 1)
   1928 		return ("");
   1929 
   1930 	ret = iallocztm(tsd, size, false, tcache_get(tsd, true), true, NULL);
   1931 	if (ret == NULL)
   1932 		return (NULL);
   1933 	memcpy(ret, thread_name, size);
   1934 	return (ret);
   1935 }
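
         /*
          * An empty name is represented by the string literal "" rather than
          * a heap allocation.  Callers must treat that value as non-owned;
          * prof_thread_name_set() below only stores names with strlen() > 0,
          * so the literal is never handed to idalloctm().
          */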
   1936 
   1937 int
   1938 prof_thread_name_set(tsd_t *tsd, const char *thread_name)
   1939 {
   1940 	prof_tdata_t *tdata;
   1941 	unsigned i;
   1942 	char *s;
   1943 
   1944 	tdata = prof_tdata_get(tsd, true);
   1945 	if (tdata == NULL)
   1946 		return (EAGAIN);
   1947 
    1948 	/* Validate input: allow only printable and blank characters. */
   1949 	if (thread_name == NULL)
   1950 		return (EFAULT);
   1951 	for (i = 0; thread_name[i] != '\0'; i++) {
    1952 		unsigned char c = (unsigned char)thread_name[i];
    1953 		if (!isgraph(c) && !isblank(c))
    1954 			return (EFAULT);
   1955 	}
   1956 
   1957 	s = prof_thread_name_alloc(tsd, thread_name);
   1958 	if (s == NULL)
   1959 		return (EAGAIN);
   1960 
   1961 	if (tdata->thread_name != NULL) {
   1962 		idalloctm(tsd, tdata->thread_name, tcache_get(tsd, false),
   1963 		    true);
   1964 		tdata->thread_name = NULL;
   1965 	}
   1966 	if (strlen(s) > 0)
   1967 		tdata->thread_name = s;
   1968 	return (0);
   1969 }
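
         /*
          * This backs the "thread.prof.name" mallctl.  The name is passed by
          * reference; a hedged usage sketch:
          *
          *   const char *name = "worker-3";
          *   mallctl("thread.prof.name", NULL, NULL, &name, sizeof(name));
          */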
   1970 
   1971 bool
   1972 prof_thread_active_get(void)
   1973 {
   1974 	tsd_t *tsd;
   1975 	prof_tdata_t *tdata;
   1976 
   1977 	tsd = tsd_fetch();
   1978 	tdata = prof_tdata_get(tsd, true);
   1979 	if (tdata == NULL)
   1980 		return (false);
   1981 	return (tdata->active);
   1982 }
   1983 
   1984 bool
   1985 prof_thread_active_set(bool active)
   1986 {
   1987 	tsd_t *tsd;
   1988 	prof_tdata_t *tdata;
   1989 
   1990 	tsd = tsd_fetch();
   1991 	tdata = prof_tdata_get(tsd, true);
   1992 	if (tdata == NULL)
   1993 		return (true);
   1994 	tdata->active = active;
   1995 	return (false);
   1996 }
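
         /*
          * Per-thread activation backs the "thread.prof.active" mallctl.
          * Note the return convention above: true signals failure (no tdata
          * available), matching jemalloc's boolean error style.
          */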
   1997 
   1998 bool
   1999 prof_thread_active_init_get(void)
   2000 {
   2001 	bool active_init;
   2002 
   2003 	malloc_mutex_lock(&prof_thread_active_init_mtx);
   2004 	active_init = prof_thread_active_init;
   2005 	malloc_mutex_unlock(&prof_thread_active_init_mtx);
   2006 	return (active_init);
   2007 }
   2008 
   2009 bool
   2010 prof_thread_active_init_set(bool active_init)
   2011 {
   2012 	bool active_init_old;
   2013 
   2014 	malloc_mutex_lock(&prof_thread_active_init_mtx);
   2015 	active_init_old = prof_thread_active_init;
   2016 	prof_thread_active_init = active_init;
   2017 	malloc_mutex_unlock(&prof_thread_active_init_mtx);
   2018 	return (active_init_old);
   2019 }
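
         /*
          * The default per-thread active flag backs the
          * "prof.thread_active_init" mallctl: threads created after a change
          * observe the new value via prof_tdata_init(), while existing
          * threads keep their current setting.
          */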
   2020 
   2021 bool
   2022 prof_gdump_get(void)
   2023 {
   2024 	bool prof_gdump_current;
   2025 
   2026 	malloc_mutex_lock(&prof_gdump_mtx);
   2027 	prof_gdump_current = prof_gdump_val;
   2028 	malloc_mutex_unlock(&prof_gdump_mtx);
   2029 	return (prof_gdump_current);
   2030 }
   2031 
   2032 bool
   2033 prof_gdump_set(bool gdump)
   2034 {
   2035 	bool prof_gdump_old;
   2036 
   2037 	malloc_mutex_lock(&prof_gdump_mtx);
   2038 	prof_gdump_old = prof_gdump_val;
   2039 	prof_gdump_val = gdump;
   2040 	malloc_mutex_unlock(&prof_gdump_mtx);
   2041 	return (prof_gdump_old);
   2042 }
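
         /*
          * prof_gdump_{get,set}() back the "prof.gdump" mallctl.  While
          * enabled, a profile is dumped whenever total virtual memory exceeds
          * its previous maximum.
          */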
   2043 
   2044 void
   2045 prof_boot0(void)
   2046 {
   2047 
   2048 	cassert(config_prof);
   2049 
   2050 	memcpy(opt_prof_prefix, PROF_PREFIX_DEFAULT,
   2051 	    sizeof(PROF_PREFIX_DEFAULT));
   2052 }
   2053 
   2054 void
   2055 prof_boot1(void)
   2056 {
   2057 
   2058 	cassert(config_prof);
   2059 
   2060 	/*
   2061 	 * opt_prof must be in its final state before any arenas are
   2062 	 * initialized, so this function must be executed early.
   2063 	 */
   2064 
   2065 	if (opt_prof_leak && !opt_prof) {
   2066 		/*
   2067 		 * Enable opt_prof, but in such a way that profiles are never
   2068 		 * automatically dumped.
   2069 		 */
   2070 		opt_prof = true;
   2071 		opt_prof_gdump = false;
   2072 	} else if (opt_prof) {
   2073 		if (opt_lg_prof_interval >= 0) {
   2074 			prof_interval = (((uint64_t)1U) <<
   2075 			    opt_lg_prof_interval);
   2076 		}
   2077 	}
   2078 }
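
         /*
          * Example of the interval arithmetic above: with
          * MALLOC_CONF="prof:true,lg_prof_interval:30", prof_interval becomes
          * ((uint64_t)1U) << 30, so a profile dump is triggered on average
          * every 2^30 bytes (1 GiB) of allocation activity.
          */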
   2079 
   2080 bool
   2081 prof_boot2(void)
   2082 {
   2083 
   2084 	cassert(config_prof);
   2085 
   2086 	if (opt_prof) {
   2087 		tsd_t *tsd;
   2088 		unsigned i;
   2089 
   2090 		lg_prof_sample = opt_lg_prof_sample;
   2091 
   2092 		prof_active = opt_prof_active;
   2093 		if (malloc_mutex_init(&prof_active_mtx))
   2094 			return (true);
   2095 
   2096 		prof_gdump_val = opt_prof_gdump;
   2097 		if (malloc_mutex_init(&prof_gdump_mtx))
   2098 			return (true);
   2099 
   2100 		prof_thread_active_init = opt_prof_thread_active_init;
   2101 		if (malloc_mutex_init(&prof_thread_active_init_mtx))
   2102 			return (true);
   2103 
   2104 		tsd = tsd_fetch();
   2105 		if (ckh_new(tsd, &bt2gctx, PROF_CKH_MINITEMS, prof_bt_hash,
   2106 		    prof_bt_keycomp))
   2107 			return (true);
   2108 		if (malloc_mutex_init(&bt2gctx_mtx))
   2109 			return (true);
   2110 
   2111 		tdata_tree_new(&tdatas);
   2112 		if (malloc_mutex_init(&tdatas_mtx))
   2113 			return (true);
   2114 
   2115 		next_thr_uid = 0;
   2116 		if (malloc_mutex_init(&next_thr_uid_mtx))
   2117 			return (true);
   2118 
   2119 		if (malloc_mutex_init(&prof_dump_seq_mtx))
   2120 			return (true);
   2121 		if (malloc_mutex_init(&prof_dump_mtx))
   2122 			return (true);
   2123 
   2124 		if (opt_prof_final && opt_prof_prefix[0] != '\0' &&
   2125 		    atexit(prof_fdump) != 0) {
   2126 			malloc_write("<jemalloc>: Error in atexit()\n");
   2127 			if (opt_abort)
   2128 				abort();
   2129 		}
   2130 
   2131 		gctx_locks = (malloc_mutex_t *)base_alloc(PROF_NCTX_LOCKS *
   2132 		    sizeof(malloc_mutex_t));
   2133 		if (gctx_locks == NULL)
   2134 			return (true);
   2135 		for (i = 0; i < PROF_NCTX_LOCKS; i++) {
   2136 			if (malloc_mutex_init(&gctx_locks[i]))
   2137 				return (true);
   2138 		}
   2139 
   2140 		tdata_locks = (malloc_mutex_t *)base_alloc(PROF_NTDATA_LOCKS *
   2141 		    sizeof(malloc_mutex_t));
   2142 		if (tdata_locks == NULL)
   2143 			return (true);
   2144 		for (i = 0; i < PROF_NTDATA_LOCKS; i++) {
   2145 			if (malloc_mutex_init(&tdata_locks[i]))
   2146 				return (true);
   2147 		}
   2148 	}
   2149 
   2150 #ifdef JEMALLOC_PROF_LIBGCC
   2151 	/*
   2152 	 * Cause the backtracing machinery to allocate its internal state
   2153 	 * before enabling profiling.
   2154 	 */
   2155 	_Unwind_Backtrace(prof_unwind_init_callback, NULL);
   2156 #endif
   2157 
   2158 	prof_booted = true;
   2159 
   2160 	return (false);
   2161 }
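
         /*
          * prof_boot2() follows jemalloc's boolean error convention: a true
          * return aborts allocator initialization.  It is presumably invoked
          * from malloc_init_hard() after boot0/boot1, so all opt_* values are
          * final before the mutexes and the bt2gctx table are created.
          */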
   2162 
   2163 void
   2164 prof_prefork(void)
   2165 {
   2166 
   2167 	if (opt_prof) {
   2168 		unsigned i;
   2169 
   2170 		malloc_mutex_prefork(&tdatas_mtx);
   2171 		malloc_mutex_prefork(&bt2gctx_mtx);
   2172 		malloc_mutex_prefork(&next_thr_uid_mtx);
   2173 		malloc_mutex_prefork(&prof_dump_seq_mtx);
   2174 		for (i = 0; i < PROF_NCTX_LOCKS; i++)
   2175 			malloc_mutex_prefork(&gctx_locks[i]);
   2176 		for (i = 0; i < PROF_NTDATA_LOCKS; i++)
   2177 			malloc_mutex_prefork(&tdata_locks[i]);
   2178 	}
   2179 }
   2180 
   2181 void
   2182 prof_postfork_parent(void)
   2183 {
   2184 
   2185 	if (opt_prof) {
   2186 		unsigned i;
   2187 
   2188 		for (i = 0; i < PROF_NTDATA_LOCKS; i++)
   2189 			malloc_mutex_postfork_parent(&tdata_locks[i]);
   2190 		for (i = 0; i < PROF_NCTX_LOCKS; i++)
   2191 			malloc_mutex_postfork_parent(&gctx_locks[i]);
   2192 		malloc_mutex_postfork_parent(&prof_dump_seq_mtx);
   2193 		malloc_mutex_postfork_parent(&next_thr_uid_mtx);
   2194 		malloc_mutex_postfork_parent(&bt2gctx_mtx);
   2195 		malloc_mutex_postfork_parent(&tdatas_mtx);
   2196 	}
   2197 }
   2198 
   2199 void
   2200 prof_postfork_child(void)
   2201 {
   2202 
   2203 	if (opt_prof) {
   2204 		unsigned i;
   2205 
   2206 		for (i = 0; i < PROF_NTDATA_LOCKS; i++)
   2207 			malloc_mutex_postfork_child(&tdata_locks[i]);
   2208 		for (i = 0; i < PROF_NCTX_LOCKS; i++)
   2209 			malloc_mutex_postfork_child(&gctx_locks[i]);
   2210 		malloc_mutex_postfork_child(&prof_dump_seq_mtx);
   2211 		malloc_mutex_postfork_child(&next_thr_uid_mtx);
   2212 		malloc_mutex_postfork_child(&bt2gctx_mtx);
   2213 		malloc_mutex_postfork_child(&tdatas_mtx);
   2214 	}
   2215 }
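
         /*
          * The three handlers above are presumably registered via
          * pthread_atfork() during bootstrap, so that every prof mutex is
          * held across fork() and then released (or reinitialized in the
          * child) in exactly the reverse of the acquisition order in
          * prof_prefork().  This keeps a child from inheriting a mutex locked
          * by a thread that no longer exists after fork().
          */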
   2216 
   2217 /******************************************************************************/
   2218