1 #define JEMALLOC_ARENA_C_ 2 #include "jemalloc/internal/jemalloc_internal.h" 3 4 /******************************************************************************/ 5 /* Data. */ 6 7 ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT; 8 arena_bin_info_t arena_bin_info[NBINS]; 9 10 JEMALLOC_ALIGNED(CACHELINE) 11 const uint32_t small_bin2size_tab[NBINS] = { 12 #define B2S_bin_yes(size) \ 13 size, 14 #define B2S_bin_no(size) 15 #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ 16 B2S_bin_##bin((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) 17 SIZE_CLASSES 18 #undef B2S_bin_yes 19 #undef B2S_bin_no 20 #undef SC 21 }; 22 23 JEMALLOC_ALIGNED(CACHELINE) 24 const uint8_t small_size2bin_tab[] = { 25 #define S2B_3(i) i, 26 #define S2B_4(i) S2B_3(i) S2B_3(i) 27 #define S2B_5(i) S2B_4(i) S2B_4(i) 28 #define S2B_6(i) S2B_5(i) S2B_5(i) 29 #define S2B_7(i) S2B_6(i) S2B_6(i) 30 #define S2B_8(i) S2B_7(i) S2B_7(i) 31 #define S2B_9(i) S2B_8(i) S2B_8(i) 32 #define S2B_no(i) 33 #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ 34 S2B_##lg_delta_lookup(index) 35 SIZE_CLASSES 36 #undef S2B_3 37 #undef S2B_4 38 #undef S2B_5 39 #undef S2B_6 40 #undef S2B_7 41 #undef S2B_8 42 #undef S2B_9 43 #undef S2B_no 44 #undef SC 45 }; 46 47 /******************************************************************************/ 48 /* 49 * Function prototypes for static functions that are referenced prior to 50 * definition. 51 */ 52 53 static void arena_purge(arena_t *arena, bool all); 54 static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, 55 bool cleaned); 56 static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, 57 arena_run_t *run, arena_bin_t *bin); 58 static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, 59 arena_run_t *run, arena_bin_t *bin); 60 61 /******************************************************************************/ 62 63 JEMALLOC_INLINE_C size_t 64 arena_mapelm_to_pageind(arena_chunk_map_t *mapelm) 65 { 66 uintptr_t map_offset = 67 CHUNK_ADDR2OFFSET(mapelm) - offsetof(arena_chunk_t, map); 68 69 return ((map_offset / sizeof(arena_chunk_map_t)) + map_bias); 70 } 71 72 JEMALLOC_INLINE_C size_t 73 arena_mapelm_to_bits(arena_chunk_map_t *mapelm) 74 { 75 76 return (mapelm->bits); 77 } 78 79 static inline int 80 arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) 81 { 82 uintptr_t a_mapelm = (uintptr_t)a; 83 uintptr_t b_mapelm = (uintptr_t)b; 84 85 assert(a != NULL); 86 assert(b != NULL); 87 88 return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm)); 89 } 90 91 /* Generate red-black tree functions. */ 92 rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t, 93 u.rb_link, arena_run_comp) 94 95 static inline int 96 arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b) 97 { 98 int ret; 99 size_t a_size; 100 size_t b_size = arena_mapelm_to_bits(b) & ~PAGE_MASK; 101 uintptr_t a_mapelm = (uintptr_t)a; 102 uintptr_t b_mapelm = (uintptr_t)b; 103 104 if (a_mapelm & CHUNK_MAP_KEY) 105 a_size = a_mapelm & ~PAGE_MASK; 106 else 107 a_size = arena_mapelm_to_bits(a) & ~PAGE_MASK; 108 109 ret = (a_size > b_size) - (a_size < b_size); 110 if (ret == 0 && (!(a_mapelm & CHUNK_MAP_KEY))) 111 ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm); 112 113 return (ret); 114 } 115 116 /* Generate red-black tree functions. */ 117 rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t, 118 u.rb_link, arena_avail_comp) 119 120 static inline int 121 arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b) 122 { 123 124 assert(a != NULL); 125 assert(b != NULL); 126 127 /* 128 * Short-circuit for self comparison. The following comparison code 129 * would come to the same result, but at the cost of executing the slow 130 * path. 131 */ 132 if (a == b) 133 return (0); 134 135 /* 136 * Order such that chunks with higher fragmentation are "less than" 137 * those with lower fragmentation -- purging order is from "least" to 138 * "greatest". Fragmentation is measured as: 139 * 140 * mean current avail run size 141 * -------------------------------- 142 * mean defragmented avail run size 143 * 144 * navail 145 * ----------- 146 * nruns_avail nruns_avail-nruns_adjac 147 * = ========================= = ----------------------- 148 * navail nruns_avail 149 * ----------------------- 150 * nruns_avail-nruns_adjac 151 * 152 * The following code multiplies away the denominator prior to 153 * comparison, in order to avoid division. 154 * 155 */ 156 { 157 size_t a_val = (a->nruns_avail - a->nruns_adjac) * 158 b->nruns_avail; 159 size_t b_val = (b->nruns_avail - b->nruns_adjac) * 160 a->nruns_avail; 161 162 if (a_val < b_val) 163 return (1); 164 if (a_val > b_val) 165 return (-1); 166 } 167 /* 168 * Break ties by chunk address. For fragmented chunks, report lower 169 * addresses as "lower", so that fragmentation reduction happens first 170 * at lower addresses. However, use the opposite ordering for 171 * unfragmented chunks, in order to increase the chances of 172 * re-allocating dirty runs. 173 */ 174 { 175 uintptr_t a_chunk = (uintptr_t)a; 176 uintptr_t b_chunk = (uintptr_t)b; 177 int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk)); 178 if (a->nruns_adjac == 0) { 179 assert(b->nruns_adjac == 0); 180 ret = -ret; 181 } 182 return (ret); 183 } 184 } 185 186 /* Generate red-black tree functions. */ 187 rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t, 188 dirty_link, arena_chunk_dirty_comp) 189 190 static inline bool 191 arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind) 192 { 193 bool ret; 194 195 if (pageind-1 < map_bias) 196 ret = false; 197 else { 198 ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0); 199 assert(ret == false || arena_mapbits_dirty_get(chunk, 200 pageind-1) != arena_mapbits_dirty_get(chunk, pageind)); 201 } 202 return (ret); 203 } 204 205 static inline bool 206 arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages) 207 { 208 bool ret; 209 210 if (pageind+npages == chunk_npages) 211 ret = false; 212 else { 213 assert(pageind+npages < chunk_npages); 214 ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0); 215 assert(ret == false || arena_mapbits_dirty_get(chunk, pageind) 216 != arena_mapbits_dirty_get(chunk, pageind+npages)); 217 } 218 return (ret); 219 } 220 221 static inline bool 222 arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages) 223 { 224 225 return (arena_avail_adjac_pred(chunk, pageind) || 226 arena_avail_adjac_succ(chunk, pageind, npages)); 227 } 228 229 static void 230 arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 231 size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ) 232 { 233 234 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 235 LG_PAGE)); 236 237 /* 238 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be 239 * removed and reinserted even if the run to be inserted is clean. 240 */ 241 if (chunk->ndirty != 0) 242 arena_chunk_dirty_remove(&arena->chunks_dirty, chunk); 243 244 if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind)) 245 chunk->nruns_adjac++; 246 if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages)) 247 chunk->nruns_adjac++; 248 chunk->nruns_avail++; 249 assert(chunk->nruns_avail > chunk->nruns_adjac); 250 251 if (arena_mapbits_dirty_get(chunk, pageind) != 0) { 252 arena->ndirty += npages; 253 chunk->ndirty += npages; 254 } 255 if (chunk->ndirty != 0) 256 arena_chunk_dirty_insert(&arena->chunks_dirty, chunk); 257 258 arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk, 259 pageind)); 260 } 261 262 static void 263 arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 264 size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ) 265 { 266 267 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 268 LG_PAGE)); 269 270 /* 271 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be 272 * removed and reinserted even if the run to be removed is clean. 273 */ 274 if (chunk->ndirty != 0) 275 arena_chunk_dirty_remove(&arena->chunks_dirty, chunk); 276 277 if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind)) 278 chunk->nruns_adjac--; 279 if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages)) 280 chunk->nruns_adjac--; 281 chunk->nruns_avail--; 282 assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail 283 == 0 && chunk->nruns_adjac == 0)); 284 285 if (arena_mapbits_dirty_get(chunk, pageind) != 0) { 286 arena->ndirty -= npages; 287 chunk->ndirty -= npages; 288 } 289 if (chunk->ndirty != 0) 290 arena_chunk_dirty_insert(&arena->chunks_dirty, chunk); 291 292 arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk, 293 pageind)); 294 } 295 296 static inline void * 297 arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) 298 { 299 void *ret; 300 unsigned regind; 301 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + 302 (uintptr_t)bin_info->bitmap_offset); 303 304 assert(run->nfree > 0); 305 assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false); 306 307 regind = bitmap_sfu(bitmap, &bin_info->bitmap_info); 308 ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset + 309 (uintptr_t)(bin_info->reg_interval * regind)); 310 run->nfree--; 311 if (regind == run->nextind) 312 run->nextind++; 313 assert(regind < run->nextind); 314 return (ret); 315 } 316 317 static inline void 318 arena_run_reg_dalloc(arena_run_t *run, void *ptr) 319 { 320 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 321 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 322 size_t mapbits = arena_mapbits_get(chunk, pageind); 323 size_t binind = arena_ptr_small_binind_get(ptr, mapbits); 324 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 325 unsigned regind = arena_run_regind(run, bin_info, ptr); 326 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + 327 (uintptr_t)bin_info->bitmap_offset); 328 329 assert(run->nfree < bin_info->nregs); 330 /* Freeing an interior pointer can cause assertion failure. */ 331 assert(((uintptr_t)ptr - ((uintptr_t)run + 332 (uintptr_t)bin_info->reg0_offset)) % 333 (uintptr_t)bin_info->reg_interval == 0); 334 assert((uintptr_t)ptr >= (uintptr_t)run + 335 (uintptr_t)bin_info->reg0_offset); 336 /* Freeing an unallocated pointer can cause assertion failure. */ 337 assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind)); 338 339 bitmap_unset(bitmap, &bin_info->bitmap_info, regind); 340 run->nfree++; 341 } 342 343 static inline void 344 arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) 345 { 346 347 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + 348 (run_ind << LG_PAGE)), (npages << LG_PAGE)); 349 memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, 350 (npages << LG_PAGE)); 351 } 352 353 static inline void 354 arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) 355 { 356 357 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind 358 << LG_PAGE)), PAGE); 359 } 360 361 static inline void 362 arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) 363 { 364 size_t i; 365 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); 366 367 arena_run_page_mark_zeroed(chunk, run_ind); 368 for (i = 0; i < PAGE / sizeof(size_t); i++) 369 assert(p[i] == 0); 370 } 371 372 static void 373 arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) 374 { 375 376 if (config_stats) { 377 ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + 378 add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive - 379 sub_pages) << LG_PAGE); 380 if (cactive_diff != 0) 381 stats_cactive_add(cactive_diff); 382 } 383 } 384 385 static void 386 arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, 387 size_t flag_dirty, size_t need_pages) 388 { 389 size_t total_pages, rem_pages; 390 391 total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> 392 LG_PAGE; 393 assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == 394 flag_dirty); 395 assert(need_pages <= total_pages); 396 rem_pages = total_pages - need_pages; 397 398 arena_avail_remove(arena, chunk, run_ind, total_pages, true, true); 399 arena_cactive_update(arena, need_pages, 0); 400 arena->nactive += need_pages; 401 402 /* Keep track of trailing unused pages for later use. */ 403 if (rem_pages > 0) { 404 if (flag_dirty != 0) { 405 arena_mapbits_unallocated_set(chunk, 406 run_ind+need_pages, (rem_pages << LG_PAGE), 407 flag_dirty); 408 arena_mapbits_unallocated_set(chunk, 409 run_ind+total_pages-1, (rem_pages << LG_PAGE), 410 flag_dirty); 411 } else { 412 arena_mapbits_unallocated_set(chunk, run_ind+need_pages, 413 (rem_pages << LG_PAGE), 414 arena_mapbits_unzeroed_get(chunk, 415 run_ind+need_pages)); 416 arena_mapbits_unallocated_set(chunk, 417 run_ind+total_pages-1, (rem_pages << LG_PAGE), 418 arena_mapbits_unzeroed_get(chunk, 419 run_ind+total_pages-1)); 420 } 421 arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages, 422 false, true); 423 } 424 } 425 426 static void 427 arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, 428 bool remove, bool zero) 429 { 430 arena_chunk_t *chunk; 431 size_t flag_dirty, run_ind, need_pages, i; 432 433 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 434 run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); 435 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); 436 need_pages = (size >> LG_PAGE); 437 assert(need_pages > 0); 438 439 if (remove) { 440 arena_run_split_remove(arena, chunk, run_ind, flag_dirty, 441 need_pages); 442 } 443 444 if (zero) { 445 if (flag_dirty == 0) { 446 /* 447 * The run is clean, so some pages may be zeroed (i.e. 448 * never before touched). 449 */ 450 for (i = 0; i < need_pages; i++) { 451 if (arena_mapbits_unzeroed_get(chunk, run_ind+i) 452 != 0) 453 arena_run_zero(chunk, run_ind+i, 1); 454 else if (config_debug) { 455 arena_run_page_validate_zeroed(chunk, 456 run_ind+i); 457 } else { 458 arena_run_page_mark_zeroed(chunk, 459 run_ind+i); 460 } 461 } 462 } else { 463 /* The run is dirty, so all pages must be zeroed. */ 464 arena_run_zero(chunk, run_ind, need_pages); 465 } 466 } else { 467 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + 468 (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); 469 } 470 471 /* 472 * Set the last element first, in case the run only contains one page 473 * (i.e. both statements set the same element). 474 */ 475 arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty); 476 arena_mapbits_large_set(chunk, run_ind, size, flag_dirty); 477 } 478 479 static void 480 arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) 481 { 482 483 arena_run_split_large_helper(arena, run, size, true, zero); 484 } 485 486 static void 487 arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) 488 { 489 490 arena_run_split_large_helper(arena, run, size, false, zero); 491 } 492 493 static void 494 arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, 495 size_t binind) 496 { 497 arena_chunk_t *chunk; 498 size_t flag_dirty, run_ind, need_pages, i; 499 500 assert(binind != BININD_INVALID); 501 502 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 503 run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); 504 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); 505 need_pages = (size >> LG_PAGE); 506 assert(need_pages > 0); 507 508 arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages); 509 510 /* 511 * Propagate the dirty and unzeroed flags to the allocated small run, 512 * so that arena_dalloc_bin_run() has the ability to conditionally trim 513 * clean pages. 514 */ 515 arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty); 516 /* 517 * The first page will always be dirtied during small run 518 * initialization, so a validation failure here would not actually 519 * cause an observable failure. 520 */ 521 if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk, 522 run_ind) == 0) 523 arena_run_page_validate_zeroed(chunk, run_ind); 524 for (i = 1; i < need_pages - 1; i++) { 525 arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0); 526 if (config_debug && flag_dirty == 0 && 527 arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) 528 arena_run_page_validate_zeroed(chunk, run_ind+i); 529 } 530 arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1, 531 binind, flag_dirty); 532 if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk, 533 run_ind+need_pages-1) == 0) 534 arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1); 535 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + 536 (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); 537 } 538 539 static arena_chunk_t * 540 arena_chunk_init_spare(arena_t *arena) 541 { 542 arena_chunk_t *chunk; 543 544 assert(arena->spare != NULL); 545 546 chunk = arena->spare; 547 arena->spare = NULL; 548 549 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); 550 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); 551 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == 552 arena_maxclass); 553 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == 554 arena_maxclass); 555 assert(arena_mapbits_dirty_get(chunk, map_bias) == 556 arena_mapbits_dirty_get(chunk, chunk_npages-1)); 557 558 return (chunk); 559 } 560 561 static arena_chunk_t * 562 arena_chunk_alloc_internal(arena_t *arena, size_t size, size_t alignment, 563 bool *zero) 564 { 565 arena_chunk_t *chunk; 566 chunk_alloc_t *chunk_alloc; 567 chunk_dalloc_t *chunk_dalloc; 568 569 chunk_alloc = arena->chunk_alloc; 570 chunk_dalloc = arena->chunk_dalloc; 571 malloc_mutex_unlock(&arena->lock); 572 chunk = (arena_chunk_t *)chunk_alloc_arena(chunk_alloc, chunk_dalloc, 573 arena->ind, size, alignment, zero); 574 malloc_mutex_lock(&arena->lock); 575 if (config_stats && chunk != NULL) 576 arena->stats.mapped += chunksize; 577 578 return (chunk); 579 } 580 581 void * 582 arena_chunk_alloc_huge(arena_t *arena, size_t size, size_t alignment, 583 bool *zero) 584 { 585 void *ret; 586 chunk_alloc_t *chunk_alloc; 587 chunk_dalloc_t *chunk_dalloc; 588 589 malloc_mutex_lock(&arena->lock); 590 chunk_alloc = arena->chunk_alloc; 591 chunk_dalloc = arena->chunk_dalloc; 592 if (config_stats) { 593 /* Optimistically update stats prior to unlocking. */ 594 arena->stats.mapped += size; 595 arena->stats.allocated_huge += size; 596 arena->stats.nmalloc_huge++; 597 arena->stats.nrequests_huge++; 598 } 599 arena->nactive += (size >> LG_PAGE); 600 malloc_mutex_unlock(&arena->lock); 601 602 ret = chunk_alloc_arena(chunk_alloc, chunk_dalloc, arena->ind, 603 size, alignment, zero); 604 if (config_stats) { 605 if (ret != NULL) 606 stats_cactive_add(size); 607 else { 608 /* Revert optimistic stats updates. */ 609 malloc_mutex_lock(&arena->lock); 610 arena->stats.mapped -= size; 611 arena->stats.allocated_huge -= size; 612 arena->stats.nmalloc_huge--; 613 malloc_mutex_unlock(&arena->lock); 614 } 615 } 616 617 return (ret); 618 } 619 620 static arena_chunk_t * 621 arena_chunk_init_hard(arena_t *arena) 622 { 623 arena_chunk_t *chunk; 624 bool zero; 625 size_t unzeroed, i; 626 627 assert(arena->spare == NULL); 628 629 zero = false; 630 chunk = arena_chunk_alloc_internal(arena, chunksize, chunksize, &zero); 631 if (chunk == NULL) 632 return (NULL); 633 634 chunk->arena = arena; 635 636 /* 637 * Claim that no pages are in use, since the header is merely overhead. 638 */ 639 chunk->ndirty = 0; 640 641 chunk->nruns_avail = 0; 642 chunk->nruns_adjac = 0; 643 644 /* 645 * Initialize the map to contain one maximal free untouched run. Mark 646 * the pages as zeroed iff chunk_alloc() returned a zeroed chunk. 647 */ 648 unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; 649 arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass, 650 unzeroed); 651 /* 652 * There is no need to initialize the internal page map entries unless 653 * the chunk is not zeroed. 654 */ 655 if (zero == false) { 656 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED( 657 (void *)arena_mapp_get(chunk, map_bias+1), 658 (size_t)((uintptr_t) arena_mapp_get(chunk, chunk_npages-1) - 659 (uintptr_t)arena_mapp_get(chunk, map_bias+1))); 660 for (i = map_bias+1; i < chunk_npages-1; i++) 661 arena_mapbits_unzeroed_set(chunk, i, unzeroed); 662 } else { 663 JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk, 664 map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk, 665 chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, 666 map_bias+1))); 667 if (config_debug) { 668 for (i = map_bias+1; i < chunk_npages-1; i++) { 669 assert(arena_mapbits_unzeroed_get(chunk, i) == 670 unzeroed); 671 } 672 } 673 } 674 arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass, 675 unzeroed); 676 677 return (chunk); 678 } 679 680 static arena_chunk_t * 681 arena_chunk_alloc(arena_t *arena) 682 { 683 arena_chunk_t *chunk; 684 685 if (arena->spare != NULL) 686 chunk = arena_chunk_init_spare(arena); 687 else { 688 chunk = arena_chunk_init_hard(arena); 689 if (chunk == NULL) 690 return (NULL); 691 } 692 693 /* Insert the run into the runs_avail tree. */ 694 arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias, 695 false, false); 696 697 return (chunk); 698 } 699 700 static void 701 arena_chunk_dalloc_internal(arena_t *arena, arena_chunk_t *chunk) 702 { 703 chunk_dalloc_t *chunk_dalloc; 704 705 chunk_dalloc = arena->chunk_dalloc; 706 malloc_mutex_unlock(&arena->lock); 707 chunk_dalloc((void *)chunk, chunksize, arena->ind); 708 malloc_mutex_lock(&arena->lock); 709 if (config_stats) 710 arena->stats.mapped -= chunksize; 711 } 712 713 void 714 arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t size) 715 { 716 chunk_dalloc_t *chunk_dalloc; 717 718 malloc_mutex_lock(&arena->lock); 719 chunk_dalloc = arena->chunk_dalloc; 720 if (config_stats) { 721 arena->stats.mapped -= size; 722 arena->stats.allocated_huge -= size; 723 arena->stats.ndalloc_huge++; 724 stats_cactive_sub(size); 725 } 726 arena->nactive -= (size >> LG_PAGE); 727 malloc_mutex_unlock(&arena->lock); 728 chunk_dalloc(chunk, size, arena->ind); 729 } 730 731 static void 732 arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk) 733 { 734 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); 735 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); 736 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == 737 arena_maxclass); 738 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == 739 arena_maxclass); 740 assert(arena_mapbits_dirty_get(chunk, map_bias) == 741 arena_mapbits_dirty_get(chunk, chunk_npages-1)); 742 743 /* 744 * Remove run from the runs_avail tree, so that the arena does not use 745 * it. 746 */ 747 arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias, 748 false, false); 749 750 if (arena->spare != NULL) { 751 arena_chunk_t *spare = arena->spare; 752 753 arena->spare = chunk; 754 arena_chunk_dalloc_internal(arena, spare); 755 } else 756 arena->spare = chunk; 757 } 758 759 static arena_run_t * 760 arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) 761 { 762 arena_run_t *run; 763 arena_chunk_map_t *mapelm; 764 arena_chunk_map_t *key; 765 766 key = (arena_chunk_map_t *)(size | CHUNK_MAP_KEY); 767 mapelm = arena_avail_tree_nsearch(&arena->runs_avail, key); 768 if (mapelm != NULL) { 769 arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); 770 size_t pageind = arena_mapelm_to_pageind(mapelm); 771 772 run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << 773 LG_PAGE)); 774 arena_run_split_large(arena, run, size, zero); 775 return (run); 776 } 777 778 return (NULL); 779 } 780 781 static arena_run_t * 782 arena_run_alloc_large(arena_t *arena, size_t size, bool zero) 783 { 784 arena_chunk_t *chunk; 785 arena_run_t *run; 786 787 assert(size <= arena_maxclass); 788 assert((size & PAGE_MASK) == 0); 789 790 /* Search the arena's chunks for the lowest best fit. */ 791 run = arena_run_alloc_large_helper(arena, size, zero); 792 if (run != NULL) 793 return (run); 794 795 /* 796 * No usable runs. Create a new chunk from which to allocate the run. 797 */ 798 chunk = arena_chunk_alloc(arena); 799 if (chunk != NULL) { 800 run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); 801 arena_run_split_large(arena, run, size, zero); 802 return (run); 803 } 804 805 /* 806 * arena_chunk_alloc() failed, but another thread may have made 807 * sufficient memory available while this one dropped arena->lock in 808 * arena_chunk_alloc(), so search one more time. 809 */ 810 return (arena_run_alloc_large_helper(arena, size, zero)); 811 } 812 813 static arena_run_t * 814 arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind) 815 { 816 arena_run_t *run; 817 arena_chunk_map_t *mapelm; 818 arena_chunk_map_t *key; 819 820 key = (arena_chunk_map_t *)(size | CHUNK_MAP_KEY); 821 mapelm = arena_avail_tree_nsearch(&arena->runs_avail, key); 822 if (mapelm != NULL) { 823 arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); 824 size_t pageind = arena_mapelm_to_pageind(mapelm); 825 826 run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << 827 LG_PAGE)); 828 arena_run_split_small(arena, run, size, binind); 829 return (run); 830 } 831 832 return (NULL); 833 } 834 835 static arena_run_t * 836 arena_run_alloc_small(arena_t *arena, size_t size, size_t binind) 837 { 838 arena_chunk_t *chunk; 839 arena_run_t *run; 840 841 assert(size <= arena_maxclass); 842 assert((size & PAGE_MASK) == 0); 843 assert(binind != BININD_INVALID); 844 845 /* Search the arena's chunks for the lowest best fit. */ 846 run = arena_run_alloc_small_helper(arena, size, binind); 847 if (run != NULL) 848 return (run); 849 850 /* 851 * No usable runs. Create a new chunk from which to allocate the run. 852 */ 853 chunk = arena_chunk_alloc(arena); 854 if (chunk != NULL) { 855 run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); 856 arena_run_split_small(arena, run, size, binind); 857 return (run); 858 } 859 860 /* 861 * arena_chunk_alloc() failed, but another thread may have made 862 * sufficient memory available while this one dropped arena->lock in 863 * arena_chunk_alloc(), so search one more time. 864 */ 865 return (arena_run_alloc_small_helper(arena, size, binind)); 866 } 867 868 static inline void 869 arena_maybe_purge(arena_t *arena) 870 { 871 size_t npurgeable, threshold; 872 873 /* Don't purge if the option is disabled. */ 874 if (opt_lg_dirty_mult < 0) 875 return; 876 /* Don't purge if all dirty pages are already being purged. */ 877 if (arena->ndirty <= arena->npurgatory) 878 return; 879 npurgeable = arena->ndirty - arena->npurgatory; 880 threshold = (arena->nactive >> opt_lg_dirty_mult); 881 /* 882 * Don't purge unless the number of purgeable pages exceeds the 883 * threshold. 884 */ 885 if (npurgeable <= threshold) 886 return; 887 888 arena_purge(arena, false); 889 } 890 891 static arena_chunk_t * 892 chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg) 893 { 894 size_t *ndirty = (size_t *)arg; 895 896 assert(chunk->ndirty != 0); 897 *ndirty += chunk->ndirty; 898 return (NULL); 899 } 900 901 static size_t 902 arena_compute_npurgatory(arena_t *arena, bool all) 903 { 904 size_t npurgatory, npurgeable; 905 906 /* 907 * Compute the minimum number of pages that this thread should try to 908 * purge. 909 */ 910 npurgeable = arena->ndirty - arena->npurgatory; 911 912 if (all == false) { 913 size_t threshold = (arena->nactive >> opt_lg_dirty_mult); 914 915 npurgatory = npurgeable - threshold; 916 } else 917 npurgatory = npurgeable; 918 919 return (npurgatory); 920 } 921 922 static void 923 arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all, 924 arena_chunk_mapelms_t *mapelms) 925 { 926 size_t pageind, npages; 927 928 /* 929 * Temporarily allocate free dirty runs within chunk. If all is false, 930 * only operate on dirty runs that are fragments; otherwise operate on 931 * all dirty runs. 932 */ 933 for (pageind = map_bias; pageind < chunk_npages; pageind += npages) { 934 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); 935 if (arena_mapbits_allocated_get(chunk, pageind) == 0) { 936 size_t run_size = 937 arena_mapbits_unallocated_size_get(chunk, pageind); 938 939 npages = run_size >> LG_PAGE; 940 assert(pageind + npages <= chunk_npages); 941 assert(arena_mapbits_dirty_get(chunk, pageind) == 942 arena_mapbits_dirty_get(chunk, pageind+npages-1)); 943 944 if (arena_mapbits_dirty_get(chunk, pageind) != 0 && 945 (all || arena_avail_adjac(chunk, pageind, 946 npages))) { 947 arena_run_t *run = (arena_run_t *)((uintptr_t) 948 chunk + (uintptr_t)(pageind << LG_PAGE)); 949 950 arena_run_split_large(arena, run, run_size, 951 false); 952 /* Append to list for later processing. */ 953 ql_elm_new(mapelm, u.ql_link); 954 ql_tail_insert(mapelms, mapelm, u.ql_link); 955 } 956 } else { 957 /* Skip run. */ 958 if (arena_mapbits_large_get(chunk, pageind) != 0) { 959 npages = arena_mapbits_large_size_get(chunk, 960 pageind) >> LG_PAGE; 961 } else { 962 size_t binind; 963 arena_bin_info_t *bin_info; 964 arena_run_t *run = (arena_run_t *)((uintptr_t) 965 chunk + (uintptr_t)(pageind << LG_PAGE)); 966 967 assert(arena_mapbits_small_runind_get(chunk, 968 pageind) == 0); 969 binind = arena_bin_index(arena, run->bin); 970 bin_info = &arena_bin_info[binind]; 971 npages = bin_info->run_size >> LG_PAGE; 972 } 973 } 974 } 975 assert(pageind == chunk_npages); 976 assert(chunk->ndirty == 0 || all == false); 977 assert(chunk->nruns_adjac == 0); 978 } 979 980 static size_t 981 arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk, 982 arena_chunk_mapelms_t *mapelms) 983 { 984 size_t npurged, pageind, npages, nmadvise; 985 arena_chunk_map_t *mapelm; 986 987 malloc_mutex_unlock(&arena->lock); 988 if (config_stats) 989 nmadvise = 0; 990 npurged = 0; 991 ql_foreach(mapelm, mapelms, u.ql_link) { 992 bool unzeroed; 993 size_t flag_unzeroed, i; 994 995 pageind = arena_mapelm_to_pageind(mapelm); 996 npages = arena_mapbits_large_size_get(chunk, pageind) >> 997 LG_PAGE; 998 assert(pageind + npages <= chunk_npages); 999 unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind << 1000 LG_PAGE)), (npages << LG_PAGE)); 1001 flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0; 1002 /* 1003 * Set the unzeroed flag for all pages, now that pages_purge() 1004 * has returned whether the pages were zeroed as a side effect 1005 * of purging. This chunk map modification is safe even though 1006 * the arena mutex isn't currently owned by this thread, 1007 * because the run is marked as allocated, thus protecting it 1008 * from being modified by any other thread. As long as these 1009 * writes don't perturb the first and last elements' 1010 * CHUNK_MAP_ALLOCATED bits, behavior is well defined. 1011 */ 1012 for (i = 0; i < npages; i++) { 1013 arena_mapbits_unzeroed_set(chunk, pageind+i, 1014 flag_unzeroed); 1015 } 1016 npurged += npages; 1017 if (config_stats) 1018 nmadvise++; 1019 } 1020 malloc_mutex_lock(&arena->lock); 1021 if (config_stats) 1022 arena->stats.nmadvise += nmadvise; 1023 1024 return (npurged); 1025 } 1026 1027 static void 1028 arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk, 1029 arena_chunk_mapelms_t *mapelms) 1030 { 1031 arena_chunk_map_t *mapelm; 1032 size_t pageind; 1033 1034 /* Deallocate runs. */ 1035 for (mapelm = ql_first(mapelms); mapelm != NULL; 1036 mapelm = ql_first(mapelms)) { 1037 arena_run_t *run; 1038 1039 pageind = arena_mapelm_to_pageind(mapelm); 1040 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind << 1041 LG_PAGE)); 1042 ql_remove(mapelms, mapelm, u.ql_link); 1043 arena_run_dalloc(arena, run, false, true); 1044 } 1045 } 1046 1047 static inline size_t 1048 arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all) 1049 { 1050 size_t npurged; 1051 arena_chunk_mapelms_t mapelms; 1052 1053 ql_new(&mapelms); 1054 1055 /* 1056 * If chunk is the spare, temporarily re-allocate it, 1) so that its 1057 * run is reinserted into runs_avail, and 2) so that it cannot be 1058 * completely discarded by another thread while arena->lock is dropped 1059 * by this thread. Note that the arena_run_dalloc() call will 1060 * implicitly deallocate the chunk, so no explicit action is required 1061 * in this function to deallocate the chunk. 1062 * 1063 * Note that once a chunk contains dirty pages, it cannot again contain 1064 * a single run unless 1) it is a dirty run, or 2) this function purges 1065 * dirty pages and causes the transition to a single clean run. Thus 1066 * (chunk == arena->spare) is possible, but it is not possible for 1067 * this function to be called on the spare unless it contains a dirty 1068 * run. 1069 */ 1070 if (chunk == arena->spare) { 1071 assert(arena_mapbits_dirty_get(chunk, map_bias) != 0); 1072 assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0); 1073 1074 arena_chunk_alloc(arena); 1075 } 1076 1077 if (config_stats) 1078 arena->stats.purged += chunk->ndirty; 1079 1080 /* 1081 * Operate on all dirty runs if there is no clean/dirty run 1082 * fragmentation. 1083 */ 1084 if (chunk->nruns_adjac == 0) 1085 all = true; 1086 1087 arena_chunk_stash_dirty(arena, chunk, all, &mapelms); 1088 npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms); 1089 arena_chunk_unstash_purged(arena, chunk, &mapelms); 1090 1091 return (npurged); 1092 } 1093 1094 static void 1095 arena_purge(arena_t *arena, bool all) 1096 { 1097 arena_chunk_t *chunk; 1098 size_t npurgatory; 1099 if (config_debug) { 1100 size_t ndirty = 0; 1101 1102 arena_chunk_dirty_iter(&arena->chunks_dirty, NULL, 1103 chunks_dirty_iter_cb, (void *)&ndirty); 1104 assert(ndirty == arena->ndirty); 1105 } 1106 assert(arena->ndirty > arena->npurgatory || all); 1107 assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty - 1108 arena->npurgatory) || all); 1109 1110 if (config_stats) 1111 arena->stats.npurge++; 1112 1113 /* 1114 * Add the minimum number of pages this thread should try to purge to 1115 * arena->npurgatory. This will keep multiple threads from racing to 1116 * reduce ndirty below the threshold. 1117 */ 1118 npurgatory = arena_compute_npurgatory(arena, all); 1119 arena->npurgatory += npurgatory; 1120 1121 while (npurgatory > 0) { 1122 size_t npurgeable, npurged, nunpurged; 1123 1124 /* Get next chunk with dirty pages. */ 1125 chunk = arena_chunk_dirty_first(&arena->chunks_dirty); 1126 if (chunk == NULL) { 1127 /* 1128 * This thread was unable to purge as many pages as 1129 * originally intended, due to races with other threads 1130 * that either did some of the purging work, or re-used 1131 * dirty pages. 1132 */ 1133 arena->npurgatory -= npurgatory; 1134 return; 1135 } 1136 npurgeable = chunk->ndirty; 1137 assert(npurgeable != 0); 1138 1139 if (npurgeable > npurgatory && chunk->nruns_adjac == 0) { 1140 /* 1141 * This thread will purge all the dirty pages in chunk, 1142 * so set npurgatory to reflect this thread's intent to 1143 * purge the pages. This tends to reduce the chances 1144 * of the following scenario: 1145 * 1146 * 1) This thread sets arena->npurgatory such that 1147 * (arena->ndirty - arena->npurgatory) is at the 1148 * threshold. 1149 * 2) This thread drops arena->lock. 1150 * 3) Another thread causes one or more pages to be 1151 * dirtied, and immediately determines that it must 1152 * purge dirty pages. 1153 * 1154 * If this scenario *does* play out, that's okay, 1155 * because all of the purging work being done really 1156 * needs to happen. 1157 */ 1158 arena->npurgatory += npurgeable - npurgatory; 1159 npurgatory = npurgeable; 1160 } 1161 1162 /* 1163 * Keep track of how many pages are purgeable, versus how many 1164 * actually get purged, and adjust counters accordingly. 1165 */ 1166 arena->npurgatory -= npurgeable; 1167 npurgatory -= npurgeable; 1168 npurged = arena_chunk_purge(arena, chunk, all); 1169 nunpurged = npurgeable - npurged; 1170 arena->npurgatory += nunpurged; 1171 npurgatory += nunpurged; 1172 } 1173 } 1174 1175 void 1176 arena_purge_all(arena_t *arena) 1177 { 1178 1179 malloc_mutex_lock(&arena->lock); 1180 arena_purge(arena, true); 1181 malloc_mutex_unlock(&arena->lock); 1182 } 1183 1184 static void 1185 arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, 1186 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty) 1187 { 1188 size_t size = *p_size; 1189 size_t run_ind = *p_run_ind; 1190 size_t run_pages = *p_run_pages; 1191 1192 /* Try to coalesce forward. */ 1193 if (run_ind + run_pages < chunk_npages && 1194 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && 1195 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) { 1196 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, 1197 run_ind+run_pages); 1198 size_t nrun_pages = nrun_size >> LG_PAGE; 1199 1200 /* 1201 * Remove successor from runs_avail; the coalesced run is 1202 * inserted later. 1203 */ 1204 assert(arena_mapbits_unallocated_size_get(chunk, 1205 run_ind+run_pages+nrun_pages-1) == nrun_size); 1206 assert(arena_mapbits_dirty_get(chunk, 1207 run_ind+run_pages+nrun_pages-1) == flag_dirty); 1208 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages, 1209 false, true); 1210 1211 size += nrun_size; 1212 run_pages += nrun_pages; 1213 1214 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 1215 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 1216 size); 1217 } 1218 1219 /* Try to coalesce backward. */ 1220 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, 1221 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == 1222 flag_dirty) { 1223 size_t prun_size = arena_mapbits_unallocated_size_get(chunk, 1224 run_ind-1); 1225 size_t prun_pages = prun_size >> LG_PAGE; 1226 1227 run_ind -= prun_pages; 1228 1229 /* 1230 * Remove predecessor from runs_avail; the coalesced run is 1231 * inserted later. 1232 */ 1233 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 1234 prun_size); 1235 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); 1236 arena_avail_remove(arena, chunk, run_ind, prun_pages, true, 1237 false); 1238 1239 size += prun_size; 1240 run_pages += prun_pages; 1241 1242 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 1243 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 1244 size); 1245 } 1246 1247 *p_size = size; 1248 *p_run_ind = run_ind; 1249 *p_run_pages = run_pages; 1250 } 1251 1252 static void 1253 arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) 1254 { 1255 arena_chunk_t *chunk; 1256 size_t size, run_ind, run_pages, flag_dirty; 1257 1258 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1259 run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); 1260 assert(run_ind >= map_bias); 1261 assert(run_ind < chunk_npages); 1262 if (arena_mapbits_large_get(chunk, run_ind) != 0) { 1263 size = arena_mapbits_large_size_get(chunk, run_ind); 1264 assert(size == PAGE || 1265 arena_mapbits_large_size_get(chunk, 1266 run_ind+(size>>LG_PAGE)-1) == 0); 1267 } else { 1268 size_t binind = arena_bin_index(arena, run->bin); 1269 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 1270 size = bin_info->run_size; 1271 } 1272 run_pages = (size >> LG_PAGE); 1273 arena_cactive_update(arena, 0, run_pages); 1274 arena->nactive -= run_pages; 1275 1276 /* 1277 * The run is dirty if the caller claims to have dirtied it, as well as 1278 * if it was already dirty before being allocated and the caller 1279 * doesn't claim to have cleaned it. 1280 */ 1281 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1282 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 1283 if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0) 1284 dirty = true; 1285 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; 1286 1287 /* Mark pages as unallocated in the chunk map. */ 1288 if (dirty) { 1289 arena_mapbits_unallocated_set(chunk, run_ind, size, 1290 CHUNK_MAP_DIRTY); 1291 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 1292 CHUNK_MAP_DIRTY); 1293 } else { 1294 arena_mapbits_unallocated_set(chunk, run_ind, size, 1295 arena_mapbits_unzeroed_get(chunk, run_ind)); 1296 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 1297 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); 1298 } 1299 1300 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, 1301 flag_dirty); 1302 1303 /* Insert into runs_avail, now that coalescing is complete. */ 1304 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 1305 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); 1306 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1307 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 1308 arena_avail_insert(arena, chunk, run_ind, run_pages, true, true); 1309 1310 /* Deallocate chunk if it is now completely unused. */ 1311 if (size == arena_maxclass) { 1312 assert(run_ind == map_bias); 1313 assert(run_pages == (arena_maxclass >> LG_PAGE)); 1314 arena_chunk_dalloc(arena, chunk); 1315 } 1316 1317 /* 1318 * It is okay to do dirty page processing here even if the chunk was 1319 * deallocated above, since in that case it is the spare. Waiting 1320 * until after possible chunk deallocation to do dirty processing 1321 * allows for an old spare to be fully deallocated, thus decreasing the 1322 * chances of spuriously crossing the dirty page purging threshold. 1323 */ 1324 if (dirty) 1325 arena_maybe_purge(arena); 1326 } 1327 1328 static void 1329 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1330 size_t oldsize, size_t newsize) 1331 { 1332 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; 1333 size_t head_npages = (oldsize - newsize) >> LG_PAGE; 1334 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 1335 1336 assert(oldsize > newsize); 1337 1338 /* 1339 * Update the chunk map so that arena_run_dalloc() can treat the 1340 * leading run as separately allocated. Set the last element of each 1341 * run first, in case of single-page runs. 1342 */ 1343 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 1344 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); 1345 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty); 1346 1347 if (config_debug) { 1348 UNUSED size_t tail_npages = newsize >> LG_PAGE; 1349 assert(arena_mapbits_large_size_get(chunk, 1350 pageind+head_npages+tail_npages-1) == 0); 1351 assert(arena_mapbits_dirty_get(chunk, 1352 pageind+head_npages+tail_npages-1) == flag_dirty); 1353 } 1354 arena_mapbits_large_set(chunk, pageind+head_npages, newsize, 1355 flag_dirty); 1356 1357 arena_run_dalloc(arena, run, false, false); 1358 } 1359 1360 static void 1361 arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1362 size_t oldsize, size_t newsize, bool dirty) 1363 { 1364 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; 1365 size_t head_npages = newsize >> LG_PAGE; 1366 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 1367 1368 assert(oldsize > newsize); 1369 1370 /* 1371 * Update the chunk map so that arena_run_dalloc() can treat the 1372 * trailing run as separately allocated. Set the last element of each 1373 * run first, in case of single-page runs. 1374 */ 1375 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 1376 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); 1377 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty); 1378 1379 if (config_debug) { 1380 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; 1381 assert(arena_mapbits_large_size_get(chunk, 1382 pageind+head_npages+tail_npages-1) == 0); 1383 assert(arena_mapbits_dirty_get(chunk, 1384 pageind+head_npages+tail_npages-1) == flag_dirty); 1385 } 1386 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, 1387 flag_dirty); 1388 1389 arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize), 1390 dirty, false); 1391 } 1392 1393 static arena_run_t * 1394 arena_bin_runs_first(arena_bin_t *bin) 1395 { 1396 arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs); 1397 if (mapelm != NULL) { 1398 arena_chunk_t *chunk; 1399 size_t pageind; 1400 arena_run_t *run; 1401 1402 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm); 1403 pageind = arena_mapelm_to_pageind(mapelm); 1404 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - 1405 arena_mapbits_small_runind_get(chunk, pageind)) << 1406 LG_PAGE)); 1407 return (run); 1408 } 1409 1410 return (NULL); 1411 } 1412 1413 static void 1414 arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) 1415 { 1416 arena_chunk_t *chunk = CHUNK_ADDR2BASE(run); 1417 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; 1418 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); 1419 1420 assert(arena_run_tree_search(&bin->runs, mapelm) == NULL); 1421 1422 arena_run_tree_insert(&bin->runs, mapelm); 1423 } 1424 1425 static void 1426 arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) 1427 { 1428 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1429 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; 1430 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); 1431 1432 assert(arena_run_tree_search(&bin->runs, mapelm) != NULL); 1433 1434 arena_run_tree_remove(&bin->runs, mapelm); 1435 } 1436 1437 static arena_run_t * 1438 arena_bin_nonfull_run_tryget(arena_bin_t *bin) 1439 { 1440 arena_run_t *run = arena_bin_runs_first(bin); 1441 if (run != NULL) { 1442 arena_bin_runs_remove(bin, run); 1443 if (config_stats) 1444 bin->stats.reruns++; 1445 } 1446 return (run); 1447 } 1448 1449 static arena_run_t * 1450 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) 1451 { 1452 arena_run_t *run; 1453 size_t binind; 1454 arena_bin_info_t *bin_info; 1455 1456 /* Look for a usable run. */ 1457 run = arena_bin_nonfull_run_tryget(bin); 1458 if (run != NULL) 1459 return (run); 1460 /* No existing runs have any space available. */ 1461 1462 binind = arena_bin_index(arena, bin); 1463 bin_info = &arena_bin_info[binind]; 1464 1465 /* Allocate a new run. */ 1466 malloc_mutex_unlock(&bin->lock); 1467 /******************************/ 1468 malloc_mutex_lock(&arena->lock); 1469 run = arena_run_alloc_small(arena, bin_info->run_size, binind); 1470 if (run != NULL) { 1471 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + 1472 (uintptr_t)bin_info->bitmap_offset); 1473 1474 /* Initialize run internals. */ 1475 run->bin = bin; 1476 run->nextind = 0; 1477 run->nfree = bin_info->nregs; 1478 bitmap_init(bitmap, &bin_info->bitmap_info); 1479 } 1480 malloc_mutex_unlock(&arena->lock); 1481 /********************************/ 1482 malloc_mutex_lock(&bin->lock); 1483 if (run != NULL) { 1484 if (config_stats) { 1485 bin->stats.nruns++; 1486 bin->stats.curruns++; 1487 } 1488 return (run); 1489 } 1490 1491 /* 1492 * arena_run_alloc_small() failed, but another thread may have made 1493 * sufficient memory available while this one dropped bin->lock above, 1494 * so search one more time. 1495 */ 1496 run = arena_bin_nonfull_run_tryget(bin); 1497 if (run != NULL) 1498 return (run); 1499 1500 return (NULL); 1501 } 1502 1503 /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ 1504 static void * 1505 arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) 1506 { 1507 void *ret; 1508 size_t binind; 1509 arena_bin_info_t *bin_info; 1510 arena_run_t *run; 1511 1512 binind = arena_bin_index(arena, bin); 1513 bin_info = &arena_bin_info[binind]; 1514 bin->runcur = NULL; 1515 run = arena_bin_nonfull_run_get(arena, bin); 1516 if (bin->runcur != NULL && bin->runcur->nfree > 0) { 1517 /* 1518 * Another thread updated runcur while this one ran without the 1519 * bin lock in arena_bin_nonfull_run_get(). 1520 */ 1521 assert(bin->runcur->nfree > 0); 1522 ret = arena_run_reg_alloc(bin->runcur, bin_info); 1523 if (run != NULL) { 1524 arena_chunk_t *chunk; 1525 1526 /* 1527 * arena_run_alloc_small() may have allocated run, or 1528 * it may have pulled run from the bin's run tree. 1529 * Therefore it is unsafe to make any assumptions about 1530 * how run has previously been used, and 1531 * arena_bin_lower_run() must be called, as if a region 1532 * were just deallocated from the run. 1533 */ 1534 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1535 if (run->nfree == bin_info->nregs) 1536 arena_dalloc_bin_run(arena, chunk, run, bin); 1537 else 1538 arena_bin_lower_run(arena, chunk, run, bin); 1539 } 1540 return (ret); 1541 } 1542 1543 if (run == NULL) 1544 return (NULL); 1545 1546 bin->runcur = run; 1547 1548 assert(bin->runcur->nfree > 0); 1549 1550 return (arena_run_reg_alloc(bin->runcur, bin_info)); 1551 } 1552 1553 void 1554 arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind, 1555 uint64_t prof_accumbytes) 1556 { 1557 unsigned i, nfill; 1558 arena_bin_t *bin; 1559 arena_run_t *run; 1560 void *ptr; 1561 1562 assert(tbin->ncached == 0); 1563 1564 if (config_prof && arena_prof_accum(arena, prof_accumbytes)) 1565 prof_idump(); 1566 bin = &arena->bins[binind]; 1567 malloc_mutex_lock(&bin->lock); 1568 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> 1569 tbin->lg_fill_div); i < nfill; i++) { 1570 if ((run = bin->runcur) != NULL && run->nfree > 0) 1571 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); 1572 else 1573 ptr = arena_bin_malloc_hard(arena, bin); 1574 if (ptr == NULL) 1575 break; 1576 if (config_fill && opt_junk) { 1577 arena_alloc_junk_small(ptr, &arena_bin_info[binind], 1578 true); 1579 } 1580 /* Insert such that low regions get used first. */ 1581 tbin->avail[nfill - 1 - i] = ptr; 1582 } 1583 if (config_stats) { 1584 bin->stats.allocated += i * arena_bin_info[binind].reg_size; 1585 bin->stats.nmalloc += i; 1586 bin->stats.nrequests += tbin->tstats.nrequests; 1587 bin->stats.nfills++; 1588 tbin->tstats.nrequests = 0; 1589 } 1590 malloc_mutex_unlock(&bin->lock); 1591 tbin->ncached = i; 1592 } 1593 1594 void 1595 arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) 1596 { 1597 1598 if (zero) { 1599 size_t redzone_size = bin_info->redzone_size; 1600 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, 1601 redzone_size); 1602 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, 1603 redzone_size); 1604 } else { 1605 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, 1606 bin_info->reg_interval); 1607 } 1608 } 1609 1610 #ifdef JEMALLOC_JET 1611 #undef arena_redzone_corruption 1612 #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) 1613 #endif 1614 static void 1615 arena_redzone_corruption(void *ptr, size_t usize, bool after, 1616 size_t offset, uint8_t byte) 1617 { 1618 1619 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p " 1620 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", 1621 after ? "after" : "before", ptr, usize, byte); 1622 } 1623 #ifdef JEMALLOC_JET 1624 #undef arena_redzone_corruption 1625 #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) 1626 arena_redzone_corruption_t *arena_redzone_corruption = 1627 JEMALLOC_N(arena_redzone_corruption_impl); 1628 #endif 1629 1630 static void 1631 arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) 1632 { 1633 size_t size = bin_info->reg_size; 1634 size_t redzone_size = bin_info->redzone_size; 1635 size_t i; 1636 bool error = false; 1637 1638 for (i = 1; i <= redzone_size; i++) { 1639 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); 1640 if (*byte != 0xa5) { 1641 error = true; 1642 arena_redzone_corruption(ptr, size, false, i, *byte); 1643 if (reset) 1644 *byte = 0xa5; 1645 } 1646 } 1647 for (i = 0; i < redzone_size; i++) { 1648 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); 1649 if (*byte != 0xa5) { 1650 error = true; 1651 arena_redzone_corruption(ptr, size, true, i, *byte); 1652 if (reset) 1653 *byte = 0xa5; 1654 } 1655 } 1656 if (opt_abort && error) 1657 abort(); 1658 } 1659 1660 #ifdef JEMALLOC_JET 1661 #undef arena_dalloc_junk_small 1662 #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) 1663 #endif 1664 void 1665 arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) 1666 { 1667 size_t redzone_size = bin_info->redzone_size; 1668 1669 arena_redzones_validate(ptr, bin_info, false); 1670 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, 1671 bin_info->reg_interval); 1672 } 1673 #ifdef JEMALLOC_JET 1674 #undef arena_dalloc_junk_small 1675 #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) 1676 arena_dalloc_junk_small_t *arena_dalloc_junk_small = 1677 JEMALLOC_N(arena_dalloc_junk_small_impl); 1678 #endif 1679 1680 void 1681 arena_quarantine_junk_small(void *ptr, size_t usize) 1682 { 1683 size_t binind; 1684 arena_bin_info_t *bin_info; 1685 cassert(config_fill); 1686 assert(opt_junk); 1687 assert(opt_quarantine); 1688 assert(usize <= SMALL_MAXCLASS); 1689 1690 binind = small_size2bin(usize); 1691 bin_info = &arena_bin_info[binind]; 1692 arena_redzones_validate(ptr, bin_info, true); 1693 } 1694 1695 void * 1696 arena_malloc_small(arena_t *arena, size_t size, bool zero) 1697 { 1698 void *ret; 1699 arena_bin_t *bin; 1700 arena_run_t *run; 1701 size_t binind; 1702 1703 binind = small_size2bin(size); 1704 assert(binind < NBINS); 1705 bin = &arena->bins[binind]; 1706 size = small_bin2size(binind); 1707 1708 malloc_mutex_lock(&bin->lock); 1709 if ((run = bin->runcur) != NULL && run->nfree > 0) 1710 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); 1711 else 1712 ret = arena_bin_malloc_hard(arena, bin); 1713 1714 if (ret == NULL) { 1715 malloc_mutex_unlock(&bin->lock); 1716 return (NULL); 1717 } 1718 1719 if (config_stats) { 1720 bin->stats.allocated += size; 1721 bin->stats.nmalloc++; 1722 bin->stats.nrequests++; 1723 } 1724 malloc_mutex_unlock(&bin->lock); 1725 if (config_prof && isthreaded == false && arena_prof_accum(arena, size)) 1726 prof_idump(); 1727 1728 if (zero == false) { 1729 if (config_fill) { 1730 if (opt_junk) { 1731 arena_alloc_junk_small(ret, 1732 &arena_bin_info[binind], false); 1733 } else if (opt_zero) 1734 memset(ret, 0, size); 1735 } 1736 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); 1737 } else { 1738 if (config_fill && opt_junk) { 1739 arena_alloc_junk_small(ret, &arena_bin_info[binind], 1740 true); 1741 } 1742 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, size); 1743 memset(ret, 0, size); 1744 } 1745 1746 return (ret); 1747 } 1748 1749 void * 1750 arena_malloc_large(arena_t *arena, size_t size, bool zero) 1751 { 1752 void *ret; 1753 UNUSED bool idump; 1754 1755 /* Large allocation. */ 1756 size = PAGE_CEILING(size); 1757 malloc_mutex_lock(&arena->lock); 1758 ret = (void *)arena_run_alloc_large(arena, size, zero); 1759 if (ret == NULL) { 1760 malloc_mutex_unlock(&arena->lock); 1761 return (NULL); 1762 } 1763 if (config_stats) { 1764 arena->stats.nmalloc_large++; 1765 arena->stats.nrequests_large++; 1766 arena->stats.allocated_large += size; 1767 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; 1768 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; 1769 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; 1770 } 1771 if (config_prof) 1772 idump = arena_prof_accum_locked(arena, size); 1773 malloc_mutex_unlock(&arena->lock); 1774 if (config_prof && idump) 1775 prof_idump(); 1776 1777 if (zero == false) { 1778 if (config_fill) { 1779 if (opt_junk) 1780 memset(ret, 0xa5, size); 1781 else if (opt_zero) 1782 memset(ret, 0, size); 1783 } 1784 } 1785 1786 return (ret); 1787 } 1788 1789 /* Only handles large allocations that require more than page alignment. */ 1790 void * 1791 arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero) 1792 { 1793 void *ret; 1794 size_t alloc_size, leadsize, trailsize; 1795 arena_run_t *run; 1796 arena_chunk_t *chunk; 1797 1798 assert((size & PAGE_MASK) == 0); 1799 1800 alignment = PAGE_CEILING(alignment); 1801 alloc_size = size + alignment - PAGE; 1802 1803 malloc_mutex_lock(&arena->lock); 1804 run = arena_run_alloc_large(arena, alloc_size, false); 1805 if (run == NULL) { 1806 malloc_mutex_unlock(&arena->lock); 1807 return (NULL); 1808 } 1809 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1810 1811 leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) - 1812 (uintptr_t)run; 1813 assert(alloc_size >= leadsize + size); 1814 trailsize = alloc_size - leadsize - size; 1815 ret = (void *)((uintptr_t)run + leadsize); 1816 if (leadsize != 0) { 1817 arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size - 1818 leadsize); 1819 } 1820 if (trailsize != 0) { 1821 arena_run_trim_tail(arena, chunk, ret, size + trailsize, size, 1822 false); 1823 } 1824 arena_run_init_large(arena, (arena_run_t *)ret, size, zero); 1825 1826 if (config_stats) { 1827 arena->stats.nmalloc_large++; 1828 arena->stats.nrequests_large++; 1829 arena->stats.allocated_large += size; 1830 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; 1831 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; 1832 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; 1833 } 1834 malloc_mutex_unlock(&arena->lock); 1835 1836 if (config_fill && zero == false) { 1837 if (opt_junk) 1838 memset(ret, 0xa5, size); 1839 else if (opt_zero) 1840 memset(ret, 0, size); 1841 } 1842 return (ret); 1843 } 1844 1845 void 1846 arena_prof_promoted(const void *ptr, size_t size) 1847 { 1848 arena_chunk_t *chunk; 1849 size_t pageind, binind; 1850 1851 cassert(config_prof); 1852 assert(ptr != NULL); 1853 assert(CHUNK_ADDR2BASE(ptr) != ptr); 1854 assert(isalloc(ptr, false) == PAGE); 1855 assert(isalloc(ptr, true) == PAGE); 1856 assert(size <= SMALL_MAXCLASS); 1857 1858 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 1859 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 1860 binind = small_size2bin(size); 1861 assert(binind < NBINS); 1862 arena_mapbits_large_binind_set(chunk, pageind, binind); 1863 1864 assert(isalloc(ptr, false) == PAGE); 1865 assert(isalloc(ptr, true) == size); 1866 } 1867 1868 static void 1869 arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, 1870 arena_bin_t *bin) 1871 { 1872 1873 /* Dissociate run from bin. */ 1874 if (run == bin->runcur) 1875 bin->runcur = NULL; 1876 else { 1877 size_t binind = arena_bin_index(chunk->arena, bin); 1878 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 1879 1880 if (bin_info->nregs != 1) { 1881 /* 1882 * This block's conditional is necessary because if the 1883 * run only contains one region, then it never gets 1884 * inserted into the non-full runs tree. 1885 */ 1886 arena_bin_runs_remove(bin, run); 1887 } 1888 } 1889 } 1890 1891 static void 1892 arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1893 arena_bin_t *bin) 1894 { 1895 size_t binind; 1896 arena_bin_info_t *bin_info; 1897 size_t npages, run_ind, past; 1898 1899 assert(run != bin->runcur); 1900 assert(arena_run_tree_search(&bin->runs, 1901 arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)) 1902 == NULL); 1903 1904 binind = arena_bin_index(chunk->arena, run->bin); 1905 bin_info = &arena_bin_info[binind]; 1906 1907 malloc_mutex_unlock(&bin->lock); 1908 /******************************/ 1909 npages = bin_info->run_size >> LG_PAGE; 1910 run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); 1911 past = (size_t)(PAGE_CEILING((uintptr_t)run + 1912 (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind * 1913 bin_info->reg_interval - bin_info->redzone_size) - 1914 (uintptr_t)chunk) >> LG_PAGE); 1915 malloc_mutex_lock(&arena->lock); 1916 1917 /* 1918 * If the run was originally clean, and some pages were never touched, 1919 * trim the clean pages before deallocating the dirty portion of the 1920 * run. 1921 */ 1922 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1923 arena_mapbits_dirty_get(chunk, run_ind+npages-1)); 1924 if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind < 1925 npages) { 1926 /* Trim clean pages. Convert to large run beforehand. */ 1927 assert(npages > 0); 1928 arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0); 1929 arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0); 1930 arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE), 1931 ((past - run_ind) << LG_PAGE), false); 1932 /* npages = past - run_ind; */ 1933 } 1934 arena_run_dalloc(arena, run, true, false); 1935 malloc_mutex_unlock(&arena->lock); 1936 /****************************/ 1937 malloc_mutex_lock(&bin->lock); 1938 if (config_stats) 1939 bin->stats.curruns--; 1940 } 1941 1942 static void 1943 arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1944 arena_bin_t *bin) 1945 { 1946 1947 /* 1948 * Make sure that if bin->runcur is non-NULL, it refers to the lowest 1949 * non-full run. It is okay to NULL runcur out rather than proactively 1950 * keeping it pointing at the lowest non-full run. 1951 */ 1952 if ((uintptr_t)run < (uintptr_t)bin->runcur) { 1953 /* Switch runcur. */ 1954 if (bin->runcur->nfree > 0) 1955 arena_bin_runs_insert(bin, bin->runcur); 1956 bin->runcur = run; 1957 if (config_stats) 1958 bin->stats.reruns++; 1959 } else 1960 arena_bin_runs_insert(bin, run); 1961 } 1962 1963 void 1964 arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, 1965 arena_chunk_map_t *mapelm) 1966 { 1967 size_t pageind; 1968 arena_run_t *run; 1969 arena_bin_t *bin; 1970 arena_bin_info_t *bin_info; 1971 size_t size, binind; 1972 1973 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 1974 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - 1975 arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE)); 1976 bin = run->bin; 1977 binind = arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, pageind)); 1978 bin_info = &arena_bin_info[binind]; 1979 if (config_fill || config_stats) 1980 size = bin_info->reg_size; 1981 1982 if (config_fill && opt_junk) 1983 arena_dalloc_junk_small(ptr, bin_info); 1984 1985 arena_run_reg_dalloc(run, ptr); 1986 if (run->nfree == bin_info->nregs) { 1987 arena_dissociate_bin_run(chunk, run, bin); 1988 arena_dalloc_bin_run(arena, chunk, run, bin); 1989 } else if (run->nfree == 1 && run != bin->runcur) 1990 arena_bin_lower_run(arena, chunk, run, bin); 1991 1992 if (config_stats) { 1993 bin->stats.allocated -= size; 1994 bin->stats.ndalloc++; 1995 } 1996 } 1997 1998 void 1999 arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2000 size_t pageind, arena_chunk_map_t *mapelm) 2001 { 2002 arena_run_t *run; 2003 arena_bin_t *bin; 2004 2005 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - 2006 arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE)); 2007 bin = run->bin; 2008 malloc_mutex_lock(&bin->lock); 2009 arena_dalloc_bin_locked(arena, chunk, ptr, mapelm); 2010 malloc_mutex_unlock(&bin->lock); 2011 } 2012 2013 void 2014 arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2015 size_t pageind) 2016 { 2017 arena_chunk_map_t *mapelm; 2018 2019 if (config_debug) { 2020 /* arena_ptr_small_binind_get() does extra sanity checking. */ 2021 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, 2022 pageind)) != BININD_INVALID); 2023 } 2024 mapelm = arena_mapp_get(chunk, pageind); 2025 arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm); 2026 } 2027 2028 #ifdef JEMALLOC_JET 2029 #undef arena_dalloc_junk_large 2030 #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) 2031 #endif 2032 static void 2033 arena_dalloc_junk_large(void *ptr, size_t usize) 2034 { 2035 2036 if (config_fill && opt_junk) 2037 memset(ptr, 0x5a, usize); 2038 } 2039 #ifdef JEMALLOC_JET 2040 #undef arena_dalloc_junk_large 2041 #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) 2042 arena_dalloc_junk_large_t *arena_dalloc_junk_large = 2043 JEMALLOC_N(arena_dalloc_junk_large_impl); 2044 #endif 2045 2046 void 2047 arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr) 2048 { 2049 2050 if (config_fill || config_stats) { 2051 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2052 size_t usize = arena_mapbits_large_size_get(chunk, pageind); 2053 2054 arena_dalloc_junk_large(ptr, usize); 2055 if (config_stats) { 2056 arena->stats.ndalloc_large++; 2057 arena->stats.allocated_large -= usize; 2058 arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++; 2059 arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--; 2060 } 2061 } 2062 2063 arena_run_dalloc(arena, (arena_run_t *)ptr, true, false); 2064 } 2065 2066 void 2067 arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr) 2068 { 2069 2070 malloc_mutex_lock(&arena->lock); 2071 arena_dalloc_large_locked(arena, chunk, ptr); 2072 malloc_mutex_unlock(&arena->lock); 2073 } 2074 2075 static void 2076 arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2077 size_t oldsize, size_t size) 2078 { 2079 2080 assert(size < oldsize); 2081 2082 /* 2083 * Shrink the run, and make trailing pages available for other 2084 * allocations. 2085 */ 2086 malloc_mutex_lock(&arena->lock); 2087 arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size, 2088 true); 2089 if (config_stats) { 2090 arena->stats.ndalloc_large++; 2091 arena->stats.allocated_large -= oldsize; 2092 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++; 2093 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--; 2094 2095 arena->stats.nmalloc_large++; 2096 arena->stats.nrequests_large++; 2097 arena->stats.allocated_large += size; 2098 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; 2099 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; 2100 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; 2101 } 2102 malloc_mutex_unlock(&arena->lock); 2103 } 2104 2105 static bool 2106 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2107 size_t oldsize, size_t size, size_t extra, bool zero) 2108 { 2109 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2110 size_t npages = oldsize >> LG_PAGE; 2111 size_t followsize; 2112 2113 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind)); 2114 2115 /* Try to extend the run. */ 2116 assert(size + extra > oldsize); 2117 malloc_mutex_lock(&arena->lock); 2118 if (pageind + npages < chunk_npages && 2119 arena_mapbits_allocated_get(chunk, pageind+npages) == 0 && 2120 (followsize = arena_mapbits_unallocated_size_get(chunk, 2121 pageind+npages)) >= size - oldsize) { 2122 /* 2123 * The next run is available and sufficiently large. Split the 2124 * following run, then merge the first part with the existing 2125 * allocation. 2126 */ 2127 size_t flag_dirty; 2128 size_t splitsize = (oldsize + followsize <= size + extra) 2129 ? followsize : size + extra - oldsize; 2130 arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk + 2131 ((pageind+npages) << LG_PAGE)), splitsize, zero); 2132 2133 size = oldsize + splitsize; 2134 npages = size >> LG_PAGE; 2135 2136 /* 2137 * Mark the extended run as dirty if either portion of the run 2138 * was dirty before allocation. This is rather pedantic, 2139 * because there's not actually any sequence of events that 2140 * could cause the resulting run to be passed to 2141 * arena_run_dalloc() with the dirty argument set to false 2142 * (which is when dirty flag consistency would really matter). 2143 */ 2144 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | 2145 arena_mapbits_dirty_get(chunk, pageind+npages-1); 2146 arena_mapbits_large_set(chunk, pageind, size, flag_dirty); 2147 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty); 2148 2149 if (config_stats) { 2150 arena->stats.ndalloc_large++; 2151 arena->stats.allocated_large -= oldsize; 2152 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++; 2153 arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--; 2154 2155 arena->stats.nmalloc_large++; 2156 arena->stats.nrequests_large++; 2157 arena->stats.allocated_large += size; 2158 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; 2159 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; 2160 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; 2161 } 2162 malloc_mutex_unlock(&arena->lock); 2163 return (false); 2164 } 2165 malloc_mutex_unlock(&arena->lock); 2166 2167 return (true); 2168 } 2169 2170 #ifdef JEMALLOC_JET 2171 #undef arena_ralloc_junk_large 2172 #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl) 2173 #endif 2174 static void 2175 arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) 2176 { 2177 2178 if (config_fill && opt_junk) { 2179 memset((void *)((uintptr_t)ptr + usize), 0x5a, 2180 old_usize - usize); 2181 } 2182 } 2183 #ifdef JEMALLOC_JET 2184 #undef arena_ralloc_junk_large 2185 #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) 2186 arena_ralloc_junk_large_t *arena_ralloc_junk_large = 2187 JEMALLOC_N(arena_ralloc_junk_large_impl); 2188 #endif 2189 2190 /* 2191 * Try to resize a large allocation, in order to avoid copying. This will 2192 * always fail if growing an object, and the following run is already in use. 2193 */ 2194 static bool 2195 arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra, 2196 bool zero) 2197 { 2198 size_t psize; 2199 2200 psize = PAGE_CEILING(size + extra); 2201 if (psize == oldsize) { 2202 /* Same size class. */ 2203 return (false); 2204 } else { 2205 arena_chunk_t *chunk; 2206 arena_t *arena; 2207 2208 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 2209 arena = chunk->arena; 2210 2211 if (psize < oldsize) { 2212 /* Fill before shrinking in order avoid a race. */ 2213 arena_ralloc_junk_large(ptr, oldsize, psize); 2214 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, 2215 psize); 2216 return (false); 2217 } else { 2218 bool ret = arena_ralloc_large_grow(arena, chunk, ptr, 2219 oldsize, PAGE_CEILING(size), 2220 psize - PAGE_CEILING(size), zero); 2221 if (config_fill && ret == false && zero == false) { 2222 if (opt_junk) { 2223 memset((void *)((uintptr_t)ptr + 2224 oldsize), 0xa5, isalloc(ptr, 2225 config_prof) - oldsize); 2226 } else if (opt_zero) { 2227 memset((void *)((uintptr_t)ptr + 2228 oldsize), 0, isalloc(ptr, 2229 config_prof) - oldsize); 2230 } 2231 } 2232 return (ret); 2233 } 2234 } 2235 } 2236 2237 bool 2238 arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra, 2239 bool zero) 2240 { 2241 2242 /* 2243 * Avoid moving the allocation if the size class can be left the same. 2244 */ 2245 if (oldsize <= arena_maxclass) { 2246 if (oldsize <= SMALL_MAXCLASS) { 2247 assert(arena_bin_info[small_size2bin(oldsize)].reg_size 2248 == oldsize); 2249 if ((size + extra <= SMALL_MAXCLASS && 2250 small_size2bin(size + extra) == 2251 small_size2bin(oldsize)) || (size <= oldsize && 2252 size + extra >= oldsize)) 2253 return (false); 2254 } else { 2255 assert(size <= arena_maxclass); 2256 if (size + extra > SMALL_MAXCLASS) { 2257 if (arena_ralloc_large(ptr, oldsize, size, 2258 extra, zero) == false) 2259 return (false); 2260 } 2261 } 2262 } 2263 2264 /* Reallocation would require a move. */ 2265 return (true); 2266 } 2267 2268 void * 2269 arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size, 2270 size_t extra, size_t alignment, bool zero, bool try_tcache_alloc, 2271 bool try_tcache_dalloc) 2272 { 2273 void *ret; 2274 size_t copysize; 2275 2276 /* Try to avoid moving the allocation. */ 2277 if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false) 2278 return (ptr); 2279 2280 /* 2281 * size and oldsize are different enough that we need to move the 2282 * object. In that case, fall back to allocating new space and 2283 * copying. 2284 */ 2285 if (alignment != 0) { 2286 size_t usize = sa2u(size + extra, alignment); 2287 if (usize == 0) 2288 return (NULL); 2289 ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena); 2290 } else 2291 ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc); 2292 2293 if (ret == NULL) { 2294 if (extra == 0) 2295 return (NULL); 2296 /* Try again, this time without extra. */ 2297 if (alignment != 0) { 2298 size_t usize = sa2u(size, alignment); 2299 if (usize == 0) 2300 return (NULL); 2301 ret = ipalloct(usize, alignment, zero, try_tcache_alloc, 2302 arena); 2303 } else 2304 ret = arena_malloc(arena, size, zero, try_tcache_alloc); 2305 2306 if (ret == NULL) 2307 return (NULL); 2308 } 2309 2310 /* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */ 2311 2312 /* 2313 * Copy at most size bytes (not size+extra), since the caller has no 2314 * expectation that the extra bytes will be reliably preserved. 2315 */ 2316 copysize = (size < oldsize) ? size : oldsize; 2317 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); 2318 memcpy(ret, ptr, copysize); 2319 iqalloct(ptr, try_tcache_dalloc); 2320 return (ret); 2321 } 2322 2323 dss_prec_t 2324 arena_dss_prec_get(arena_t *arena) 2325 { 2326 dss_prec_t ret; 2327 2328 malloc_mutex_lock(&arena->lock); 2329 ret = arena->dss_prec; 2330 malloc_mutex_unlock(&arena->lock); 2331 return (ret); 2332 } 2333 2334 bool 2335 arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) 2336 { 2337 2338 if (have_dss == false) 2339 return (dss_prec != dss_prec_disabled); 2340 malloc_mutex_lock(&arena->lock); 2341 arena->dss_prec = dss_prec; 2342 malloc_mutex_unlock(&arena->lock); 2343 return (false); 2344 } 2345 2346 void 2347 arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive, 2348 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, 2349 malloc_large_stats_t *lstats) 2350 { 2351 unsigned i; 2352 2353 malloc_mutex_lock(&arena->lock); 2354 *dss = dss_prec_names[arena->dss_prec]; 2355 *nactive += arena->nactive; 2356 *ndirty += arena->ndirty; 2357 2358 astats->mapped += arena->stats.mapped; 2359 astats->npurge += arena->stats.npurge; 2360 astats->nmadvise += arena->stats.nmadvise; 2361 astats->purged += arena->stats.purged; 2362 astats->allocated_large += arena->stats.allocated_large; 2363 astats->nmalloc_large += arena->stats.nmalloc_large; 2364 astats->ndalloc_large += arena->stats.ndalloc_large; 2365 astats->nrequests_large += arena->stats.nrequests_large; 2366 astats->allocated_huge += arena->stats.allocated_huge; 2367 astats->nmalloc_huge += arena->stats.nmalloc_huge; 2368 astats->ndalloc_huge += arena->stats.ndalloc_huge; 2369 astats->nrequests_huge += arena->stats.nrequests_huge; 2370 2371 for (i = 0; i < nlclasses; i++) { 2372 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; 2373 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; 2374 lstats[i].nrequests += arena->stats.lstats[i].nrequests; 2375 lstats[i].curruns += arena->stats.lstats[i].curruns; 2376 } 2377 malloc_mutex_unlock(&arena->lock); 2378 2379 for (i = 0; i < NBINS; i++) { 2380 arena_bin_t *bin = &arena->bins[i]; 2381 2382 malloc_mutex_lock(&bin->lock); 2383 bstats[i].allocated += bin->stats.allocated; 2384 bstats[i].nmalloc += bin->stats.nmalloc; 2385 bstats[i].ndalloc += bin->stats.ndalloc; 2386 bstats[i].nrequests += bin->stats.nrequests; 2387 if (config_tcache) { 2388 bstats[i].nfills += bin->stats.nfills; 2389 bstats[i].nflushes += bin->stats.nflushes; 2390 } 2391 bstats[i].nruns += bin->stats.nruns; 2392 bstats[i].reruns += bin->stats.reruns; 2393 bstats[i].curruns += bin->stats.curruns; 2394 malloc_mutex_unlock(&bin->lock); 2395 } 2396 } 2397 2398 bool 2399 arena_new(arena_t *arena, unsigned ind) 2400 { 2401 unsigned i; 2402 arena_bin_t *bin; 2403 2404 arena->ind = ind; 2405 arena->nthreads = 0; 2406 arena->chunk_alloc = chunk_alloc_default; 2407 arena->chunk_dalloc = chunk_dalloc_default; 2408 2409 if (malloc_mutex_init(&arena->lock)) 2410 return (true); 2411 2412 if (config_stats) { 2413 memset(&arena->stats, 0, sizeof(arena_stats_t)); 2414 arena->stats.lstats = 2415 (malloc_large_stats_t *)base_alloc(nlclasses * 2416 sizeof(malloc_large_stats_t)); 2417 if (arena->stats.lstats == NULL) 2418 return (true); 2419 memset(arena->stats.lstats, 0, nlclasses * 2420 sizeof(malloc_large_stats_t)); 2421 if (config_tcache) 2422 ql_new(&arena->tcache_ql); 2423 } 2424 2425 if (config_prof) 2426 arena->prof_accumbytes = 0; 2427 2428 arena->dss_prec = chunk_dss_prec_get(); 2429 2430 /* Initialize chunks. */ 2431 arena_chunk_dirty_new(&arena->chunks_dirty); 2432 arena->spare = NULL; 2433 2434 arena->nactive = 0; 2435 arena->ndirty = 0; 2436 arena->npurgatory = 0; 2437 2438 arena_avail_tree_new(&arena->runs_avail); 2439 2440 /* Initialize bins. */ 2441 for (i = 0; i < NBINS; i++) { 2442 bin = &arena->bins[i]; 2443 if (malloc_mutex_init(&bin->lock)) 2444 return (true); 2445 bin->runcur = NULL; 2446 arena_run_tree_new(&bin->runs); 2447 if (config_stats) 2448 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); 2449 } 2450 2451 return (false); 2452 } 2453 2454 /* 2455 * Calculate bin_info->run_size such that it meets the following constraints: 2456 * 2457 * *) bin_info->run_size >= min_run_size 2458 * *) bin_info->run_size <= arena_maxclass 2459 * *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed). 2460 * *) bin_info->nregs <= RUN_MAXREGS 2461 * 2462 * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also 2463 * calculated here, since these settings are all interdependent. 2464 */ 2465 static size_t 2466 bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size) 2467 { 2468 size_t pad_size; 2469 size_t try_run_size, good_run_size; 2470 uint32_t try_nregs, good_nregs; 2471 uint32_t try_hdr_size, good_hdr_size; 2472 uint32_t try_bitmap_offset, good_bitmap_offset; 2473 uint32_t try_redzone0_offset, good_redzone0_offset; 2474 2475 assert(min_run_size >= PAGE); 2476 assert(min_run_size <= arena_maxclass); 2477 2478 /* 2479 * Determine redzone size based on minimum alignment and minimum 2480 * redzone size. Add padding to the end of the run if it is needed to 2481 * align the regions. The padding allows each redzone to be half the 2482 * minimum alignment; without the padding, each redzone would have to 2483 * be twice as large in order to maintain alignment. 2484 */ 2485 if (config_fill && opt_redzone) { 2486 size_t align_min = ZU(1) << (jemalloc_ffs(bin_info->reg_size) - 1); 2487 if (align_min <= REDZONE_MINSIZE) { 2488 bin_info->redzone_size = REDZONE_MINSIZE; 2489 pad_size = 0; 2490 } else { 2491 bin_info->redzone_size = align_min >> 1; 2492 pad_size = bin_info->redzone_size; 2493 } 2494 } else { 2495 bin_info->redzone_size = 0; 2496 pad_size = 0; 2497 } 2498 bin_info->reg_interval = bin_info->reg_size + 2499 (bin_info->redzone_size << 1); 2500 2501 /* 2502 * Calculate known-valid settings before entering the run_size 2503 * expansion loop, so that the first part of the loop always copies 2504 * valid settings. 2505 * 2506 * The do..while loop iteratively reduces the number of regions until 2507 * the run header and the regions no longer overlap. A closed formula 2508 * would be quite messy, since there is an interdependency between the 2509 * header's mask length and the number of regions. 2510 */ 2511 try_run_size = min_run_size; 2512 try_nregs = ((try_run_size - sizeof(arena_run_t)) / 2513 bin_info->reg_interval) 2514 + 1; /* Counter-act try_nregs-- in loop. */ 2515 if (try_nregs > RUN_MAXREGS) { 2516 try_nregs = RUN_MAXREGS 2517 + 1; /* Counter-act try_nregs-- in loop. */ 2518 } 2519 do { 2520 try_nregs--; 2521 try_hdr_size = sizeof(arena_run_t); 2522 /* Pad to a long boundary. */ 2523 try_hdr_size = LONG_CEILING(try_hdr_size); 2524 try_bitmap_offset = try_hdr_size; 2525 /* Add space for bitmap. */ 2526 try_hdr_size += bitmap_size(try_nregs); 2527 try_redzone0_offset = try_run_size - (try_nregs * 2528 bin_info->reg_interval) - pad_size; 2529 } while (try_hdr_size > try_redzone0_offset); 2530 2531 /* run_size expansion loop. */ 2532 do { 2533 /* 2534 * Copy valid settings before trying more aggressive settings. 2535 */ 2536 good_run_size = try_run_size; 2537 good_nregs = try_nregs; 2538 good_hdr_size = try_hdr_size; 2539 good_bitmap_offset = try_bitmap_offset; 2540 good_redzone0_offset = try_redzone0_offset; 2541 2542 /* Try more aggressive settings. */ 2543 try_run_size += PAGE; 2544 try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) / 2545 bin_info->reg_interval) 2546 + 1; /* Counter-act try_nregs-- in loop. */ 2547 if (try_nregs > RUN_MAXREGS) { 2548 try_nregs = RUN_MAXREGS 2549 + 1; /* Counter-act try_nregs-- in loop. */ 2550 } 2551 do { 2552 try_nregs--; 2553 try_hdr_size = sizeof(arena_run_t); 2554 /* Pad to a long boundary. */ 2555 try_hdr_size = LONG_CEILING(try_hdr_size); 2556 try_bitmap_offset = try_hdr_size; 2557 /* Add space for bitmap. */ 2558 try_hdr_size += bitmap_size(try_nregs); 2559 try_redzone0_offset = try_run_size - (try_nregs * 2560 bin_info->reg_interval) - pad_size; 2561 } while (try_hdr_size > try_redzone0_offset); 2562 } while (try_run_size <= arena_maxclass 2563 && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) > 2564 RUN_MAX_OVRHD_RELAX 2565 && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size 2566 && try_nregs < RUN_MAXREGS); 2567 2568 assert(good_hdr_size <= good_redzone0_offset); 2569 2570 /* Copy final settings. */ 2571 bin_info->run_size = good_run_size; 2572 bin_info->nregs = good_nregs; 2573 bin_info->bitmap_offset = good_bitmap_offset; 2574 bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size; 2575 2576 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs 2577 * bin_info->reg_interval) + pad_size == bin_info->run_size); 2578 2579 return (good_run_size); 2580 } 2581 2582 static void 2583 bin_info_init(void) 2584 { 2585 arena_bin_info_t *bin_info; 2586 size_t prev_run_size = PAGE; 2587 2588 #define BIN_INFO_INIT_bin_yes(index, size) \ 2589 bin_info = &arena_bin_info[index]; \ 2590 bin_info->reg_size = size; \ 2591 prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\ 2592 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); 2593 #define BIN_INFO_INIT_bin_no(index, size) 2594 #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ 2595 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) 2596 SIZE_CLASSES 2597 #undef BIN_INFO_INIT_bin_yes 2598 #undef BIN_INFO_INIT_bin_no 2599 #undef SC 2600 } 2601 2602 void 2603 arena_boot(void) 2604 { 2605 size_t header_size; 2606 unsigned i; 2607 2608 /* 2609 * Compute the header size such that it is large enough to contain the 2610 * page map. The page map is biased to omit entries for the header 2611 * itself, so some iteration is necessary to compute the map bias. 2612 * 2613 * 1) Compute safe header_size and map_bias values that include enough 2614 * space for an unbiased page map. 2615 * 2) Refine map_bias based on (1) to omit the header pages in the page 2616 * map. The resulting map_bias may be one too small. 2617 * 3) Refine map_bias based on (2). The result will be >= the result 2618 * from (2), and will always be correct. 2619 */ 2620 map_bias = 0; 2621 for (i = 0; i < 3; i++) { 2622 header_size = offsetof(arena_chunk_t, map) + 2623 (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias)); 2624 map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK) 2625 != 0); 2626 } 2627 assert(map_bias > 0); 2628 2629 arena_maxclass = chunksize - (map_bias << LG_PAGE); 2630 2631 bin_info_init(); 2632 } 2633 2634 void 2635 arena_prefork(arena_t *arena) 2636 { 2637 unsigned i; 2638 2639 malloc_mutex_prefork(&arena->lock); 2640 for (i = 0; i < NBINS; i++) 2641 malloc_mutex_prefork(&arena->bins[i].lock); 2642 } 2643 2644 void 2645 arena_postfork_parent(arena_t *arena) 2646 { 2647 unsigned i; 2648 2649 for (i = 0; i < NBINS; i++) 2650 malloc_mutex_postfork_parent(&arena->bins[i].lock); 2651 malloc_mutex_postfork_parent(&arena->lock); 2652 } 2653 2654 void 2655 arena_postfork_child(arena_t *arena) 2656 { 2657 unsigned i; 2658 2659 for (i = 0; i < NBINS; i++) 2660 malloc_mutex_postfork_child(&arena->bins[i].lock); 2661 malloc_mutex_postfork_child(&arena->lock); 2662 } 2663