
Lines Matching refs:arena

91  *           in the associated arena chunk header maps.
116 * Use only one arena by default. Mozilla does not currently make extensive
169 * MALLOC_BALANCE enables monitoring of arena lock contention and dynamically
170 * re-balances arena load if exponentially averaged contention exceeds a
497 /* Maximum number of dirty pages per arena. */
504 * to the mandatory one chunk per arena.
704 /* Number of times this arena reassigned a thread due to contention. */
800 * Arena data structures.
872 /* Arena chunk header. */
875 /* Arena that owns the chunk. */
876 arena_t *arena;
878 /* Linkage for the arena's chunks_dirty tree. */
952 /* All operations on this arena require that lock be locked. */
969 /* Tree of dirty-page-containing chunks this arena manages. */
973 * In order to avoid rapid chunk allocation/deallocation when an arena
975 * recently freed chunk. The spare is left in the arena's chunk trees
978 * There is one spare chunk per arena, rather than one spare total, in
988 * memory is mapped for each arena.
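
The comments at lines 973-988 describe the one-spare-chunk policy. A minimal sketch of that idea, using reduced stand-in types rather than the real jemalloc definitions:

    /*
     * Sketch of the one-spare-chunk policy described above; arena_t
     * and arena_chunk_t here are reduced stand-ins.
     */
    #include <stddef.h>
    #include <stdlib.h>

    typedef struct arena_chunk_s { size_t ndirty; } arena_chunk_t;
    typedef struct { arena_chunk_t *spare; } arena_t;

    static void
    chunk_retire(arena_t *arena, arena_chunk_t *chunk)
    {
        /* Keep at most one spare: recycling it avoids a map/unmap
         * cycle when usage oscillates around a chunk boundary. */
        if (arena->spare != NULL)
            free(arena->spare);           /* stand-in for chunk_dealloc() */
        arena->spare = chunk;
    }

    static arena_chunk_t *
    chunk_obtain(arena_t *arena)
    {
        if (arena->spare != NULL) {
            arena_chunk_t *chunk = arena->spare;
            arena->spare = NULL;
            return chunk;                 /* reuse; no new mapping needed */
        }
        return malloc(sizeof(arena_chunk_t)); /* stand-in for chunk_alloc() */
    }
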
993 * Size/address-ordered tree of this arena's available runs. This tree
1000 * The arena load balancing machinery needs to keep track of how much
1173 * Map of pthread_self() --> arenas[???], used for selecting an arena to use
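
Line 1173's comment describes a pthread_self() --> arena map used by the fast path. A sketch of how such a per-thread cache can work, assuming a pthread thread-specific-data key; arenas_map and choose_arena_hard() are illustrative stand-ins:

    /* Sketch of per-thread arena caching via pthread TSD. */
    #include <pthread.h>
    #include <stddef.h>

    typedef struct arena_s arena_t;

    static pthread_key_t arenas_map; /* set up once via pthread_key_create() */
    arena_t *choose_arena_hard(void); /* slow path: pick and record an arena */

    static arena_t *
    choose_arena(void)
    {
        arena_t *ret = pthread_getspecific(arenas_map);
        if (ret == NULL) {
            /* First allocation on this thread: assign an arena and
             * cache it so the fast path is a single TSD lookup. */
            ret = choose_arena_hard();
            pthread_setspecific(arenas_map, ret);
        }
        return ret;
    }
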
1275 static void stats_print(arena_t *arena);
1292 static void arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
1294 static void arena_chunk_init(arena_t *arena, arena_chunk_t *chunk);
1295 static void arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
1296 static arena_run_t *arena_run_alloc(arena_t *arena, arena_bin_t *bin,
1298 static void arena_purge(arena_t *arena);
1299 static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
1300 static void arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
1302 static void arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
1304 static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
1305 static void *arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
1308 static void arena_lock_balance_hard(arena_t *arena);
1310 static void *arena_malloc_large(arena_t *arena, size_t size, bool zero);
1311 static void *arena_palloc(arena_t *arena, size_t alignment, size_t size,
1314 static void arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk,
1316 static void arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
1318 static bool arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
1322 static bool arena_new(arena_t *arena);
1731 /* Define the PRNG used for arena assignment. */
1997 stats_print(arena_t *arena)
2004 arena->ndirty, arena->ndirty == 1 ? "" : "s",
2005 arena->stats.npurge, arena->stats.npurge == 1 ? "" : "s",
2006 arena->stats.nmadvise, arena->stats.nmadvise == 1 ? "" : "s",
2007 arena->stats.purged, arena->stats.purged == 1 ? "" : "s");
2011 arena->stats.ndecommit, (arena->stats.ndecommit == 1) ? "" : "s",
2012 arena->stats.ncommit, (arena->stats.ncommit == 1) ? "" : "s",
2013 arena->stats.decommitted,
2014 (arena->stats.decommitted == 1) ? "" : "s");
2019 arena->stats.allocated_small, arena->stats.nmalloc_small,
2020 arena->stats.ndalloc_small);
2022 arena->stats.allocated_large, arena->stats.nmalloc_large,
2023 arena->stats.ndalloc_large);
2025 arena->stats.allocated_small + arena->stats.allocated_large,
2026 arena->stats.nmalloc_small + arena->stats.nmalloc_large,
2027 arena->stats.ndalloc_small + arena->stats.ndalloc_large);
2028 malloc_printf("mapped: %12Iu\n", arena->stats.mapped);
2032 arena->ndirty, arena->ndirty == 1 ? "" : "s",
2033 arena->stats.npurge, arena->stats.npurge == 1 ? "" : "s",
2034 arena->stats.nmadvise, arena->stats.nmadvise == 1 ? "" : "s",
2035 arena->stats.purged, arena->stats.purged == 1 ? "" : "s");
2039 arena->stats.ndecommit, (arena->stats.ndecommit == 1) ? "" : "s",
2040 arena->stats.ncommit, (arena->stats.ncommit == 1) ? "" : "s",
2041 arena->stats.decommitted,
2042 (arena->stats.decommitted == 1) ? "" : "s");
2047 arena->stats.allocated_small, arena->stats.nmalloc_small,
2048 arena->stats.ndalloc_small);
2050 arena->stats.allocated_large, arena->stats.nmalloc_large,
2051 arena->stats.ndalloc_large);
2053 arena->stats.allocated_small + arena->stats.allocated_large,
2054 arena->stats.nmalloc_small + arena->stats.nmalloc_large,
2055 arena->stats.ndalloc_small + arena->stats.ndalloc_large);
2056 malloc_printf("mapped: %12zu\n", arena->stats.mapped);
2061 if (arena->bins[i].stats.nrequests == 0) {
2086 arena->bins[i].reg_size,
2087 arena->bins[i].nregs,
2088 arena->bins[i].run_size >> pagesize_2pow,
2089 arena->bins[i].stats.nrequests,
2090 arena->bins[i].stats.nruns,
2091 arena->bins[i].stats.reruns,
2092 arena->bins[i].stats.highruns,
2093 arena->bins[i].stats.curruns);
2882 * Begin arena.
2886 * Choose an arena based on a per-thread value (fast-path code, calls slow-path
2965 * Choose an arena based on a per-thread value (slow-path code only, called
2976 /* Seed the PRNG used for arena load balancing. */
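
Lines 1731 and 2976 refer to a PRNG used for arena assignment and load balancing. A sketch of the kind of linear congruential generator typically used for this; the constants are illustrative, not the ones in the source:

    #include <stdint.h>

    static uint32_t balance_state;    /* per-thread state in the real code */

    static uint32_t
    prng_next(void)
    {
        /* Classic 32-bit LCG step; the low bits are weak, so return
         * the high bits when reducing to a small range. */
        balance_state = balance_state * 1103515245u + 12345u;
        return balance_state >> 16;
    }

    /* e.g. picking one of narenas arenas:
     *     ind = prng_next() % narenas;
     */
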
3239 arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
3256 arena_avail_tree_remove(&arena->runs_avail, &chunk->map[run_ind]);
3266 arena_avail_tree_insert(&arena->runs_avail,
3295 arena->stats.ncommit++;
3319 arena->ndirty--;
3343 arena_chunk_tree_dirty_remove(&arena->chunks_dirty, chunk);
3347 arena_chunk_init(arena_t *arena, arena_chunk_t *chunk)
3355 arena->stats.mapped += chunksize;
3358 chunk->arena = arena;
3395 arena->stats.ndecommit++;
3396 arena->stats.decommitted += (chunk_npages - arena_chunk_header_npages);
3401 arena_avail_tree_insert(&arena->runs_avail,
3406 arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
3409 if (arena->spare != NULL) {
3410 if (arena->spare->ndirty > 0) {
3412 &chunk->arena->chunks_dirty, arena->spare);
3413 arena->ndirty -= arena->spare->ndirty;
3415 VALGRIND_FREELIKE_BLOCK(arena->spare, 0);
3416 chunk_dealloc((void *)arena->spare, chunksize);
3418 arena->stats.mapped -= chunksize;
3424 * will be cached, so that the arena does not use it. Dirty page
3428 arena_avail_tree_remove(&arena->runs_avail,
3431 arena->spare = chunk;
3435 arena_run_alloc(arena_t *arena, arena_bin_t *bin, size_t size, bool large,
3447 /* Search the arena's chunks for the lowest best fit. */
3449 mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
3460 arena_run_split(arena, run, size, large, zero);
3464 if (arena->spare != NULL) {
3466 chunk = arena->spare;
3467 arena->spare = NULL;
3471 arena_avail_tree_insert(&arena->runs_avail,
3473 arena_run_split(arena, run, size, large, zero);
3488 arena->chunk_seq++;
3489 chunk_seq = arena->chunk_seq;
3492 * Drop the arena lock while allocating a chunk, since
3497 malloc_mutex_unlock(&arena->lock);
3500 malloc_mutex_lock(&arena->lock);
3517 if (chunk_seq != arena->chunk_seq)
3529 arena_chunk_init(arena, chunk);
3533 arena_run_split(arena, run, size, large, zero);
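
Lines 3435-3533 show arena_run_alloc's three-stage strategy: best-fit search of runs_avail, then the spare chunk, then a fresh chunk mapped with the arena lock dropped (re-checking chunk_seq afterwards to detect a racing allocation). A condensed control-flow sketch; every helper below is a stand-in for the real routine of the same role:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct arena_run_s arena_run_t;
    typedef struct arena_s arena_t;

    /* Stand-ins for the real helpers. */
    arena_run_t *avail_tree_best_fit(arena_t *, size_t);
    bool arena_has_spare(arena_t *);
    arena_run_t *run_from_spare(arena_t *, size_t);
    arena_run_t *run_from_new_chunk(arena_t *, size_t);

    arena_run_t *
    run_alloc_sketch(arena_t *arena, size_t size)
    {
        arena_run_t *run;

        /* 1. Best fit among existing runs, via the size/address-
         *    ordered runs_avail tree (arena_avail_tree_nsearch). */
        if ((run = avail_tree_best_fit(arena, size)) != NULL)
            return run;

        /* 2. Recycle the cached spare chunk, if the arena has one. */
        if (arena_has_spare(arena))
            return run_from_spare(arena, size);

        /* 3. Map a brand-new chunk.  In the source, the arena lock
         *    is dropped around chunk allocation and arena->chunk_seq
         *    is compared afterwards so a concurrent allocation is
         *    noticed and the search retried. */
        return run_from_new_chunk(arena, size);
    }
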
3539 arena_purge(arena_t *arena)
3545 rb_foreach_begin(arena_chunk_t, link_dirty, &arena->chunks_dirty,
3548 } rb_foreach_end(arena_chunk_t, link_dirty, &arena->chunks_dirty, chunk)
3549 assert(ndirty == arena->ndirty);
3551 assert(arena->ndirty > opt_dirty_max);
3554 arena->stats.npurge++;
3563 while (arena->ndirty > (opt_dirty_max >> 1)) {
3564 chunk = arena_chunk_tree_dirty_last(&arena->chunks_dirty);
3596 arena->ndirty -= npages;
3603 arena->stats.ndecommit++;
3604 arena->stats.decommitted += npages;
3612 arena->stats.nmadvise++;
3613 arena->stats.purged += npages;
3615 if (arena->ndirty <= (opt_dirty_max >> 1))
3621 arena_chunk_tree_dirty_remove(&arena->chunks_dirty,
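
Lines 3539-3621 show arena_purge walking chunks_dirty from the dirtiest end and releasing pages until ndirty drops to half of opt_dirty_max. A sketch of that hysteresis loop, assuming a POSIX madvise() path (the source also has decommit variants); the types and dirtiest_range() are stand-ins:

    #include <sys/mman.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096

    extern size_t opt_dirty_max;

    struct purge_arena { size_t ndirty; };

    void *dirtiest_range(struct purge_arena *, size_t *npages); /* stand-in */

    static void
    purge_sketch(struct purge_arena *arena)
    {
        /* Purging starts once ndirty exceeds opt_dirty_max and stops
         * at half that, so the allocator is not purging on every
         * free. */
        while (arena->ndirty > (opt_dirty_max >> 1)) {
            size_t npages;
            void *addr = dirtiest_range(arena, &npages);
            if (addr == NULL)
                break;
            /* Tell the kernel the pages are disposable; the mapping
             * itself stays, so reuse needs no new mmap(). */
            madvise(addr, npages * PAGE_SIZE, MADV_DONTNEED);
            arena->ndirty -= npages;
        }
    }
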
3628 arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
3655 arena_chunk_tree_dirty_insert(&arena->chunks_dirty,
3659 arena->ndirty += run_pages;
3683 arena_avail_tree_remove(&arena->runs_avail,
3708 arena_avail_tree_remove(&arena->runs_avail,
3723 arena_avail_tree_insert(&arena->runs_avail, &chunk->map[run_ind]);
3728 arena_chunk_dealloc(arena, chunk);
3731 if (arena->ndirty > opt_dirty_max)
3732 arena_purge(arena);
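
Lines 3628-3732 show the dalloc path: the freed run's pages are counted as dirty, adjacent free runs are coalesced (neighbors removed from runs_avail, the merged run re-inserted), wholly free chunks go to arena_chunk_dealloc, and purging triggers past opt_dirty_max. A simplified sketch of the neighbor coalescing, using boundary tags (run length recorded at both the first and last page of a free run, which is roughly how the chunk map enables O(1) backward merges):

    #include <stdbool.h>
    #include <stddef.h>

    #define NPAGES 256

    struct page_tag {
        bool   free;       /* page heads or tails a free run */
        size_t run_pages;  /* length of that run, in pages   */
    };

    static void
    coalesce_sketch(struct page_tag *map, size_t run_ind, size_t run_pages)
    {
        /* Forward: absorb a free run starting right after this one. */
        size_t next = run_ind + run_pages;
        if (next < NPAGES && map[next].free)
            run_pages += map[next].run_pages;

        /* Backward: the tag just before us is the tail tag of the
         * preceding run, if that run is free. */
        if (run_ind > 0 && map[run_ind - 1].free) {
            size_t prev = map[run_ind - 1].run_pages;
            run_ind   -= prev;
            run_pages += prev;
        }

        /* Re-tag the merged run at its head and tail. */
        map[run_ind].free = true;
        map[run_ind].run_pages = run_pages;
        map[run_ind + run_pages - 1].free = true;
        map[run_ind + run_pages - 1].run_pages = run_pages;
    }
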
3736 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
3753 arena_run_dalloc(arena, run, false);
3757 arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
3774 arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
3779 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
3799 run = arena_run_alloc(arena, bin, bin->run_size, false, false);
3844 arena_bin_malloc_easy(arena_t *arena, arena_bin_t *bin, arena_run_t *run)
3860 arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
3863 bin->runcur = arena_bin_nonfull_run_get(arena, bin);
3869 return (arena_bin_malloc_easy(arena, bin, bin->runcur));
3957 arena_lock_balance(arena_t *arena)
3961 contention = malloc_spin_lock(&arena->lock);
3965 * arena. Due to integer math always rounding down, this value
3968 arena->contention = (((uint64_t)arena->contention
3971 if (arena->contention >= opt_balance_threshold)
3972 arena_lock_balance_hard(arena);
3977 arena_lock_balance_hard(arena_t *arena)
3981 arena->contention = 0;
3983 arena->stats.nbalance++;
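
Lines 3957-3983 compute an exponentially weighted average of spin-lock contention; once it crosses opt_balance_threshold, arena_lock_balance_hard() resets the counter, bumps nbalance, and reassigns the thread. A sketch of the fixed-point EMA, with an illustrative alpha:

    /* With alpha = 1/2^k, the update in fixed point is
     *     avg' = (avg * (2^k - 1) + sample) / 2^k
     */
    #include <stdint.h>

    #define BALANCE_ALPHA_INV_2POW 4    /* alpha = 1/16 (illustrative) */

    extern uint32_t opt_balance_threshold;

    static uint32_t
    contention_update(uint32_t avg, uint32_t sample)
    {
        return (uint32_t)((((uint64_t)avg
            * (uint64_t)((1U << BALANCE_ALPHA_INV_2POW) - 1))
            + (uint64_t)sample) >> BALANCE_ALPHA_INV_2POW);
    }

    /* Caller pattern, as in arena_lock_balance():
     *     arena->contention = contention_update(arena->contention, spins);
     *     if (arena->contention >= opt_balance_threshold)
     *         arena_lock_balance_hard(arena);
     */

Integer math always rounds down here, so the averaged value decays toward zero when contention subsides, which is the behavior the comment at line 3965 alludes to.
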
4013 arena_malloc_small(arena_t *arena, size_t size, bool zero)
4022 bin = &arena->bins[ffs((int)(size >> (TINY_MIN_2POW +
4036 bin = &arena->bins[ntbins + (size >> opt_quantum_2pow)
4041 bin = &arena->bins[ntbins + nqbins
4047 arena_lock_balance(arena);
4049 malloc_spin_lock(&arena->lock);
4052 ret = arena_bin_malloc_easy(arena, bin, run);
4054 ret = arena_bin_malloc_hard(arena, bin);
4057 malloc_spin_unlock(&arena->lock);
4063 arena->stats.nmalloc_small++;
4064 arena->stats.allocated_small += size;
4066 malloc_spin_unlock(&arena->lock);
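
Lines 4013-4047 route small requests to one of three bin ranges: tiny bins indexed by power of two (hence the ffs() at line 4022), quantum-spaced bins, and sub-page bins. A sketch of the three-way index computation; all constants are illustrative stand-ins, and sizes are assumed already rounded up to their class (a power of two for tiny/sub-page, a quantum multiple otherwise):

    #include <strings.h>   /* ffs() */
    #include <stddef.h>

    #define TINY_MIN_2POW  1          /* smallest tiny class: 2 bytes  */
    #define QUANTUM_2POW   4          /* quantum = 16 bytes            */
    #define SMALL_MAX      512        /* largest quantum-spaced class  */
    #define NTBINS  (QUANTUM_2POW - TINY_MIN_2POW)
    #define NQBINS  (SMALL_MAX >> QUANTUM_2POW)

    static size_t
    small_bin_index(size_t size)
    {
        if (size <= (1u << (QUANTUM_2POW - 1))) {
            /* Tiny: index by exponent of the power-of-two size. */
            return (size_t)ffs((int)(size >> (TINY_MIN_2POW + 1)));
        } else if (size <= SMALL_MAX) {
            /* Quantum-spaced: index by quantum multiple. */
            return NTBINS + (size >> QUANTUM_2POW) - 1;
        } else {
            /* Sub-page: power-of-two classes above SMALL_MAX. */
            size_t i = 0, c = SMALL_MAX << 1;
            while (c < size) { c <<= 1; i++; }
            return NTBINS + NQBINS + i;
        }
    }
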
4083 arena_malloc_large(arena_t *arena, size_t size, bool zero)
4090 arena_lock_balance(arena);
4092 malloc_spin_lock(&arena->lock);
4094 ret = (void *)arena_run_alloc(arena, NULL, size, true, zero);
4096 malloc_spin_unlock(&arena->lock);
4100 arena->stats.nmalloc_large++;
4101 arena->stats.allocated_large += size;
4103 malloc_spin_unlock(&arena->lock);
4119 arena_malloc(arena_t *arena, size_t size, bool zero)
4122 assert(arena != NULL);
4123 assert(arena->magic == ARENA_MAGIC);
4128 return (arena_malloc_small(arena, size, zero));
4130 return (arena_malloc_large(arena, size, zero));
4157 arena_palloc(arena_t *arena, size_t alignment, size_t size, size_t alloc_size)
4167 arena_lock_balance(arena);
4169 malloc_spin_lock(&arena->lock);
4171 ret = (void *)arena_run_alloc(arena, NULL, alloc_size, true, false);
4173 malloc_spin_unlock(&arena->lock);
4183 arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, alloc_size, size, false);
4189 arena_run_trim_head(arena, chunk, (arena_run_t*)ret, alloc_size,
4198 arena_run_trim_tail(arena, chunk, (arena_run_t*)ret, size + trailsize,
4204 arena->stats.nmalloc_large++;
4205 arena->stats.allocated_large += size;
4207 malloc_spin_unlock(&arena->lock);
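
Lines 4157-4207 show arena_palloc over-allocating alloc_size and then trimming an unaligned head and any tail so that exactly size bytes remain at an aligned address. A sketch of the offset arithmetic, assuming alignment is a power of two and alloc_size was chosen large enough for the aligned block to fit:

    #include <stdint.h>
    #include <stddef.h>

    static void *
    align_within(void *run, size_t run_size, size_t alignment, size_t size,
        size_t *lead, size_t *trail)
    {
        uintptr_t base = (uintptr_t)run;
        uintptr_t ret  = (base + (alignment - 1)) & ~(uintptr_t)(alignment - 1);

        *lead  = ret - base;              /* given back via trim_head */
        *trail = run_size - *lead - size; /* given back via trim_tail */
        return (void *)ret;
    }

In the source, the lead and trail amounts computed this way are handed to arena_run_trim_head() and arena_run_trim_tail() respectively.
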
4363 assert(chunk->arena->magic == ARENA_MAGIC);
4395 assert(chunk->arena->magic == ARENA_MAGIC);
4419 arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
4461 arena_run_dalloc(arena, run, true);
4502 arena->stats.allocated_small -= size;
4503 arena->stats.ndalloc_small++;
4508 arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
4511 malloc_spin_lock(&arena->lock);
4530 arena->stats.allocated_large -= size;
4534 arena->stats.ndalloc_large++;
4537 arena_run_dalloc(arena, (arena_run_t *)ptr, true);
4538 malloc_spin_unlock(&arena->lock);
4542 arena_dalloc(arena_t *arena, arena_chunk_t *chunk, void *ptr)
4547 assert(arena != NULL);
4548 assert(arena->magic == ARENA_MAGIC);
4549 assert(chunk->arena == arena);
4558 malloc_spin_lock(&arena->lock);
4559 arena_dalloc_small(arena, chunk, ptr, mapelm);
4560 malloc_spin_unlock(&arena->lock);
4562 arena_dalloc_large(arena, chunk, ptr);
4575 arena_dalloc(chunk->arena, chunk, ptr);
4581 arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
4592 arena_lock_balance(arena);
4594 malloc_spin_lock(&arena->lock);
4596 arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
4599 arena->stats.allocated_large -= oldsize - size;
4601 malloc_spin_unlock(&arena->lock);
4605 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
4616 arena_lock_balance(arena);
4618 malloc_spin_lock(&arena->lock);
4628 arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
4638 arena->stats.allocated_large += size - oldsize;
4640 malloc_spin_unlock(&arena->lock);
4643 malloc_spin_unlock(&arena->lock);
4669 arena_t *arena;
4672 arena = chunk->arena;
4673 assert(arena->magic == ARENA_MAGIC);
4683 arena_ralloc_large_shrink(arena, chunk, ptr, psize,
4687 bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
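
Lines 4581-4687 show large reallocation first attempting to resize in place: shrinking always succeeds by trimming the run's tail, while growing succeeds only if the pages immediately after the run are free and can be split off. A sketch of that decision; the helpers are stand-ins:

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct arena_s arena_t;

    void trim_tail(arena_t *, void *, size_t oldsize, size_t newsize);
    bool next_pages_free(arena_t *, void *, size_t need);
    void split_following(arena_t *, void *, size_t need);

    static bool
    ralloc_inplace_sketch(arena_t *arena, void *ptr, size_t oldsize,
        size_t newsize)
    {
        if (newsize < oldsize) {
            trim_tail(arena, ptr, oldsize, newsize);  /* always works */
            return true;
        }
        if (next_pages_free(arena, ptr, newsize - oldsize)) {
            split_following(arena, ptr, newsize - oldsize);
            return true;
        }
        return false;  /* caller falls back to malloc + memcpy + free */
    }
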
4791 arena_new(arena_t *arena)
4797 if (malloc_spin_init(&arena->lock))
4801 memset(&arena->stats, 0, sizeof(arena_stats_t));
4804 arena->chunk_seq = 0;
4807 arena_chunk_tree_dirty_new(&arena->chunks_dirty);
4808 arena->spare = NULL;
4810 arena->ndirty = 0;
4812 arena_avail_tree_new(&arena->runs_avail);
4815 arena->contention = 0;
4823 bin = &arena->bins[i];
4838 bin = &arena->bins[i];
4854 bin = &arena->bins[i];
4868 arena->magic = ARENA_MAGIC;
4874 /* Create a new arena and insert it into the arenas array at index ind. */
4896 ": (malloc) Error initializing arena\n", "", "");
4904 * End arena.
5396 _malloc_message("Arena balance threshold: ",
5404 _malloc_message("Max dirty pages per arena: ",
5417 arena_t *arena;
5471 malloc_printf("Arena balance reassignments: %llu\n",
5501 /* Print stats for each arena. */
5503 arena = arenas[i];
5504 if (arena != NULL) {
5507 malloc_spin_lock(&arena->lock);
5508 stats_print(arena);
5509 malloc_spin_unlock(&arena->lock);
5983 /* Make sure there is at least one arena. */
6042 * Initialize one arena here. The rest are lazily created in
6054 * Assign the initial arena to the initial thread, in order to avoid
6055 * spurious creation of an extra arena if the application switches to
6513 arena_t *arena = arenas[i];
6514 if (arena != NULL) {
6517 malloc_spin_lock(&arena->lock);
6518 stats->allocated += arena->stats.allocated_small;
6519 stats->allocated += arena->stats.allocated_large;
6522 &arena->chunks_dirty, chunk) {
6531 &arena->chunks_dirty, chunk)
6533 stats->dirty += (arena->ndirty << pagesize_2pow);
6534 malloc_spin_unlock(&arena->lock);