Lines Matching full:arena
(Cross-reference search hits; each line below is prefixed with its line number in the matched file, which from the identifiers appears to be jemalloc's src/chunk.c in its 4.x-era chunk-hooks form.)
52 static void chunk_record(tsdn_t *tsdn, arena_t *arena,
60 chunk_hooks_get_locked(arena_t *arena)
63 return (arena->chunk_hooks);
67 chunk_hooks_get(tsdn_t *tsdn, arena_t *arena)
71 malloc_mutex_lock(tsdn, &arena->chunks_mtx);
72 chunk_hooks = chunk_hooks_get_locked(arena);
73 malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
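For context, the chunk_hooks object being read under chunks_mtx here is jemalloc 4.x's per-arena table of chunk-management callbacks. Its public-header shape is roughly the following (a reference sketch of that era's API, not code from this file):

    typedef void *(chunk_alloc_t)(void *, size_t, size_t, bool *, bool *, unsigned);
    typedef bool (chunk_dalloc_t)(void *, size_t, bool, unsigned);
    typedef bool (chunk_commit_t)(void *, size_t, size_t, size_t, unsigned);
    typedef bool (chunk_decommit_t)(void *, size_t, size_t, size_t, unsigned);
    typedef bool (chunk_purge_t)(void *, size_t, size_t, size_t, unsigned);
    typedef bool (chunk_split_t)(void *, size_t, size_t, size_t, bool, unsigned);
    typedef bool (chunk_merge_t)(void *, size_t, void *, size_t, bool, unsigned);

    typedef struct {
        chunk_alloc_t    *alloc;
        chunk_dalloc_t   *dalloc;
        chunk_commit_t   *commit;
        chunk_decommit_t *decommit;
        chunk_purge_t    *purge;
        chunk_split_t    *split;
        chunk_merge_t    *merge;
    } chunk_hooks_t;

The boolean hooks return false on success and true on failure or opt-out, which explains the `if (hook(...))` error-path pattern in the hits below.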
79 chunk_hooks_set(tsdn_t *tsdn, arena_t *arena, const chunk_hooks_t *chunk_hooks)
83 malloc_mutex_lock(tsdn, &arena->chunks_mtx);
84 old_chunk_hooks = arena->chunk_hooks;
89 * entirety of arena->chunk_hooks), and stale reads do not affect
97 u.n = &arena->chunk_hooks.n; \
108 malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
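chunk_hooks_set (lines 79-108) swaps the table member by member under chunks_mtx; the comment fragment at line 89 is explaining why readers tolerate a torn-but-member-atomic copy. From user code the setter is reached through the arena.<i>.chunk_hooks mallctl; a minimal sketch (my_dalloc is a hypothetical hook, defined in a later example in this listing):

    #include <jemalloc/jemalloc.h>

    chunk_hooks_t hooks;
    size_t sz = sizeof(hooks);
    /* Read the arena's current hooks, patch one member, write back. */
    if (mallctl("arena.0.chunk_hooks", &hooks, &sz, NULL, 0) == 0) {
        hooks.dalloc = my_dalloc;
        mallctl("arena.0.chunk_hooks", NULL, NULL, &hooks, sizeof(hooks));
    }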
114 chunk_hooks_assure_initialized_impl(tsdn_t *tsdn, arena_t *arena,
122 *chunk_hooks = locked ? chunk_hooks_get_locked(arena) :
123 chunk_hooks_get(tsdn, arena);
128 chunk_hooks_assure_initialized_locked(tsdn_t *tsdn, arena_t *arena,
132 chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, true);
136 chunk_hooks_assure_initialized(tsdn_t *tsdn, arena_t *arena,
140 chunk_hooks_assure_initialized_impl(tsdn, arena, chunk_hooks, false);
190 chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad, size_t size)
196 extent_node_init(&key, arena, NULL, size, 0, false, false);
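chunk_first_best_fit (lines 190-196) does a best-fit lookup by initializing a stack-allocated key node of the requested size and searching the size/serial/address-ordered tree for the least node that fits. The search call itself is not among the hits, so the following body is an inference from the surrounding code, not verbatim source:

    static extent_node_t *
    chunk_first_best_fit(arena_t *arena, extent_tree_t *chunks_szsnad,
        size_t size)
    {
        extent_node_t key;

        /* Dummy key: only the size (with NULL addr) drives the ordering. */
        extent_node_init(&key, arena, NULL, size, 0, false, false);
        return (extent_tree_szsnad_nsearch(chunks_szsnad, &key));
    }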
201 chunk_recycle(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
226 malloc_mutex_lock(tsdn, &arena->chunks_mtx);
227 chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
230 extent_node_init(&key, arena, new_addr, alloc_size, 0, false,
234 node = chunk_first_best_fit(arena, chunks_szsnad, alloc_size);
238 malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
257 extent_node_size_get(node), leadsize, size, false, arena->ind)) {
258 malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
264 arena_chunk_cache_maybe_remove(arena, node, cache);
270 arena_chunk_cache_maybe_insert(arena, node, cache);
276 trailsize, false, arena->ind)) {
278 arena_node_dalloc(tsdn, arena, node);
279 malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
280 chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad,
287 node = arena_node_alloc(tsdn, arena);
289 malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
290 chunk_record(tsdn, arena, chunk_hooks,
296 extent_node_init(node, arena, (void *)((uintptr_t)(ret) + size),
300 arena_chunk_cache_maybe_insert(arena, node, cache);
303 if (!committed && chunk_hooks->commit(ret, size, 0, size, arena->ind)) {
304 malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
305 chunk_record(tsdn, arena, chunk_hooks, chunks_szsnad, chunks_ad,
309 malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
313 arena_node_dalloc(tsdn, arena, node);
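Taken together, the chunk_recycle hits (lines 201-313) trace the recycling path; an annotated outline (a reconstruction from the hits, not verbatim source):

    /*
     * chunk_recycle, roughly:
     *  1. Lock arena->chunks_mtx and make sure the hooks table is
     *     initialized (lines 226-227).
     *  2. Best-fit search in the szsnad tree (line 234); unlock and
     *     fail if nothing large enough is recorded (line 238).
     *  3. Split off any leading space (line 257); on failure, unlock
     *     and fail (line 258).
     *  4. Detach the node from the cache bookkeeping (line 264); if
     *     there was leading space, the node is reinserted to describe
     *     it (line 270).
     *  5. Split off any trailing space (line 276); the trail gets its
     *     own node (lines 287, 296, 300), and failures unwind by
     *     re-recording the extent via chunk_record (lines 280, 290).
     *  6. Commit the result if the caller asked for committed memory
     *     (line 303), again unwinding through chunk_record (line 305).
     */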
337 chunk_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
349 chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
358 chunk_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
389 chunk_alloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
400 ret = chunk_recycle(tsdn, arena, chunk_hooks,
401 &arena->chunks_szsnad_cached, &arena->chunks_ad_cached, true,
413 arena_t *arena;
415 arena = arena_get(tsdn, arena_ind, false);
417 * The arena we're allocating on behalf of must have been initialized
420 assert(arena != NULL);
421 return (arena);
425 chunk_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
430 ret = chunk_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
431 commit, arena->dss_prec);
445 arena_t *arena;
448 arena = chunk_arena_get(tsdn, arena_ind);
450 return (chunk_alloc_default_impl(tsdn, arena, new_addr, size, alignment,
455 chunk_alloc_retained(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
466 ret = chunk_recycle(tsdn, arena, chunk_hooks,
467 &arena->chunks_szsnad_retained, &arena->chunks_ad_retained, false,
471 arena->stats.retained -= size;
477 chunk_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
483 chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
485 ret = chunk_alloc_retained(tsdn, arena, chunk_hooks, new_addr, size,
490 ret = chunk_alloc_default_impl(tsdn, arena, new_addr,
494 zero, commit, arena->ind);
500 *sn = arena_extent_sn_next(arena);
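chunk_alloc_wrapper (lines 477-500) tries to recycle retained virtual memory first (line 485) and only then calls the arena's alloc hook, short-circuiting to chunk_alloc_default_impl when the default hook is installed (line 490); a chunk produced by the hook gets the arena's next extent serial number (line 500). A user-supplied alloc hook has the chunk_alloc_t shape; a Linux-flavored sketch (my_alloc and the plain-mmap strategy are illustrative assumptions, and a production hook must also honor the alignment argument, e.g. by over-mapping and trimming):

    #include <sys/mman.h>
    #include <stdbool.h>
    #include <stddef.h>

    static void *
    my_alloc(void *new_addr, size_t size, size_t alignment, bool *zero,
        bool *commit, unsigned arena_ind)
    {
        void *p = mmap(new_addr, size, PROT_READ | PROT_WRITE,
            MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (p == MAP_FAILED)
            return (NULL);
        if (new_addr != NULL && p != new_addr) {
            /* Caller demanded a specific address; we missed it. */
            munmap(p, size);
            return (NULL);
        }
        *zero = true;    /* fresh anonymous pages read as zero */
        *commit = true;  /* and are already committed */
        return (p);
    }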
511 chunk_record(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
523 malloc_mutex_lock(tsdn, &arena->chunks_mtx);
524 chunk_hooks_assure_initialized_locked(tsdn, arena, chunk_hooks);
525 extent_node_init(&key, arena, (void *)((uintptr_t)chunk + size), 0, 0,
533 arena->ind)) {
540 arena_chunk_cache_maybe_remove(arena, node, cache);
548 arena, node, cache);
551 node = arena_node_alloc(tsdn, arena);
560 chunk_purge_wrapper(tsdn, arena, chunk_hooks,
565 extent_node_init(node, arena, chunk, size, sn, !unzeroed,
569 arena_chunk_cache_maybe_insert(arena, node, cache);
578 extent_node_size_get(prev), chunk, size, false, arena->ind)) {
586 arena_chunk_cache_maybe_remove(arena, prev, cache);
588 arena_chunk_cache_maybe_remove(arena, node, cache);
597 arena_chunk_cache_maybe_insert(arena, node, cache);
599 arena_node_dalloc(tsdn, arena, prev);
603 malloc_mutex_unlock(tsdn, &arena->chunks_mtx);
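The chunk_record hits from line 578 on show coalescing with the address-ordered predecessor: if the merge hook succeeds (it returns false on success), the previous extent is folded into the freed one, the surviving node is re-keyed in the trees (lines 586-597), and the now-redundant node goes back to the arena (line 599). The hook's shape, per the public API:

    typedef bool (chunk_merge_t)(void *chunk_a, size_t size_a, void *chunk_b,
        size_t size_b, bool committed, unsigned arena_ind);

A merge hook may return true to veto coalescing, e.g. when the two ranges came from different underlying mappings.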
607 chunk_dalloc_cache(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
616 chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_cached,
617 &arena->chunks_ad_cached, true, chunk, size, sn, false,
619 arena_maybe_purge(tsdn, arena);
640 chunk_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
650 chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
656 err = chunk_hooks->dalloc(chunk, size, committed, arena->ind);
663 arena->ind);
666 arena->ind);
667 chunk_record(tsdn, arena, chunk_hooks, &arena->chunks_szsnad_retained,
668 &arena->chunks_ad_retained, false, chunk, size, sn, zeroed,
672 arena->stats.retained += size;
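chunk_dalloc_wrapper (lines 640-672) first offers the chunk to the dalloc hook (line 656); if the hook returns true (opt-out), the chunk is decommitted and/or purged as far as possible (lines 663-666) and then parked in the retained trees (line 667), growing stats.retained (line 672) instead of being unmapped. A hook that always opts out therefore makes the arena retain every chunk it ever mapped; a minimal sketch (my_dalloc is hypothetical, matching the mallctl example above):

    /* Never release chunks to the OS; jemalloc keeps them retained. */
    static bool
    my_dalloc(void *chunk, size_t size, bool committed, unsigned arena_ind)
    {
        return (true);    /* true == opt out of deallocation */
    }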
709 chunk_purge_wrapper(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
713 chunk_hooks_assure_initialized(tsdn, arena, chunk_hooks);
714 return (chunk_hooks->purge(chunk, size, offset, length, arena->ind));
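chunk_purge_wrapper (lines 709-714) is a thin shim: assure the hooks, then forward to the purge hook. By returning false, a purge hook promises that the given page range will read back as zeros; a Linux-flavored sketch (my_purge is an illustrative assumption; MADV_DONTNEED provides the zero-fill guarantee on Linux):

    #include <sys/mman.h>
    #include <stdbool.h>
    #include <stddef.h>

    static bool
    my_purge(void *chunk, size_t size, size_t offset, size_t length,
        unsigned arena_ind)
    {
        /* Drop the physical pages backing [offset, offset+length). */
        if (madvise((char *)chunk + offset, length, MADV_DONTNEED) != 0)
            return (true);    /* purge failed; no zero-fill promise */
        return (false);
    }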