#define	JEMALLOC_HUGE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/* Protects huge allocation-related data structures. */
static malloc_mutex_t	huge_mtx;

/******************************************************************************/

/* Tree of chunks that are stand-alone huge allocations. */
static extent_tree_t	huge;

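/*
 * Allocate a huge region of at least size bytes, using the default
 * (chunksize) alignment.  Thin wrapper around huge_palloc().
 */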
void *
huge_malloc(arena_t *arena, size_t size, bool zero)
{

	return (huge_palloc(arena, size, chunksize, zero));
}

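/*
 * Allocate a chunk-aligned huge region of at least size bytes and record it
 * in the global tree of huge allocations so that it can later be found by
 * address (e.g. by huge_salloc() and huge_dalloc()).
 */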
void *
huge_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t csize;
	extent_node_t *node;
	bool is_zeroed;

	/* Allocate one or more contiguous chunks for this request. */

	csize = CHUNK_CEILING(size);
	if (csize == 0) {
		/* size is large enough to cause size_t wrap-around. */
		return (NULL);
	}

	/* Allocate an extent node with which to track the chunk. */
	node = base_node_alloc();
	if (node == NULL)
		return (NULL);

	/*
	 * Copy zero into is_zeroed and pass the copy to
	 * arena_chunk_alloc_huge(), so that it is possible to make correct
	 * junk/zero fill decisions below.
	 */
	is_zeroed = zero;
	arena = choose_arena(arena);
	ret = arena_chunk_alloc_huge(arena, csize, alignment, &is_zeroed);
	if (ret == NULL) {
		base_node_dalloc(node);
		return (NULL);
	}

	/* Insert node into the tree of huge allocations. */
	node->addr = ret;
	node->size = csize;
	node->arena = arena;

	malloc_mutex_lock(&huge_mtx);
	extent_tree_ad_insert(&huge, node);
	malloc_mutex_unlock(&huge_mtx);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, csize);
		else if (opt_zero && is_zeroed == false)
			memset(ret, 0, csize);
	}

	return (ret);
}

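/*
 * Return false if the existing huge allocation's chunk-rounded size already
 * falls within the requested range [size, size+extra], i.e. it can be reused
 * in place; return true if satisfying the request would require a move.
 */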
bool
huge_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize > arena_maxclass
	    && CHUNK_CEILING(oldsize) >= CHUNK_CEILING(size)
	    && CHUNK_CEILING(oldsize) <= CHUNK_CEILING(size+extra)) {
		assert(CHUNK_CEILING(oldsize) == oldsize);
		return (false);
	}

	/* Reallocation would require a move. */
	return (true);
}

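/*
 * Reallocate a huge region.  First try to resize in place via
 * huge_ralloc_no_move(); otherwise allocate a new region (preferring
 * size+extra, falling back to size), copy the old contents, and free the
 * old region.
 */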
void *
huge_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_dalloc)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	if (huge_ralloc_no_move(ptr, oldsize, size, extra) == false)
		return (ptr);

	/*
	 * size and oldsize are different enough that we need to use a
	 * different size class.  In that case, fall back to allocating new
	 * space and copying.
	 */
	if (alignment > chunksize)
		ret = huge_palloc(arena, size + extra, alignment, zero);
	else
		ret = huge_malloc(arena, size + extra, zero);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment > chunksize)
			ret = huge_palloc(arena, size, alignment, zero);
		else
			ret = huge_malloc(arena, size, zero);

		if (ret == NULL)
			return (NULL);
	}

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	memcpy(ret, ptr, copysize);
	iqalloct(ptr, try_tcache_dalloc);
	return (ret);
}

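/*
 * huge_dalloc_junk() fills a huge region with 0x5a bytes before deallocation
 * when junk filling is enabled.  Under JEMALLOC_JET the function is compiled
 * as huge_dalloc_junk_impl() and exposed through a function pointer, which
 * allows the implementation to be replaced (e.g. by tests).
 */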
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk_impl)
#endif
static void
huge_dalloc_junk(void *ptr, size_t usize)
{

	if (config_fill && have_dss && opt_junk) {
		/*
		 * Only bother junk filling if the chunk isn't about to be
		 * unmapped.
		 */
		if (config_munmap == false || (have_dss && chunk_in_dss(ptr)))
			memset(ptr, 0x5a, usize);
	}
}
#ifdef JEMALLOC_JET
#undef huge_dalloc_junk
#define	huge_dalloc_junk JEMALLOC_N(huge_dalloc_junk)
huge_dalloc_junk_t *huge_dalloc_junk = JEMALLOC_N(huge_dalloc_junk_impl);
#endif

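/*
 * Deallocate a huge region: remove its node from the tree of huge
 * allocations, optionally junk fill it, return the chunk(s) to the arena,
 * and release the extent node.
 */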
void
huge_dalloc(void *ptr)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Extract from tree of huge allocations. */
	key.addr = ptr;
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);
	assert(node->addr == ptr);
	extent_tree_ad_remove(&huge, node);

	malloc_mutex_unlock(&huge_mtx);

	huge_dalloc_junk(node->addr, node->size);
	arena_chunk_dalloc_huge(node->arena, node->addr, node->size);
	base_node_dalloc(node);
}

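/* Return the (chunk-rounded) size of the huge allocation at ptr. */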
size_t
huge_salloc(const void *ptr)
{
	size_t ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up in tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->size;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

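/* Return the profiling context associated with the huge allocation at ptr. */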
prof_ctx_t *
huge_prof_ctx_get(const void *ptr)
{
	prof_ctx_t *ret;
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up in tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	ret = node->prof_ctx;

	malloc_mutex_unlock(&huge_mtx);

	return (ret);
}

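/* Associate profiling context ctx with the huge allocation at ptr. */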
void
huge_prof_ctx_set(const void *ptr, prof_ctx_t *ctx)
{
	extent_node_t *node, key;

	malloc_mutex_lock(&huge_mtx);

	/* Look up in tree of huge allocations. */
	key.addr = __DECONST(void *, ptr);
	node = extent_tree_ad_search(&huge, &key);
	assert(node != NULL);

	node->prof_ctx = ctx;

	malloc_mutex_unlock(&huge_mtx);
}

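/*
 * Bootstrap the huge allocation subsystem: initialize huge_mtx and the empty
 * tree of huge allocations.  Return true on error (jemalloc convention).
 */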
bool
huge_boot(void)
{

	/* Initialize huge allocation data. */
	if (malloc_mutex_init(&huge_mtx))
		return (true);
	extent_tree_ad_new(&huge);

	return (false);
}

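/*
 * Fork handlers for huge_mtx, invoked around fork() so that the mutex is in
 * a consistent state in both the parent and the child.
 */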
void
huge_prefork(void)
{

	malloc_mutex_prefork(&huge_mtx);
}

void
huge_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&huge_mtx);
}

void
huge_postfork_child(void)
{

	malloc_mutex_postfork_child(&huge_mtx);
}
    269