#define	JEMALLOC_CHUNK_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

const char	*opt_dss = DSS_DEFAULT;
size_t		opt_lg_chunk = LG_CHUNK_DEFAULT;

malloc_mutex_t	chunks_mtx;
chunk_stats_t	stats_chunks;

/*
 * Trees of chunks that were previously allocated (the trees differ only in
 * node ordering).  These are used when allocating chunks, in an attempt to
 * re-use address space.  Different operations need different tree orderings,
 * which is why there are two trees with the same contents.
 */
static extent_tree_t	chunks_szad_mmap;
static extent_tree_t	chunks_ad_mmap;
static extent_tree_t	chunks_szad_dss;
static extent_tree_t	chunks_ad_dss;
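/*
 * The szad trees serve the size/address-ordered best-fit searches in
 * chunk_recycle(); the ad trees serve the address-ordered neighbor searches
 * used for coalescing in chunk_record().
 */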

rtree_t		*chunks_rtree;

/* Various chunk-related settings. */
size_t		chunksize;
size_t		chunksize_mask; /* (chunksize - 1). */
size_t		chunk_npages;
size_t		map_bias;
size_t		arena_maxclass; /* Max size class for arenas. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	chunk_dalloc_core(void *chunk, size_t size);

/******************************************************************************/

static void *
chunk_recycle(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, size_t size,
    size_t alignment, bool base, bool *zero)
{
	void *ret;
	extent_node_t *node;
	extent_node_t key;
	size_t alloc_size, leadsize, trailsize;
	bool zeroed;

	if (base) {
		/*
		 * This function may need to call base_node_{,d}alloc(), but
		 * the current chunk allocation request is on behalf of the
		 * base allocator.  Avoid deadlock (and, even if that were not
		 * an issue, potential infinite recursion) by returning NULL.
		 */
		return (NULL);
	}

	alloc_size = size + alignment - chunksize;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
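	/*
	 * Over-allocating by (alignment - chunksize) guarantees that any
	 * recycled extent of alloc_size bytes whose base is chunk-aligned
	 * contains an alignment-aligned run of size bytes: leadsize below is
	 * at most (alignment - chunksize), so leadsize + size <= alloc_size.
	 * Hypothetical example (not taken from this file): with 4 MiB chunks,
	 * size == 8 MiB and alignment == 16 MiB give alloc_size == 20 MiB.
	 */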
	key.addr = NULL;
	key.size = alloc_size;
	malloc_mutex_lock(&chunks_mtx);
	node = extent_tree_szad_nsearch(chunks_szad, &key);
	if (node == NULL) {
		malloc_mutex_unlock(&chunks_mtx);
		return (NULL);
	}
	leadsize = ALIGNMENT_CEILING((uintptr_t)node->addr, alignment) -
	    (uintptr_t)node->addr;
	assert(node->size >= leadsize + size);
	trailsize = node->size - leadsize - size;
	ret = (void *)((uintptr_t)node->addr + leadsize);
	zeroed = node->zeroed;
	if (zeroed)
		*zero = true;
	/* Remove node from the tree. */
	extent_tree_szad_remove(chunks_szad, node);
	extent_tree_ad_remove(chunks_ad, node);
	if (leadsize != 0) {
		/* Insert the leading space as a smaller chunk. */
		node->size = leadsize;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	if (trailsize != 0) {
		/* Insert the trailing space as a smaller chunk. */
		if (node == NULL) {
			/*
			 * An additional node is required, but
			 * base_node_alloc() can cause a new base chunk to be
			 * allocated.  Drop chunks_mtx in order to avoid
			 * deadlock, and if node allocation fails, deallocate
			 * the result before returning an error.
			 */
			malloc_mutex_unlock(&chunks_mtx);
			node = base_node_alloc();
			if (node == NULL) {
				chunk_dalloc_core(ret, size);
				return (NULL);
			}
			malloc_mutex_lock(&chunks_mtx);
		}
		node->addr = (void *)((uintptr_t)(ret) + size);
		node->size = trailsize;
		node->zeroed = zeroed;
		extent_tree_szad_insert(chunks_szad, node);
		extent_tree_ad_insert(chunks_ad, node);
		node = NULL;
	}
	malloc_mutex_unlock(&chunks_mtx);

	if (node != NULL)
		base_node_dalloc(node);
	if (*zero) {
		if (zeroed == false)
			memset(ret, 0, size);
		else if (config_debug) {
			size_t i;
			size_t *p = (size_t *)(uintptr_t)ret;

			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED(ret, size);
			for (i = 0; i < size / sizeof(size_t); i++)
				assert(p[i] == 0);
		}
	}
	return (ret);
}

/*
 * If the caller specifies (*zero == false), it is still possible to receive
 * zeroed memory, in which case *zero is toggled to true.  arena_chunk_alloc()
 * relies on this to avoid demanding zeroed chunks, while still taking
 * advantage of them when they happen to be returned.
 */
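/*
 * Illustrative caller pattern (a sketch, not code from this file):
 *
 *	bool zero = false;
 *	void *chunk = chunk_alloc_core(size, chunksize, false, &zero,
 *	    chunk_dss_prec_get());
 *	if (chunk != NULL && zero) {
 *		... the chunk is already zeroed, so any explicit memset() can
 *		be skipped ...
 *	}
 */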
static void *
chunk_alloc_core(size_t size, size_t alignment, bool base, bool *zero,
    dss_prec_t dss_prec)
{
	void *ret;

	assert(size != 0);
	assert((size & chunksize_mask) == 0);
	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

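	/*
	 * Try sources in precedence order.  With dss_prec_primary: recycled
	 * dss, fresh dss, recycled mmap, fresh mmap.  With dss_prec_secondary,
	 * the mmap sources are tried first and dss last.  In all cases a
	 * recycled chunk is preferred to a fresh one.
	 */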
	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
		    alignment, base, zero)) != NULL)
			return (ret);
		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
			return (ret);
	}
	/* mmap. */
	if ((ret = chunk_recycle(&chunks_szad_mmap, &chunks_ad_mmap, size,
	    alignment, base, zero)) != NULL)
		return (ret);
	if ((ret = chunk_alloc_mmap(size, alignment, zero)) != NULL)
		return (ret);
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary) {
		if ((ret = chunk_recycle(&chunks_szad_dss, &chunks_ad_dss, size,
		    alignment, base, zero)) != NULL)
			return (ret);
		if ((ret = chunk_alloc_dss(size, alignment, zero)) != NULL)
			return (ret);
	}

	/* All strategies for allocation failed. */
	return (NULL);
}

static bool
chunk_register(void *chunk, size_t size, bool base)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);

	if (config_ivsalloc && base == false) {
		if (rtree_set(chunks_rtree, (uintptr_t)chunk, 1))
			return (true);
	}
	if (config_stats || config_prof) {
		bool gdump;
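		/*
		 * Decide whether this allocation sets a new chunk high-water
		 * mark while chunks_mtx is held, but defer the prof_gdump()
		 * call itself until after the mutex is dropped.
		 */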
		malloc_mutex_lock(&chunks_mtx);
		if (config_stats)
			stats_chunks.nchunks += (size / chunksize);
		stats_chunks.curchunks += (size / chunksize);
		if (stats_chunks.curchunks > stats_chunks.highchunks) {
			stats_chunks.highchunks =
			    stats_chunks.curchunks;
			if (config_prof)
				gdump = true;
		} else if (config_prof)
			gdump = false;
		malloc_mutex_unlock(&chunks_mtx);
		if (config_prof && opt_prof && opt_prof_gdump && gdump)
			prof_gdump();
	}
	if (config_valgrind)
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(chunk, size);
	return (false);
}

void *
chunk_alloc_base(size_t size)
{
	void *ret;
	bool zero;

	zero = false;
	ret = chunk_alloc_core(size, chunksize, true, &zero,
	    chunk_dss_prec_get());
	if (ret == NULL)
		return (NULL);
	if (chunk_register(ret, size, true)) {
		chunk_dalloc_core(ret, size);
		return (NULL);
	}
	return (ret);
}

void *
chunk_alloc_arena(chunk_alloc_t *chunk_alloc, chunk_dalloc_t *chunk_dalloc,
    unsigned arena_ind, size_t size, size_t alignment, bool *zero)
{
	void *ret;

	ret = chunk_alloc(size, alignment, zero, arena_ind);
	if (ret != NULL && chunk_register(ret, size, false)) {
		chunk_dalloc(ret, size, arena_ind);
		ret = NULL;
	}

	return (ret);
}

/* Default arena chunk allocation routine in the absence of user override. */
void *
chunk_alloc_default(size_t size, size_t alignment, bool *zero,
    unsigned arena_ind)
{

	return (chunk_alloc_core(size, alignment, false, zero,
	    arenas[arena_ind]->dss_prec));
}

static void
chunk_record(extent_tree_t *chunks_szad, extent_tree_t *chunks_ad, void *chunk,
    size_t size)
{
	bool unzeroed;
	extent_node_t *xnode, *node, *prev, *xprev, key;

	unzeroed = pages_purge(chunk, size);
	JEMALLOC_VALGRIND_MAKE_MEM_NOACCESS(chunk, size);

	/*
	 * Allocate a node before acquiring chunks_mtx even though it might not
	 * be needed, because base_node_alloc() may cause a new base chunk to
	 * be allocated, which could cause deadlock if chunks_mtx were already
	 * held.
	 */
	xnode = base_node_alloc();
	/* Use xprev to implement conditional deferred deallocation of prev. */
	xprev = NULL;

	malloc_mutex_lock(&chunks_mtx);
	key.addr = (void *)((uintptr_t)chunk + size);
	node = extent_tree_ad_nsearch(chunks_ad, &key);
	/* Try to coalesce forward. */
	if (node != NULL && node->addr == key.addr) {
		/*
		 * Coalesce chunk with the following address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, node);
		node->addr = chunk;
		node->size += size;
		node->zeroed = (node->zeroed && (unzeroed == false));
		extent_tree_szad_insert(chunks_szad, node);
	} else {
		/* Coalescing forward failed, so insert a new node. */
		if (xnode == NULL) {
			/*
			 * base_node_alloc() failed, which is an exceedingly
			 * unlikely failure.  Leak chunk; its pages have
			 * already been purged, so this is only a virtual
			 * memory leak.
			 */
			goto label_return;
		}
		node = xnode;
		xnode = NULL; /* Prevent deallocation below. */
		node->addr = chunk;
		node->size = size;
		node->zeroed = (unzeroed == false);
		extent_tree_ad_insert(chunks_ad, node);
		extent_tree_szad_insert(chunks_szad, node);
	}

	/* Try to coalesce backward. */
	prev = extent_tree_ad_prev(chunks_ad, node);
	if (prev != NULL && (void *)((uintptr_t)prev->addr + prev->size) ==
	    chunk) {
		/*
		 * Coalesce chunk with the previous address range.  This does
		 * not change the position within chunks_ad, so only
		 * remove/insert node from/into chunks_szad.
		 */
		extent_tree_szad_remove(chunks_szad, prev);
		extent_tree_ad_remove(chunks_ad, prev);

		extent_tree_szad_remove(chunks_szad, node);
		node->addr = prev->addr;
		node->size += prev->size;
		node->zeroed = (node->zeroed && prev->zeroed);
		extent_tree_szad_insert(chunks_szad, node);

		xprev = prev;
	}
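	/*
	 * Hypothetical end-to-end example (addresses are illustrative only):
	 * recording [0x1400000, 0x1800000) while [0x1000000, 0x1400000) and
	 * [0x1800000, 0x1c00000) are already in the trees coalesces all three
	 * into a single node spanning [0x1000000, 0x1c00000); prev's node is
	 * freed via xprev once chunks_mtx has been dropped.
	 */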

label_return:
	malloc_mutex_unlock(&chunks_mtx);
	/*
	 * Deallocate xnode and/or xprev after unlocking chunks_mtx in order to
	 * avoid potential deadlock.
	 */
	if (xnode != NULL)
		base_node_dalloc(xnode);
	if (xprev != NULL)
		base_node_dalloc(xprev);
}

void
chunk_unmap(void *chunk, size_t size)
{
	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

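	/*
	 * DSS (sbrk) memory cannot be returned to the operating system
	 * piecemeal, so dss chunks are always recorded for later recycling.
	 * chunk_dalloc_mmap() returns true when it does not unmap the chunk
	 * (e.g. when munmap is disabled), in which case the chunk is likewise
	 * retained for recycling.
	 */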
	if (have_dss && chunk_in_dss(chunk))
		chunk_record(&chunks_szad_dss, &chunks_ad_dss, chunk, size);
	else if (chunk_dalloc_mmap(chunk, size))
		chunk_record(&chunks_szad_mmap, &chunks_ad_mmap, chunk, size);
}

static void
chunk_dalloc_core(void *chunk, size_t size)
{

	assert(chunk != NULL);
	assert(CHUNK_ADDR2BASE(chunk) == chunk);
	assert(size != 0);
	assert((size & chunksize_mask) == 0);

	if (config_ivsalloc)
		rtree_set(chunks_rtree, (uintptr_t)chunk, 0);
	if (config_stats || config_prof) {
		malloc_mutex_lock(&chunks_mtx);
		assert(stats_chunks.curchunks >= (size / chunksize));
		stats_chunks.curchunks -= (size / chunksize);
		malloc_mutex_unlock(&chunks_mtx);
	}

	chunk_unmap(chunk, size);
}

/* Default arena chunk deallocation routine in the absence of user override. */
bool
chunk_dalloc_default(void *chunk, size_t size, unsigned arena_ind)
{

	chunk_dalloc_core(chunk, size);
	return (false);
}
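
/*
 * Sketch of user-supplied replacements matching the chunk_alloc_t and
 * chunk_dalloc_t signatures used above (a hypothetical example, not part of
 * this file; my_reserve_aligned() and my_release() stand in for an
 * application-specific backing store):
 *
 *	static void *
 *	my_chunk_alloc(size_t size, size_t alignment, bool *zero,
 *	    unsigned arena_ind)
 *	{
 *		void *ret = my_reserve_aligned(size, alignment);
 *
 *		if (ret != NULL)
 *			*zero = false;
 *		return (ret);
 *	}
 *
 *	static bool
 *	my_chunk_dalloc(void *chunk, size_t size, unsigned arena_ind)
 *	{
 *		my_release(chunk, size);
 *		return (false);
 *	}
 *
 * As with chunk_dalloc_default(), returning false from the deallocation hook
 * indicates success.
 */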

bool
chunk_boot(void)
{

	/* Set variables according to the value of opt_lg_chunk. */
	chunksize = (ZU(1) << opt_lg_chunk);
	assert(chunksize >= PAGE);
	chunksize_mask = chunksize - 1;
	chunk_npages = (chunksize >> LG_PAGE);
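	/*
	 * Illustrative values (not asserted by this code): opt_lg_chunk == 22
	 * and LG_PAGE == 12 yield a 4 MiB chunksize, a chunksize_mask of
	 * 0x3fffff, and chunk_npages == 1024.
	 */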

	if (config_stats || config_prof) {
		if (malloc_mutex_init(&chunks_mtx))
			return (true);
		memset(&stats_chunks, 0, sizeof(chunk_stats_t));
	}
	if (have_dss && chunk_dss_boot())
		return (true);
	extent_tree_szad_new(&chunks_szad_mmap);
	extent_tree_ad_new(&chunks_ad_mmap);
	extent_tree_szad_new(&chunks_szad_dss);
	extent_tree_ad_new(&chunks_ad_dss);
	if (config_ivsalloc) {
		chunks_rtree = rtree_new((ZU(1) << (LG_SIZEOF_PTR+3)) -
		    opt_lg_chunk, base_alloc, NULL);
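		/*
		 * The rtree key width is the number of significant chunk
		 * address bits: pointer bits minus lg(chunksize).  For
		 * example, 64-bit pointers (LG_SIZEOF_PTR == 3, so
		 * 1 << (3+3) == 64) with opt_lg_chunk == 22 yield 42 key
		 * bits.
		 */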
		if (chunks_rtree == NULL)
			return (true);
	}

	return (false);
}

void
chunk_prefork(void)
{

	malloc_mutex_prefork(&chunks_mtx);
	if (config_ivsalloc)
		rtree_prefork(chunks_rtree);
	chunk_dss_prefork();
}

void
chunk_postfork_parent(void)
{

	chunk_dss_postfork_parent();
	if (config_ivsalloc)
		rtree_postfork_parent(chunks_rtree);
	malloc_mutex_postfork_parent(&chunks_mtx);
}

void
chunk_postfork_child(void)
{

	chunk_dss_postfork_child();
	if (config_ivsalloc)
		rtree_postfork_child(chunks_rtree);
	malloc_mutex_postfork_child(&chunks_mtx);
}
    449