#define	JEMALLOC_BASE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

static malloc_mutex_t	base_mtx;

/*
 * Current pages that are being used for internal memory allocations.  These
 * pages are carved up in cacheline-size quanta, so that there is no chance of
 * false cache line sharing.
 */
static void		*base_pages;
static void		*base_next_addr;
static void		*base_past_addr; /* Addr immediately past base_pages. */
static extent_node_t	*base_nodes;

/******************************************************************************/

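/*
 * Allocate another chunk's worth of pages (at least minsize bytes, rounded up
 * via CHUNK_CEILING) to carve base allocations out of.  Returns true on error.
 */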
static bool
base_pages_alloc(size_t minsize)
{
	size_t csize;

	assert(minsize != 0);
	csize = CHUNK_CEILING(minsize);
	base_pages = chunk_alloc_base(csize);
	if (base_pages == NULL)
		return (true);
	base_next_addr = base_pages;
	base_past_addr = (void *)((uintptr_t)base_pages + csize);

	return (false);
}

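/*
 * Return size bytes of cacheline-aligned internal memory by bumping
 * base_next_addr, allocating more base pages on demand.  Returns NULL on
 * failure.  This module provides no way to free individual base allocations.
 */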
void *
base_alloc(size_t size)
{
	void *ret;
	size_t csize;

	/* Round size up to nearest multiple of the cacheline size. */
	csize = CACHELINE_CEILING(size);

	malloc_mutex_lock(&base_mtx);
	/* Make sure there's enough space for the allocation. */
	if ((uintptr_t)base_next_addr + csize > (uintptr_t)base_past_addr) {
		if (base_pages_alloc(csize)) {
			malloc_mutex_unlock(&base_mtx);
			return (NULL);
		}
	}
	/* Allocate. */
	ret = base_next_addr;
	base_next_addr = (void *)((uintptr_t)base_next_addr + csize);
	malloc_mutex_unlock(&base_mtx);
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, csize);

	return (ret);
}

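/*
 * Zeroed wrapper around base_alloc().  Note that number * size is not checked
 * for overflow.
 */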
void *
base_calloc(size_t number, size_t size)
{
	void *ret = base_alloc(number * size);

	if (ret != NULL)
		memset(ret, 0, number * size);

	return (ret);
}

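/*
 * Allocate an extent_node_t, preferring to reuse a node from the base_nodes
 * free list (which links nodes through their first word) over calling
 * base_alloc().
 */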
extent_node_t *
base_node_alloc(void)
{
	extent_node_t *ret;

	malloc_mutex_lock(&base_mtx);
	if (base_nodes != NULL) {
		ret = base_nodes;
		base_nodes = *(extent_node_t **)ret;
		malloc_mutex_unlock(&base_mtx);
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret,
		    sizeof(extent_node_t));
	} else {
		malloc_mutex_unlock(&base_mtx);
		ret = (extent_node_t *)base_alloc(sizeof(extent_node_t));
	}

	return (ret);
}

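/*
 * Stash node on the base_nodes free list for later reuse, overwriting its
 * first word with the free list link.
 */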
void
base_node_dalloc(extent_node_t *node)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(node, sizeof(extent_node_t));
	malloc_mutex_lock(&base_mtx);
	*(extent_node_t **)node = base_nodes;
	base_nodes = node;
	malloc_mutex_unlock(&base_mtx);
}

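/*
 * Initialize base allocator state.  Returns true if mutex initialization
 * fails.
 */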
bool
base_boot(void)
{

	base_nodes = NULL;
	if (malloc_mutex_init(&base_mtx))
		return (true);

	return (false);
}

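/*
 * Fork hooks: base_prefork() acquires base_mtx before fork(), and the
 * postfork functions restore it in the parent and child so that base state
 * remains usable after fork().
 */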
void
base_prefork(void)
{

	malloc_mutex_prefork(&base_mtx);
}

void
base_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&base_mtx);
}

void
base_postfork_child(void)
{

	malloc_mutex_postfork_child(&base_mtx);
}