#define	JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Defines/includes needed for Android-specific code. */

#if defined(__ANDROID__)
#include <sys/prctl.h>

/* Definitions of prctl arguments used to name a VMA in Android kernels. */
#define ANDROID_PR_SET_VMA            0x53564d41
#define ANDROID_PR_SET_VMA_ANON_NAME  0
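
/*
 * Illustrative: after pages_map() below calls
 * prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, addr, size,
 * "libc_malloc"), the mapping appears in /proc/self/maps as
 * "[anon:libc_malloc]", which makes allocator memory attributable in tools
 * that parse the maps file.
 */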
#endif

/******************************************************************************/

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*pages_map(void *addr, size_t size);
static void	pages_unmap(void *addr, size_t size);
static void	*chunk_alloc_mmap_slow(size_t size, size_t alignment,
    bool *zero);

/******************************************************************************/

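/*
 * Map size bytes of anonymous, read/write memory.  A non-NULL addr is only a
 * hint: if the kernel places the mapping elsewhere, the mapping is released
 * and NULL is returned, so a non-NULL result is either exactly at addr or
 * (when no hint was given) wherever the system chose.
 */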
static void *
pages_map(void *addr, size_t size)
{
	void *ret;

	assert(size != 0);

#ifdef _WIN32
	/*
	 * If VirtualAlloc can't allocate at the given address when one is
	 * given, it fails and returns NULL.
	 */
	ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
	    PAGE_READWRITE);
#else
	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		if (munmap(ret, size) == -1) {
			char buf[BUFERROR_BUF];

			buferror(get_errno(), buf, sizeof(buf));
			malloc_printf("<jemalloc>: Error in munmap(): %s\n",
			    buf);
			if (opt_abort)
				abort();
		}
		ret = NULL;
	}
#endif
#if defined(__ANDROID__)
	if (ret != NULL) {
		/* Name this memory as being used by libc. */
		prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, ret,
		    size, "libc_malloc");
	}
#endif
	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}

static void
pages_unmap(void *addr, size_t size)
{

#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
	if (munmap(addr, size) == -1)
#endif
	{
		char buf[BUFERROR_BUF];

		buferror(get_errno(), buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
		              "VirtualFree"
#else
		              "munmap"
#endif
		              "(): %s\n", buf);
		if (opt_abort)
			abort();
	}
}

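/*
 * Given a mapping [addr, addr+alloc_size), return the subrange that starts
 * leadsize bytes in and spans size bytes, unmapping the leading and trailing
 * excess.  On Windows, mappings cannot be partially released, so the whole
 * region is unmapped and the target subrange is remapped; that remap can race
 * with other threads and fail, in which case NULL is returned.
 */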
static void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
	void *ret = (void *)((uintptr_t)addr + leadsize);

	assert(alloc_size >= leadsize + size);
#ifdef _WIN32
	{
		void *new_addr;

		pages_unmap(addr, alloc_size);
		new_addr = pages_map(ret, size);
		if (new_addr == ret)
			return (ret);
		if (new_addr)
			pages_unmap(new_addr, size);
		return (NULL);
	}
#else
	{
		size_t trailsize = alloc_size - leadsize - size;

		if (leadsize != 0)
			pages_unmap(addr, leadsize);
		if (trailsize != 0)
			pages_unmap((void *)((uintptr_t)ret + size), trailsize);
		return (ret);
	}
#endif
}

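/*
 * Tell the OS that the pages in [addr, addr+length) no longer contain needed
 * data.  Returns true if the pages may still hold their old (non-zero)
 * contents afterward: MADV_DONTNEED guarantees zero-filled pages on the next
 * touch (Linux), while MADV_FREE and MEM_RESET merely allow the OS to reclaim
 * lazily, so the old contents can reappear.
 */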
bool
pages_purge(void *addr, size_t length)
{
	bool unzeroed;

#ifdef _WIN32
	VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
	unzeroed = true;
#elif defined(JEMALLOC_HAVE_MADVISE)
#  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
#    define JEMALLOC_MADV_ZEROS true
#  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
#    define JEMALLOC_MADV_PURGE MADV_FREE
#    define JEMALLOC_MADV_ZEROS false
#  else
#    error "No madvise(2) flag defined for purging unused dirty pages."
#  endif
	int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
	unzeroed = (JEMALLOC_MADV_ZEROS == false || err != 0);
#  undef JEMALLOC_MADV_PURGE
#  undef JEMALLOC_MADV_ZEROS
#else
	/* Last resort no-op. */
	unzeroed = true;
#endif
	return (unzeroed);
}

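/*
 * Reliable fallback: over-allocate by (alignment - PAGE) bytes so that the
 * mapping is guaranteed to contain an alignment-aligned, size-byte region,
 * then trim the excess with pages_trim().  Worked example (illustrative
 * numbers): with PAGE = 4 KiB and size = alignment = 4 MiB, alloc_size is
 * 8 MiB - 4 KiB; since mmap() returns page-aligned addresses, the leading
 * waste is at most alignment - PAGE bytes, so an aligned 4 MiB region always
 * fits.  The loop retries because pages_trim() can fail on Windows.
 */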
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
{
	void *ret, *pages;
	size_t alloc_size, leadsize;

	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
	do {
		pages = pages_map(NULL, alloc_size);
		if (pages == NULL)
			return (NULL);
		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
		    (uintptr_t)pages;
		ret = pages_trim(pages, alloc_size, leadsize, size);
	} while (ret == NULL);

	assert(ret != NULL);
	*zero = true;
	return (ret);
}

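/*
 * Typical use (illustrative; callers elsewhere in jemalloc generally pass
 * chunksize for both arguments):
 *
 *	bool zero = false;
 *	void *chunk = chunk_alloc_mmap(chunksize, chunksize, &zero);
 *	if (chunk != NULL)
 *		assert(((uintptr_t)chunk & chunksize_mask) == 0);
 *
 * On success the returned memory comes fresh from the kernel, so *zero is set
 * to true.
 */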
void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
	void *ret;
	size_t offset;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable but slow
	 * method is to create an over-sized mapping, then trim the excess.
	 * However, that always results in one or two calls to pages_unmap().
	 *
	 * Optimistically try mapping precisely the right amount before falling
	 * back to the slow method, with the expectation that the optimistic
	 * approach works most of the time.
	 */

	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = pages_map(NULL, size);
	if (ret == NULL)
		return (NULL);
	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
	if (offset != 0) {
		pages_unmap(ret, size);
		return (chunk_alloc_mmap_slow(size, alignment, zero));
	}

	assert(ret != NULL);
	*zero = true;
	return (ret);
}

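/*
 * Return the chunk's pages to the OS when unmapping is enabled.  Returns true
 * if the chunk was NOT unmapped (config_munmap disabled), signaling the
 * caller to retain the memory for later reuse instead of leaking it.
 */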
bool
chunk_dalloc_mmap(void *chunk, size_t size)
{

	if (config_munmap)
		pages_unmap(chunk, size);

	return (config_munmap == false);
}