#define	JEMALLOC_CHUNK_MMAP_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Defines/includes needed for Android-specific code. */

#if defined(__ANDROID__)
#include <sys/prctl.h>

/* Definitions of prctl() arguments to set a vma name in Android kernels. */
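/* 0x53564d41 is the ASCII encoding of "SVMA". */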
#define ANDROID_PR_SET_VMA            0x53564d41
#define ANDROID_PR_SET_VMA_ANON_NAME  0
#endif

/******************************************************************************/

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	*pages_map(void *addr, size_t size);
static void	pages_unmap(void *addr, size_t size);
static void	*chunk_alloc_mmap_slow(size_t size, size_t alignment,
    bool *zero);

/******************************************************************************/

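/*
 * Map size bytes of anonymous, read/write memory.  If addr is non-NULL it is
 * a hint; the mapping either lands exactly there, or the attempt is undone
 * and NULL is returned.  NULL is also returned on outright failure.
 */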
static void *
pages_map(void *addr, size_t size)
{
	void *ret;

	assert(size != 0);

#ifdef _WIN32
	/*
	 * If VirtualAlloc can't allocate memory at the given address when one
	 * is specified, it fails and returns NULL.
	 */
	ret = VirtualAlloc(addr, size, MEM_COMMIT | MEM_RESERVE,
	    PAGE_READWRITE);
#else
	/*
	 * We don't use MAP_FIXED here, because it can cause the *replacement*
	 * of existing mappings, and we only want to create new mappings.
	 */
	ret = mmap(addr, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON,
	    -1, 0);
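	/*
	 * NULL is this function's failure sentinel; mmap() reports failure
	 * with MAP_FAILED rather than NULL, so a NULL return here is not
	 * expected.
	 */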
	assert(ret != NULL);

	if (ret == MAP_FAILED)
		ret = NULL;
	else if (addr != NULL && ret != addr) {
		/*
		 * We succeeded in mapping memory, but not in the right place.
		 */
		pages_unmap(ret, size);
		ret = NULL;
	}
#endif
#if defined(__ANDROID__)
	if (ret != NULL) {
		/* Name this mapping so tools can attribute it to libc. */
		prctl(ANDROID_PR_SET_VMA, ANDROID_PR_SET_VMA_ANON_NAME, ret,
		    size, "libc_malloc");
	}
#endif
	assert(ret == NULL || (addr == NULL && ret != addr)
	    || (addr != NULL && ret == addr));
	return (ret);
}

static void
pages_unmap(void *addr, size_t size)
{

#ifdef _WIN32
	if (VirtualFree(addr, 0, MEM_RELEASE) == 0)
#else
	if (munmap(addr, size) == -1)
#endif
	{
		char buf[BUFERROR_BUF];

		buferror(get_errno(), buf, sizeof(buf));
		malloc_printf("<jemalloc>: Error in "
#ifdef _WIN32
		              "VirtualFree"
#else
		              "munmap"
#endif
		              "(): %s\n", buf);
		if (opt_abort)
			abort();
	}
}

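/*
 * Trim an over-sized mapping down to the aligned region [addr + leadsize,
 * addr + leadsize + size).  On POSIX systems the leading and trailing excess
 * can simply be munmap()ed; Windows cannot release part of a VirtualAlloc
 * region, so the whole mapping is released and the target range is remapped,
 * which can fail if another thread claims the address first.
 */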
static void *
pages_trim(void *addr, size_t alloc_size, size_t leadsize, size_t size)
{
	void *ret = (void *)((uintptr_t)addr + leadsize);

	assert(alloc_size >= leadsize + size);
#ifdef _WIN32
	{
		void *new_addr;

		pages_unmap(addr, alloc_size);
		new_addr = pages_map(ret, size);
		if (new_addr == ret)
			return (ret);
		if (new_addr)
			pages_unmap(new_addr, size);
		return (NULL);
	}
#else
	{
		size_t trailsize = alloc_size - leadsize - size;

		if (leadsize != 0)
			pages_unmap(addr, leadsize);
		if (trailsize != 0)
			pages_unmap((void *)((uintptr_t)ret + size), trailsize);
		return (ret);
	}
#endif
}

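/*
 * Advise the OS that the pages in [addr, addr + length) are no longer needed.
 * Returns true if the pages may still hold stale contents afterward, false if
 * they are guaranteed to read back as zeros.
 */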
bool
pages_purge(void *addr, size_t length)
{
	bool unzeroed;

#ifdef _WIN32
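	/*
	 * MEM_RESET tells the kernel the contents need not be preserved, but
	 * keeps the region committed; the pages are not zeroed.
	 */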
	VirtualAlloc(addr, length, MEM_RESET, PAGE_READWRITE);
	unzeroed = true;
#elif defined(JEMALLOC_HAVE_MADVISE)
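	/*
	 * MADV_DONTNEED (Linux) causes purged pages to be zero-filled on next
	 * access, whereas MADV_FREE (BSD) leaves their contents undefined.
	 */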
#  ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
#    define JEMALLOC_MADV_PURGE MADV_DONTNEED
#    define JEMALLOC_MADV_ZEROS true
#  elif defined(JEMALLOC_PURGE_MADVISE_FREE)
#    define JEMALLOC_MADV_PURGE MADV_FREE
#    define JEMALLOC_MADV_ZEROS false
#  else
#    error "No madvise(2) flag defined for purging unused dirty pages."
#  endif
	int err = madvise(addr, length, JEMALLOC_MADV_PURGE);
	unzeroed = (!JEMALLOC_MADV_ZEROS || err != 0);
#  undef JEMALLOC_MADV_PURGE
#  undef JEMALLOC_MADV_ZEROS
#else
	/* Last resort no-op. */
	unzeroed = true;
#endif
	return (unzeroed);
}

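/*
 * Slow path: over-allocate by (alignment - PAGE) bytes so that the mapping is
 * guaranteed to contain an alignment-aligned span of size bytes, then trim
 * the excess.  For example, with size = alignment = 4 MiB and 4 KiB pages,
 * alloc_size is 8 MiB - 4 KiB; the worst-case leading gap to the next 4 MiB
 * boundary is 4 MiB - 4 KiB, which still leaves exactly 4 MiB to return.
 */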
static void *
chunk_alloc_mmap_slow(size_t size, size_t alignment, bool *zero)
{
	void *ret, *pages;
	size_t alloc_size, leadsize;

	alloc_size = size + alignment - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < size)
		return (NULL);
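	/*
	 * pages_trim() can fail on Windows, where trimming is implemented as
	 * an unmap/remap pair that can race with other allocations; retry
	 * until it succeeds.
	 */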
	do {
		pages = pages_map(NULL, alloc_size);
		if (pages == NULL)
			return (NULL);
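		/* Gap from the mapping start to the next aligned address. */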
		leadsize = ALIGNMENT_CEILING((uintptr_t)pages, alignment) -
		    (uintptr_t)pages;
		ret = pages_trim(pages, alloc_size, leadsize, size);
	} while (ret == NULL);

	assert(ret != NULL);
	*zero = true;
	return (ret);
}

void *
chunk_alloc_mmap(size_t size, size_t alignment, bool *zero)
{
	void *ret;
	size_t offset;

	/*
	 * Ideally, there would be a way to specify alignment to mmap() (like
	 * NetBSD has), but in the absence of such a feature, we have to work
	 * hard to efficiently create aligned mappings.  The reliable but slow
	 * method is to create an over-sized mapping, then trim the excess.
	 * However, that always results in one or two calls to pages_unmap().
	 *
	 * Optimistically try mapping precisely the right amount before falling
	 * back to the slow method, with the expectation that the optimistic
	 * approach works most of the time.
	 */

	assert(alignment != 0);
	assert((alignment & chunksize_mask) == 0);

	ret = pages_map(NULL, size);
	if (ret == NULL)
		return (NULL);
	offset = ALIGNMENT_ADDR2OFFSET(ret, alignment);
	if (offset != 0) {
		pages_unmap(ret, size);
		return (chunk_alloc_mmap_slow(size, alignment, zero));
	}

	assert(ret != NULL);
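	/* Fresh anonymous mappings are zero-filled by the OS. */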
	*zero = true;
	return (ret);
}

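/*
 * Unmap a chunk, unless munmap() use is configured out; return true if the
 * mapping was left intact so the caller can recycle the chunk instead.
 */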
bool
chunk_dalloc_mmap(void *chunk, size_t size)
{

	if (config_munmap)
		pages_unmap(chunk, size);

	return (!config_munmap);
}
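
/*
 * Usage sketch (hypothetical caller, not part of this file); chunksize here
 * stands for the arena chunk size defined elsewhere in jemalloc:
 *
 *	bool zero = false;
 *	void *chunk = chunk_alloc_mmap(chunksize, chunksize, &zero);
 *	if (chunk != NULL) {
 *		... use the chunk; zero reports whether it is zero-filled ...
 *		if (chunk_dalloc_mmap(chunk, chunksize)) {
 *			... mapping retained; recycle the chunk ...
 *		}
 *	}
 */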