/* Copyright 2006 The Android Open Source Project */

/* A wrapper file for dlmalloc.c that compiles in the
 * mspace_*() functions, which provide an interface for
 * creating multiple heaps.
 */
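
/* A rough usage sketch of the contiguous-mspace API defined below (the sizes
 * and the locked flag are illustrative; mspace_malloc()/mspace_free() come
 * from the dlmalloc.c that is #included further down):
 *
 *   mspace m = create_contiguous_mspace(64 * 1024, 1024 * 1024, 0);
 *   if (m != NULL) {
 *     void *p = mspace_malloc(m, 128);
 *     mspace_free(m, p);
 *     destroy_contiguous_mspace(m);
 *   }
 */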
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <unistd.h>
#include <stdint.h>
#include <sys/ioctl.h>

#include <cutils/ashmem.h>

/* It's a pain getting the mallinfo stuff to work
 * with Linux, OS X, and klibc, so just turn it off
 * for now.
 * TODO: make mallinfo work
 */
#define NO_MALLINFO 1

/* Allow setting the maximum heap footprint.
 */
#define USE_MAX_ALLOWED_FOOTPRINT 1

/* Don't try to trim memory.
 * TODO: support this.
 */
#define MORECORE_CANNOT_TRIM 1

/* Use mmap()d anonymous memory to guarantee
 * that an mspace is contiguous.
 *
 * create_mspace() won't work right if this is
 * defined, so hide the definition of it and
 * break any users at build time.
 */
#define USE_CONTIGUOUS_MSPACES 1
#if USE_CONTIGUOUS_MSPACES
/* This combination of settings forces sys_alloc()
 * to always use MORECORE().  It won't expect the
 * results to be contiguous, but we'll guarantee
 * that they are.
 */
#define HAVE_MMAP 0
#define HAVE_MORECORE 1
#define MORECORE_CONTIGUOUS 0
/* m is always the appropriate local when MORECORE() is called. */
#define MORECORE(S) contiguous_mspace_morecore(m, S)
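/* This is a plain textual expansion: e.g. MORECORE(1024) becomes
 * contiguous_mspace_morecore(m, 1024), binding to whatever variable named
 * 'm' (the mstate being grown) is in scope at dlmalloc's call site.
 */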
#define create_mspace   HIDDEN_create_mspace_HIDDEN
#define destroy_mspace   HIDDEN_destroy_mspace_HIDDEN
typedef struct malloc_state *mstate0;
static void *contiguous_mspace_morecore(mstate0 m, ssize_t nb);
#endif

#define MSPACES 1
#define ONLY_MSPACES 1
#include "../../../bionic/libc/bionic/dlmalloc.c"

#ifndef PAGESIZE
#define PAGESIZE  mparams.page_size
#endif

#define ALIGN_UP(p, alignment) \
    (((uintptr_t)(p) + (alignment)-1) & ~((alignment)-1))
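/* e.g., with a 4096-byte page, ALIGN_UP(4096, 4096) == 4096 and
 * ALIGN_UP(4097, 4096) == 8192.  'alignment' must be a power of two.
 */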

/* A direct copy of dlmalloc_usable_size(),
 * which isn't compiled in when ONLY_MSPACES is set.
 * The mspace parameter isn't actually necessary,
 * but we include it to be consistent with the
 * rest of the mspace_*() functions.
 */
size_t mspace_usable_size(mspace _unused, const void* mem) {
  if (mem != 0) {
    const mchunkptr p = mem2chunk(mem);
    if (cinuse(p))
      return chunksize(p) - overhead_for(p);
  }
  return 0;
}
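/* For example, after p = mspace_malloc(msp, 100), mspace_usable_size(msp, p)
 * reports the in-use chunk's full payload, which is at least 100 bytes.
 */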

#if USE_CONTIGUOUS_MSPACES
#include <sys/mman.h>
#include <limits.h>

#define CONTIG_STATE_MAGIC  0xf00dd00d
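/* Bookkeeping kept at the very start of a contiguous mspace's mapping.
 * Rough layout of the region (set up by create_contiguous_mspace_with_base()):
 *
 *   base/cs:  this struct, followed by dlmalloc's malloc_state and heap
 *   cs->brk:  end of the memory handed to dlmalloc so far
 *   cs->top:  base + max_capacity; pages between brk (rounded up to a page
 *             boundary) and top are kept PROT_NONE
 */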
struct mspace_contig_state {
  unsigned int magic;
  char *brk;
  char *top;
  mspace m;
};

static void *contiguous_mspace_morecore(mstate m, ssize_t nb) {
  struct mspace_contig_state *cs;
  char *oldbrk;
  const unsigned int pagesize = PAGESIZE;

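  /* The mstate lives in the same page as the contig state (asserted in
   * create_contiguous_mspace_with_base()), so masking off the page offset
   * recovers the mspace_contig_state at the start of the region.
   */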
  cs = (struct mspace_contig_state *)((uintptr_t)m & ~(pagesize-1));
  assert(cs->magic == CONTIG_STATE_MAGIC);
  assert(cs->m == m);
  assert(nb >= 0);  /* TODO: handle the trim (nb < 0) case. */

  oldbrk = cs->brk;
  if (nb > 0) {
    /* Break to the first page boundary that satisfies the request.
     */
    char *newbrk = (char *)ALIGN_UP(oldbrk + nb, pagesize);
    if (newbrk > cs->top)
      return CMFAIL;

    /* Update the protection on the underlying memory.
     * Pages we've given to dlmalloc are read/write, and
     * pages we haven't are not accessible (any read or write
     * will cause a segfault).
     */
    if (mprotect(cs, newbrk - (char *)cs, PROT_READ | PROT_WRITE) < 0)
      return CMFAIL;
    if (newbrk != cs->top) {
      if (mprotect(newbrk, cs->top - newbrk, PROT_NONE) < 0)
        return CMFAIL;
    }

    cs->brk = newbrk;

    /* Make sure that dlmalloc will merge this block with the
     * initial block that was passed to create_mspace_with_base().
     * We don't care about extern vs. non-extern, so just clear it.
     */
    m->seg.sflags &= ~EXTERN_BIT;
  }

  return oldbrk;
}

mspace create_contiguous_mspace_with_base(size_t starting_capacity,
    size_t max_capacity, int locked, void *base) {
  struct mspace_contig_state *cs;
  unsigned int pagesize;
  mstate m;

  init_mparams();
  pagesize = PAGESIZE;
  assert(starting_capacity <= max_capacity);
  assert(((uintptr_t)base & (pagesize-1)) == 0);
  assert(((uintptr_t)max_capacity & (pagesize-1)) == 0);
  starting_capacity = (size_t)ALIGN_UP(starting_capacity, pagesize);

  /* Make the initial pages (the first starting_capacity bytes) read/write;
   * dlmalloc needs to use them.
   */
  if (mprotect(base, starting_capacity, PROT_READ | PROT_WRITE) < 0) {
    goto error;
  }

  /* Create the mspace, pointing to the memory given.
   */
  m = create_mspace_with_base((char *)base + sizeof(*cs), starting_capacity,
                              locked);
  if (m == (mspace)0) {
    goto error;
  }
  /* Make sure that m is in the same page as base.
   */
  assert(((uintptr_t)m & (uintptr_t)~(pagesize-1)) == (uintptr_t)base);
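  /* This same-page property is what lets contiguous_mspace_morecore() and
   * destroy_contiguous_mspace() recover the contig state by masking m.
   */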
  /* Use some space for the information that our MORECORE needs.
   */
  cs = (struct mspace_contig_state *)base;

  /* Record how much of the memory the mspace is using (brk) and where
   * the reservation ends (top).
   */
  cs->brk = m->seg.base + m->seg.size;
  cs->top = (char *)base + max_capacity;

  assert((char *)base <= cs->brk);
  assert(cs->brk <= cs->top);
  /* Prevent access to the memory we haven't handed out yet.
   */
  if (cs->brk != cs->top) {
    /* mprotect() requires page-aligned arguments, but it's possible
     * for cs->brk not to be page-aligned at this point.
     */
    char *prot_brk = (char *)ALIGN_UP(cs->brk, pagesize);
    if ((mprotect(base, prot_brk - (char *)base, PROT_READ | PROT_WRITE) < 0) ||
        (mprotect(prot_brk, cs->top - prot_brk, PROT_NONE) < 0)) {
      goto error;
    }
  }

  cs->m = m;
  cs->magic = CONTIG_STATE_MAGIC;

  return (mspace)m;

error:
  return (mspace)0;
}
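
/* A caller that isn't going through ashmem could, for instance, hand this
 * function a page-aligned anonymous reservation (illustrative sketch; the
 * mmap flags are an assumption, and max_capacity must already be a multiple
 * of the page size):
 *
 *   void *base = mmap(NULL, max_capacity, PROT_NONE,
 *                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *   if (base != MAP_FAILED)
 *     msp = create_contiguous_mspace_with_base(starting_capacity,
 *                                              max_capacity, 0, base);
 */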


mspace create_contiguous_mspace_with_name(size_t starting_capacity,
    size_t max_capacity, int locked, char const *name) {
  int fd;
  char buf[ASHMEM_NAME_LEN] = "mspace";
  void *base;
  unsigned int pagesize;
  mstate m;

  if (starting_capacity > max_capacity)
    return (mspace)0;

  init_mparams();
  pagesize = PAGESIZE;

  /* Create the anonymous memory that will back the mspace.
   * This reserves all of the virtual address space we could
   * ever need.  Physical pages will be mapped as the memory
   * is touched.
   *
   * Align max_capacity to a whole page.
   */
  max_capacity = (size_t)ALIGN_UP(max_capacity, pagesize);

  if (name)
    snprintf(buf, sizeof(buf), "mspace/%s", name);
  fd = ashmem_create_region(buf, max_capacity);
  if (fd < 0)
    return (mspace)0;

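  /* Map the ashmem region.  The mapping holds its own reference to the
   * region, so the fd can be closed as soon as the mmap() succeeds.
   */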
  base = mmap(NULL, max_capacity, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
  close(fd);
  if (base == MAP_FAILED)
    return (mspace)0;

  /* Make sure that base is at the beginning of a page.
   */
  assert(((uintptr_t)base & (pagesize-1)) == 0);

  m = create_contiguous_mspace_with_base(starting_capacity, max_capacity,
                                         locked, base);
  if (m == 0) {
    munmap(base, max_capacity);
  }
  return m;
}

mspace create_contiguous_mspace(size_t starting_capacity,
    size_t max_capacity, int locked) {
  return create_contiguous_mspace_with_name(starting_capacity,
      max_capacity, locked, NULL);
}

size_t destroy_contiguous_mspace(mspace msp) {
  mstate ms = (mstate)msp;

  if (ok_magic(ms)) {
    struct mspace_contig_state *cs;
    size_t length;
    const unsigned int pagesize = PAGESIZE;

    cs = (struct mspace_contig_state *)((uintptr_t)ms & ~(pagesize-1));
    assert(cs->magic == CONTIG_STATE_MAGIC);
    assert(cs->m == ms);

    length = cs->top - (char *)cs;
    if (munmap((char *)cs, length) != 0)
      return length;
  }
  else {
    USAGE_ERROR_ACTION(ms, ms);
  }
  return 0;
}
#endif