/*
  Default header file for malloc-2.8.x, written by Doug Lea
  and released to the public domain, as explained at
  http://creativecommons.org/publicdomain/zero/1.0/

  This header is for ANSI C/C++ only.  You can set any of
  the following #defines before including:

  * If USE_DL_PREFIX is defined, it is assumed that malloc.c
    was also compiled with this option, so all routines
    have names starting with "dl".

  * If HAVE_USR_INCLUDE_MALLOC_H is defined, it is assumed that this
    file will be #included AFTER <malloc.h>. This is needed only if
    your system defines a struct mallinfo that is incompatible with the
    standard one declared here.  Otherwise, you can include this file
    INSTEAD of your system <malloc.h>.  At least on ANSI systems, all
    declarations should be compatible with system versions.

  * If MSPACES is defined, declarations for mspace versions are included.
*/

#ifndef MALLOC_280_H
#define MALLOC_280_H

#ifdef __cplusplus
extern "C" {
#endif

#include <stddef.h>   /* for size_t */

#ifndef ONLY_MSPACES
#define ONLY_MSPACES 0     /* define to a value */
#elif ONLY_MSPACES != 0
#define ONLY_MSPACES 1
#endif  /* ONLY_MSPACES */
#ifndef NO_MALLINFO
#define NO_MALLINFO 0
#endif  /* NO_MALLINFO */

#ifndef MSPACES
#if ONLY_MSPACES
#define MSPACES 1
#else   /* ONLY_MSPACES */
#define MSPACES 0
#endif  /* ONLY_MSPACES */
#endif  /* MSPACES */

#if !ONLY_MSPACES

#ifndef USE_DL_PREFIX
#define dlcalloc               calloc
#define dlfree                 free
#define dlmalloc               malloc
#define dlmemalign             memalign
#define dlposix_memalign       posix_memalign
#define dlrealloc              realloc
#define dlvalloc               valloc
#define dlpvalloc              pvalloc
#define dlmallinfo             mallinfo
#define dlmallopt              mallopt
#define dlmalloc_trim          malloc_trim
#define dlmalloc_stats         malloc_stats
#define dlmalloc_usable_size   malloc_usable_size
#define dlmalloc_footprint     malloc_footprint
#define dlmalloc_max_footprint malloc_max_footprint
#define dlmalloc_footprint_limit malloc_footprint_limit
#define dlmalloc_set_footprint_limit malloc_set_footprint_limit
#define dlmalloc_inspect_all   malloc_inspect_all
#define dlindependent_calloc   independent_calloc
#define dlindependent_comalloc independent_comalloc
#define dlbulk_free            bulk_free
#endif /* USE_DL_PREFIX */

#if !NO_MALLINFO
#ifndef HAVE_USR_INCLUDE_MALLOC_H
#ifndef _MALLOC_H
#ifndef MALLINFO_FIELD_TYPE
#define MALLINFO_FIELD_TYPE size_t
#endif /* MALLINFO_FIELD_TYPE */
#ifndef STRUCT_MALLINFO_DECLARED
#define STRUCT_MALLINFO_DECLARED 1
struct mallinfo {
  MALLINFO_FIELD_TYPE arena;    /* non-mmapped space allocated from system */
  MALLINFO_FIELD_TYPE ordblks;  /* number of free chunks */
  MALLINFO_FIELD_TYPE smblks;   /* always 0 */
  MALLINFO_FIELD_TYPE hblks;    /* always 0 */
  MALLINFO_FIELD_TYPE hblkhd;   /* space in mmapped regions */
  MALLINFO_FIELD_TYPE usmblks;  /* maximum total allocated space */
  MALLINFO_FIELD_TYPE fsmblks;  /* always 0 */
  MALLINFO_FIELD_TYPE uordblks; /* total allocated space */
  MALLINFO_FIELD_TYPE fordblks; /* total free space */
  MALLINFO_FIELD_TYPE keepcost; /* releasable (via malloc_trim) space */
};
#endif /* STRUCT_MALLINFO_DECLARED */
#endif  /* _MALLOC_H */
#endif  /* HAVE_USR_INCLUDE_MALLOC_H */
#endif  /* !NO_MALLINFO */

/*
  malloc(size_t n)
  Returns a pointer to a newly allocated chunk of at least n bytes, or
  null if no space is available, in which case errno is set to ENOMEM
  on ANSI C systems.

  If n is zero, malloc returns a minimum-sized chunk. (The minimum
  size is 16 bytes on most 32bit systems, and 32 bytes on 64bit
  systems.)  Note that size_t is an unsigned type, so calls with
  arguments that would be negative if signed are interpreted as
  requests for huge amounts of space, which will often fail. The
  maximum supported value of n differs across systems, but is in all
  cases less than the maximum representable value of a size_t.
*/
void* dlmalloc(size_t);

/*
  free(void* p)
  Releases the chunk of memory pointed to by p, that had been previously
  allocated using malloc or a related routine such as realloc.
  It has no effect if p is null. If p was not malloced or already
  freed, free(p) will by default cause the current program to abort.
*/
void  dlfree(void*);

/*
  calloc(size_t n_elements, size_t element_size);
  Returns a pointer to n_elements * element_size bytes, with all locations
  set to zero.
*/
void* dlcalloc(size_t, size_t);

/*
  realloc(void* p, size_t n)
  Returns a pointer to a chunk of size n that contains the same data
  as does chunk p up to the minimum of (n, p's size) bytes, or null
  if no space is available.

  The returned pointer may or may not be the same as p. The algorithm
  prefers extending p in most cases when possible, otherwise it
  employs the equivalent of a malloc-copy-free sequence.

  If p is null, realloc is equivalent to malloc.

  If space is not available, realloc returns null, errno is set (if on
  ANSI) and p is NOT freed.

  If n is for fewer bytes than already held by p, the newly unused
  space is lopped off and freed if possible.  realloc with a size
  argument of zero (re)allocates a minimum-sized chunk.

  The old unix realloc convention of allowing the last-free'd chunk
  to be used as an argument to realloc is not supported.
*/
void* dlrealloc(void*, size_t);

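/*
  Because a failed realloc leaves p allocated, callers should keep the
  old pointer until success is confirmed. A minimal sketch of this
  pattern (illustrative only; "buf" and "new_size" are hypothetical
  names, not part of this header):

    char* buf = (char*) malloc(100);
    size_t new_size = 1000;
    if (buf != 0) {
      char* tmp = (char*) realloc(buf, new_size);
      if (tmp != 0)
        buf = tmp;   // success: adopt the (possibly moved) chunk
      else
        free(buf);   // failure: buf is still valid and must be freed
    }
*/
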
/*
  realloc_in_place(void* p, size_t n)
  Resizes the space allocated for p to size n, only if this can be
  done without moving p (i.e., only if there is adjacent space
  available if n is greater than p's current allocated size, or n is
  less than or equal to p's size). This may be used instead of plain
  realloc if an alternative allocation strategy is needed upon failure
  to expand space; for example, reallocation of a buffer that must be
  memory-aligned or cleared. You can use realloc_in_place to trigger
  these alternatives only when needed.

  Returns p if successful; otherwise null.
*/
void* dlrealloc_in_place(void*, size_t);

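/*
  A sketch of the fallback strategy described above: try to grow an
  aligned buffer in place, and only reallocate-and-copy when that
  fails (illustrative; grow_aligned, old_n, and the memcpy from
  <string.h> are not part of this header):

    void* grow_aligned(void* p, size_t old_n, size_t n, size_t alignment) {
      void* q;
      if (realloc_in_place(p, n) != 0)
        return p;                  // grown in place; alignment preserved
      q = memalign(alignment, n);
      if (q != 0) {
        memcpy(q, p, old_n);       // copy old contents into aligned chunk
        free(p);
      }
      return q;
    }
*/
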
/*
  memalign(size_t alignment, size_t n);
  Returns a pointer to a newly allocated chunk of n bytes, aligned
  in accord with the alignment argument.

  The alignment argument should be a power of two. If the argument is
  not a power of two, the nearest greater power is used.
  8-byte alignment is guaranteed by normal malloc calls, so don't
  bother calling memalign with an argument of 8 or less.

  Overreliance on memalign is a sure way to fragment space.
*/
void* dlmemalign(size_t, size_t);

/*
  int posix_memalign(void** pp, size_t alignment, size_t n);
  Allocates a chunk of n bytes, aligned in accord with the alignment
  argument. Differs from memalign only in that it (1) assigns the
  allocated memory to *pp rather than returning it, (2) fails and
  returns EINVAL if the alignment is not a power of two, and (3) fails
  and returns ENOMEM if memory cannot be allocated.
*/
int dlposix_memalign(void**, size_t, size_t);

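/*
  A minimal usage sketch (the 64-byte alignment and the 1024-byte size
  are illustrative):

    void* p;
    int rc = posix_memalign(&p, 64, 1024);
    if (rc == 0) {
      // ... use the 64-byte-aligned, 1024-byte chunk ...
      free(p);
    }
    // otherwise rc is EINVAL (alignment not a power of two) or
    // ENOMEM (no space), and p should not be used
*/
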
/*
  valloc(size_t n);
  Equivalent to memalign(pagesize, n), where pagesize is the page
  size of the system. If the pagesize is unknown, 4096 is used.
*/
void* dlvalloc(size_t);

/*
  mallopt(int parameter_number, int parameter_value)
  Sets tunable parameters.  The format is to provide a
  (parameter-number, parameter-value) pair.  mallopt then sets the
  corresponding parameter to the argument value if it can (i.e., so
  long as the value is meaningful), and returns 1 if successful else
  0.  SVID/XPG/ANSI defines four standard param numbers for mallopt,
  normally defined in malloc.h.  None of these are used in this malloc,
  so setting them has no effect. But this malloc also supports other
  options in mallopt:

  Symbol            param #  default    allowed param values
  M_TRIM_THRESHOLD     -1   2*1024*1024   any   (-1U disables trimming)
  M_GRANULARITY        -2     page size   any power of 2 >= page size
  M_MMAP_THRESHOLD     -3      256*1024   any   (or 0 if no MMAP support)
*/
int dlmallopt(int, int);

#define M_TRIM_THRESHOLD     (-1)
#define M_GRANULARITY        (-2)
#define M_MMAP_THRESHOLD     (-3)


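/*
  For example, to disable automatic trimming and raise the mmap
  threshold to 1MB (the values here are illustrative, not
  recommendations):

    mallopt(M_TRIM_THRESHOLD, -1);          // -1 disables trimming
    mallopt(M_MMAP_THRESHOLD, 1024*1024);   // direct-mmap requests >= 1MB
*/
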
/*
  malloc_footprint();
  Returns the number of bytes obtained from the system.  The total
  number of bytes allocated by malloc, realloc etc., is less than this
  value. Unlike mallinfo, this function returns only a precomputed
  result, so can be called frequently to monitor memory consumption.
  Even if locks are otherwise defined, this function does not use them,
  so results might not be up to date.
*/
size_t dlmalloc_footprint(void);

/*
  malloc_max_footprint();
  Returns the maximum number of bytes obtained from the system. This
  value will be greater than current footprint if deallocated space
  has been reclaimed by the system. The peak number of bytes allocated
  by malloc, realloc etc., is less than this value. Unlike mallinfo,
  this function returns only a precomputed result, so can be called
  frequently to monitor memory consumption.  Even if locks are
  otherwise defined, this function does not use them, so results might
  not be up to date.
*/
size_t dlmalloc_max_footprint(void);

/*
  malloc_footprint_limit();
  Returns the number of bytes that the heap is allowed to obtain from
  the system: the last value returned by malloc_set_footprint_limit,
  or the maximum size_t value if it was never set. The returned value
  reflects a permission. There is no guarantee that this number of
  bytes can actually be obtained from the system.
*/
size_t dlmalloc_footprint_limit(void);

/*
  malloc_set_footprint_limit();
  Sets the maximum number of bytes to obtain from the system, causing
  failure returns from malloc and related functions upon attempts to
  exceed this value. The argument value may be subject to page
  rounding to an enforceable limit; the actual enforced value is
  returned. Using an argument of the maximum possible size_t
  effectively disables checks. If the argument is less than or equal
  to the current malloc_footprint, then all future allocations that
  require additional system memory will fail. However, invocation
  cannot retroactively deallocate existing used memory.
*/
size_t dlmalloc_set_footprint_limit(size_t bytes);

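/*
  For example, to cap heap growth at roughly 64MB (the figure is
  illustrative):

    size_t limit = malloc_set_footprint_limit((size_t)64 * 1024 * 1024);
    // limit holds the page-rounded value actually enforced, and
    // malloc_footprint_limit() will return this same value
*/
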
/*
  malloc_inspect_all(void(*handler)(void *start,
                                    void *end,
                                    size_t used_bytes,
                                    void* callback_arg),
                      void* arg);
  Traverses the heap and calls the given handler for each managed
  region, skipping all bytes that are (or may be) used for bookkeeping
  purposes.  Traversal does not include chunks that have been
  directly memory mapped. Each reported region begins at the start
  address, and continues up to but not including the end address.  The
  first used_bytes of the region contain allocated data. If
  used_bytes is zero, the region is unallocated. The handler is
  invoked with the given callback argument. If locks are defined, they
  are held during the entire traversal. It is a bad idea to invoke
  other malloc functions from within the handler.

  For example, to count the number of in-use chunks holding at least
  1000 bytes, you could write:
  static int count = 0;
  void count_chunks(void* start, void* end, size_t used, void* arg) {
    if (used >= 1000) ++count;
  }
  then:
    malloc_inspect_all(count_chunks, NULL);

  malloc_inspect_all is compiled only if MALLOC_INSPECT_ALL is defined.
*/
void dlmalloc_inspect_all(void(*handler)(void*, void *, size_t, void*),
                           void* arg);

#if !NO_MALLINFO
/*
  mallinfo()
  Returns (by copy) a struct containing various summary statistics:

  arena:     current total non-mmapped bytes allocated from system
  ordblks:   the number of free chunks
  smblks:    always zero.
  hblks:     current number of mmapped regions
  hblkhd:    total bytes held in mmapped regions
  usmblks:   the maximum total allocated space. This will be greater
                than current total if trimming has occurred.
  fsmblks:   always zero
  uordblks:  current total allocated space (normal or mmapped)
  fordblks:  total free space
  keepcost:  the maximum number of bytes that could ideally be released
               back to system via malloc_trim. ("ideally" means that
               it ignores page restrictions etc.)

  Because these fields are ints, but internal bookkeeping may
  be kept as longs, the reported values may wrap around zero and
  thus be inaccurate.
*/

struct mallinfo dlmallinfo(void);
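
/*
  For example, to report total allocated bytes against total bytes
  obtained from the system (a sketch; the unsigned long casts assume
  the values fit, and printf requires <stdio.h>):

    struct mallinfo mi = mallinfo();
    printf("in use: %lu of %lu bytes\n",
           (unsigned long)mi.uordblks,
           (unsigned long)(mi.arena + mi.hblkhd));
*/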
#endif  /* NO_MALLINFO */

/*
  independent_calloc(size_t n_elements, size_t element_size, void* chunks[]);

  independent_calloc is similar to calloc, but instead of returning a
  single cleared space, it returns an array of pointers to n_elements
  independent elements that can hold contents of size elem_size, each
  of which starts out cleared, and can be independently freed,
  realloc'ed etc. The elements are guaranteed to be adjacently
  allocated (this is not guaranteed to occur with multiple callocs or
  mallocs), which may also improve cache locality in some
  applications.

  The "chunks" argument is optional (i.e., may be null, which is
  probably the most typical usage). If it is null, the returned array
  is itself dynamically allocated and should also be freed when it is
  no longer needed. Otherwise, the chunks array must be of at least
  n_elements in length. It is filled in with the pointers to the
  chunks.

  In either case, independent_calloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and "chunks"
  is null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be freed when it is no longer needed. This can be
  done all at once using bulk_free.

  independent_calloc simplifies and speeds up implementations of many
  kinds of pools.  It may also be useful when constructing large data
  structures that initially have a fixed number of fixed-sized nodes,
  but the number is not known at compile time, and some of the nodes
  may later need to be freed. For example:

  struct Node { int item; struct Node* next; };

  struct Node* build_list() {
    struct Node** pool;
    int n = read_number_of_nodes_needed();
    int i;
    if (n <= 0) return 0;
    pool = (struct Node**) independent_calloc(n, sizeof(struct Node), 0);
    if (pool == 0) die();
    // organize into a linked list...
    struct Node* first = pool[0];
    for (i = 0; i < n-1; ++i)
      pool[i]->next = pool[i+1];
    free(pool);     // Can now free the array (or not, if it is needed later)
    return first;
  }
*/
void** dlindependent_calloc(size_t, size_t, void**);

/*
  independent_comalloc(size_t n_elements, size_t sizes[], void* chunks[]);

  independent_comalloc allocates, all at once, a set of n_elements
  chunks with sizes indicated in the "sizes" array.  It returns
  an array of pointers to these elements, each of which can be
  independently freed, realloc'ed etc. The elements are guaranteed to
  be adjacently allocated (this is not guaranteed to occur with
  multiple callocs or mallocs), which may also improve cache locality
  in some applications.

  The "chunks" argument is optional (i.e., may be null). If it is null
  the returned array is itself dynamically allocated and should also
  be freed when it is no longer needed. Otherwise, the chunks array
  must be of at least n_elements in length. It is filled in with the
  pointers to the chunks.

  In either case, independent_comalloc returns this pointer array, or
  null if the allocation failed.  If n_elements is zero and chunks is
  null, it returns a chunk representing an array with zero elements
  (which should be freed if not wanted).

  Each element must be freed when it is no longer needed. This can be
  done all at once using bulk_free.

  independent_comalloc differs from independent_calloc in that each
  element may have a different size, and also that it does not
  automatically clear elements.

  independent_comalloc can be used to speed up allocation in cases
  where several structs or objects must always be allocated at the
  same time.  For example:

  struct Head { ... };
  struct Foot { ... };

  void send_message(char* msg) {
    int msglen = strlen(msg);
    size_t sizes[3] = { sizeof(struct Head), msglen, sizeof(struct Foot) };
    void* chunks[3];
    if (independent_comalloc(3, sizes, chunks) == 0)
      die();
    struct Head* head = (struct Head*)(chunks[0]);
    char*        body = (char*)(chunks[1]);
    struct Foot* foot = (struct Foot*)(chunks[2]);
    // ...
  }

  In general though, independent_comalloc is worth using only for
  larger values of n_elements. For small values, you probably won't
  detect enough difference from series of malloc calls to bother.

  Overuse of independent_comalloc can increase overall memory usage,
  since it cannot reuse existing noncontiguous small chunks that
  might be available for some of the elements.
*/
void** dlindependent_comalloc(size_t, size_t*, void**);

/*
  bulk_free(void* array[], size_t n_elements)
  Frees and clears (sets to null) each non-null pointer in the given
  array.  This is likely to be faster than freeing them one-by-one.
  If footers are used, pointers that have been allocated in different
  mspaces are not freed or cleared, and the count of all such pointers
  is returned.  For large arrays of pointers with poor locality, it
  may be worthwhile to sort this array before calling bulk_free.
*/
size_t  dlbulk_free(void**, size_t n_elements);

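/*
  bulk_free pairs naturally with independent_calloc. A sketch that
  frees a caller-supplied pool in one call (illustrative; struct Node
  is the example type used above):

    void* nodes[16];
    if (independent_calloc(16, sizeof(struct Node), nodes) != 0) {
      // ... use the 16 node chunks ...
      bulk_free(nodes, 16);   // frees all 16 and nulls the array entries
    }
*/
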
/*
  pvalloc(size_t n);
  Equivalent to valloc(minimum-page-that-holds(n)), that is,
  round up n to nearest pagesize.
*/
void*  dlpvalloc(size_t);

/*
  malloc_trim(size_t pad);

  If possible, gives memory back to the system (via negative arguments
  to sbrk) if there is unused memory at the `high' end of the malloc
  pool or in unused MMAP segments. You can call this after freeing
  large blocks of memory to potentially reduce the system-level memory
  requirements of a program. However, it cannot guarantee to reduce
  memory. Under some allocation patterns, some large free blocks of
  memory will be locked between two used chunks, so they cannot be
  given back to the system.

  The `pad' argument to malloc_trim represents the amount of free
  trailing space to leave untrimmed. If this argument is zero, only
  the minimum amount of memory to maintain internal data structures
  will be left. Non-zero arguments can be supplied to maintain enough
  trailing space to service future expected allocations without having
  to re-obtain memory from the system.

  Malloc_trim returns 1 if it actually released any memory, else 0.
*/
int  dlmalloc_trim(size_t);

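/*
  For example, after releasing a large working set, a long-running
  program might hand memory back while keeping some slack for future
  allocations (the 128KB pad is illustrative; big_buffer is
  hypothetical):

    free(big_buffer);
    int trimmed = malloc_trim(128 * 1024);   // 1 if memory was released
*/
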
/*
  malloc_stats();
  Prints on stderr the amount of space obtained from the system (both
  via sbrk and mmap), the maximum amount (which may be more than
  current if malloc_trim and/or munmap got called), and the current
  number of bytes allocated via malloc (or realloc, etc) but not yet
  freed. Note that this is the number of bytes allocated, not the
  number requested. It will be larger than the number requested
  because of alignment and bookkeeping overhead. Because it includes
  alignment wastage as being in use, this figure may be greater than
  zero even when no user-level chunks are allocated.

  The reported current and maximum system memory can be inaccurate if
  a program makes other calls to system memory allocation functions
  (normally sbrk) outside of malloc.

  malloc_stats prints only the most commonly interesting statistics.
  More information can be obtained by calling mallinfo.

  malloc_stats is not compiled if NO_MALLOC_STATS is defined.
*/
void  dlmalloc_stats(void);

#endif /* !ONLY_MSPACES */

/*
  malloc_usable_size(void* p);

  Returns the number of bytes you can actually use in
  an allocated chunk, which may be more than you requested (although
  often not) due to alignment and minimum size constraints.
  You can use this many bytes without worrying about
  overwriting other allocated objects. This is not a particularly great
  programming practice. malloc_usable_size can be more useful in
  debugging and assertions, for example:

  p = malloc(n);
  assert(malloc_usable_size(p) >= 256);
*/
size_t dlmalloc_usable_size(const void*);

#if MSPACES

/*
  mspace is an opaque type representing an independent
  region of space that supports mspace_malloc, etc.
*/
typedef void* mspace;

/*
  create_mspace creates and returns a new independent space with the
  given initial capacity, or, if 0, the default granularity size.  It
  returns null if there is no system memory available to create the
  space.  If argument locked is non-zero, the space uses a separate
  lock to control access. The capacity of the space will grow
  dynamically as needed to service mspace_malloc requests.  You can
  control the sizes of incremental increases of this space by
  compiling with a different DEFAULT_GRANULARITY or dynamically
  setting with mallopt(M_GRANULARITY, value).
*/
mspace create_mspace(size_t capacity, int locked);

/*
  destroy_mspace destroys the given space, and attempts to return all
  of its memory back to the system, returning the total number of
  bytes freed. After destruction, the results of access to all memory
  used by the space become undefined.
*/
size_t destroy_mspace(mspace msp);

/*
  create_mspace_with_base uses the memory supplied as the initial base
  of a new mspace. Part (less than 128*sizeof(size_t) bytes) of this
  space is used for bookkeeping, so the capacity must be at least this
  large. (Otherwise 0 is returned.) When this initial space is
  exhausted, additional memory will be obtained from the system.
  Destroying this space will deallocate all additionally allocated
  space (if possible) but not the initial base.
*/
mspace create_mspace_with_base(void* base, size_t capacity, int locked);

/*
  mspace_track_large_chunks controls whether requests for large chunks
  are allocated in their own untracked mmapped regions, separate from
  others in this mspace. By default large chunks are not tracked,
  which reduces fragmentation. However, such chunks are not
  necessarily released to the system upon destroy_mspace.  Enabling
  tracking by setting to true may increase fragmentation, but avoids
  leakage when relying on destroy_mspace to release all memory
  allocated using this space.  The function returns the previous
  setting.
*/
int mspace_track_large_chunks(mspace msp, int enable);

#if !NO_MALLINFO
/*
  mspace_mallinfo behaves as mallinfo, but reports properties of
  the given space.
*/
struct mallinfo mspace_mallinfo(mspace msp);
#endif /* NO_MALLINFO */

/*
  An alias for mallopt.
*/
int mspace_mallopt(int, int);

/*
  The following operate identically to their malloc counterparts
  but operate only for the given mspace argument.
*/
void* mspace_malloc(mspace msp, size_t bytes);
void mspace_free(mspace msp, void* mem);
void* mspace_calloc(mspace msp, size_t n_elements, size_t elem_size);
void* mspace_realloc(mspace msp, void* mem, size_t newsize);
void* mspace_realloc_in_place(mspace msp, void* mem, size_t newsize);
void* mspace_memalign(mspace msp, size_t alignment, size_t bytes);
void** mspace_independent_calloc(mspace msp, size_t n_elements,
                                 size_t elem_size, void* chunks[]);
void** mspace_independent_comalloc(mspace msp, size_t n_elements,
                                   size_t sizes[], void* chunks[]);
size_t mspace_bulk_free(mspace msp, void**, size_t n_elements);
size_t mspace_usable_size(const void* mem);
void mspace_malloc_stats(mspace msp);
int mspace_trim(mspace msp, size_t pad);
size_t mspace_footprint(mspace msp);
size_t mspace_max_footprint(mspace msp);
size_t mspace_footprint_limit(mspace msp);
size_t mspace_set_footprint_limit(mspace msp, size_t bytes);
void mspace_inspect_all(mspace msp,
                        void(*handler)(void *, void *, size_t, void*),
                        void* arg);
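
/*
  A minimal end-to-end sketch of the mspace API (the capacity and all
  names here are illustrative, not prescriptions):

    mspace msp = create_mspace(1024*1024, 0);   // ~1MB initial, no locking
    if (msp != 0) {
      void* p = mspace_malloc(msp, 128);
      // ... use p ...
      mspace_free(msp, p);
      destroy_mspace(msp);   // releases the space's memory wholesale
    }
*/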
#endif  /* MSPACES */

#ifdef __cplusplus
}  /* end of extern "C" */
#endif

#endif /* MALLOC_280_H */