/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

/*
 * Size and alignment of memory chunks that are allocated by the OS's virtual
 * memory system.
 */
#ifdef ANDROID_LG_CHUNK_DEFAULT
/* Android builds may override the default lg(chunk size) at configure time. */
#define	LG_CHUNK_DEFAULT	ANDROID_LG_CHUNK_DEFAULT
#else
/* Default lg(chunk size): 2^21 == 2 MiB. */
#define	LG_CHUNK_DEFAULT	21
#endif

/* Return the chunk address for allocation address a. */
#define	CHUNK_ADDR2BASE(a)						\
	((void *)((uintptr_t)(a) & ~chunksize_mask))

/* Return the chunk offset of address a. */
#define	CHUNK_ADDR2OFFSET(a)						\
	((size_t)((uintptr_t)(a) & chunksize_mask))

/* Return the smallest chunk multiple that is >= s. */
#define	CHUNK_CEILING(s)						\
	(((s) + chunksize_mask) & ~chunksize_mask)

/*
 * Initializer that leaves all seven chunk_hooks_t function slots NULL.
 * NOTE(review): presumably NULL slots fall back to the built-in behavior
 * (see chunk_hooks_default below) — confirm against chunk.c.
 */
#define	CHUNK_HOOKS_INITIALIZER {					\
    NULL,								\
    NULL,								\
    NULL,								\
    NULL,								\
    NULL,								\
    NULL,								\
    NULL								\
}

#endif /* JEMALLOC_H_TYPES */
     37 /******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

/* This header declares no chunk-specific structs. */
#endif /* JEMALLOC_H_STRUCTS */
     41 /******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

/* Runtime options: lg(chunk size) and dss ("sbrk") allocation precedence. */
extern size_t		opt_lg_chunk;
extern const char	*opt_dss;

/* Radix tree mapping chunk addresses to their extent nodes (see chunk_lookup). */
extern rtree_t		chunks_rtree;

extern size_t		chunksize;
extern size_t		chunksize_mask; /* (chunksize - 1). */
extern size_t		chunk_npages;

/* Hook table used when an arena has not installed custom hooks. */
extern const chunk_hooks_t	chunk_hooks_default;

/* Get/set the chunk hooks for an arena; set returns the previous hooks. */
chunk_hooks_t	chunk_hooks_get(arena_t *arena);
chunk_hooks_t	chunk_hooks_set(arena_t *arena,
    const chunk_hooks_t *chunk_hooks);

/* Register/deregister a chunk's extent node in chunks_rtree. */
bool	chunk_register(const void *chunk, const extent_node_t *node);
void	chunk_deregister(const void *chunk, const extent_node_t *node);
/* Allocate a size-byte chunk for internal (base) metadata use. */
void	*chunk_alloc_base(size_t size);
/*
 * Chunk allocation/deallocation entry points.  "cache" variants
 * presumably go through the arena's cached-chunk state, while "wrapper"
 * variants invoke the supplied chunk_hooks — confirm in chunk.c.
 */
void	*chunk_alloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, bool *zero,
    bool dalloc_node);
void	*chunk_alloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *new_addr, size_t size, size_t alignment, bool *zero, bool *commit);
void	chunk_dalloc_cache(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, bool committed);
void	chunk_dalloc_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, bool zeroed, bool committed);
/* Purge [offset, offset+length) within chunk; returns true on failure. */
bool	chunk_purge_wrapper(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t size, size_t offset, size_t length);
/* One-time initialization; returns true on failure. */
bool	chunk_boot(void);
/* fork(2) handlers for the chunk module's global state. */
void	chunk_prefork(void);
void	chunk_postfork_parent(void);
void	chunk_postfork_child(void);

#endif /* JEMALLOC_H_EXTERNS */
     79 /******************************************************************************/
#ifdef JEMALLOC_H_INLINES

/* Non-inline prototype for builds that compile the inlines out of line. */
#ifndef JEMALLOC_ENABLE_INLINE
extent_node_t	*chunk_lookup(const void *chunk, bool dependent);
#endif

     86 #if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_CHUNK_C_))
     87 JEMALLOC_INLINE extent_node_t *
     88 chunk_lookup(const void *ptr, bool dependent)
     89 {
     90 
     91 	return (rtree_get(&chunks_rtree, (uintptr_t)ptr, dependent));
     92 }
     93 #endif
     94 
     95 #endif /* JEMALLOC_H_INLINES */
     96 /******************************************************************************/
     97 
     98 #include "jemalloc/internal/chunk_dss.h"
     99 #include "jemalloc/internal/chunk_mmap.h"
    100