#ifndef JEMALLOC_INTERNAL_EXTENT_STRUCTS_H
#define JEMALLOC_INTERNAL_EXTENT_STRUCTS_H

#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/bitmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/ql.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/size_classes.h"

typedef enum {
	extent_state_active   = 0,
	extent_state_dirty    = 1,
	extent_state_muzzy    = 2,
	extent_state_retained = 3
} extent_state_t;

/* Extent (span of pages).  Use accessor functions for e_* fields. */
struct extent_s {
	/*
	 * Bitfield containing several fields:
	 *
	 * a: arena_ind
	 * b: slab
	 * c: committed
	 * d: dumpable
	 * z: zeroed
	 * t: state
	 * i: szind
	 * f: nfree
	 * n: sn
	 *
	 * nnnnnnnn ... nnnnffff ffffffii iiiiiitt zdcbaaaa aaaaaaaa
	 *
	 * arena_ind: Arena from which this extent came, or all 1 bits if
	 *            unassociated.
	 *
	 * slab: The slab flag indicates whether the extent is used for a slab
	 *       of small regions.  This helps differentiate small size classes,
	 *       and it indicates whether interior pointers can be looked up via
	 *       iealloc().
	 *
	 * committed: The committed flag indicates whether physical memory is
	 *            committed to the extent, whether explicitly or implicitly
	 *            as on a system that overcommits and satisfies physical
	 *            memory needs on demand via soft page faults.
	 *
	 * dumpable: The dumpable flag indicates whether or not we've set the
	 *           memory in question to be dumpable.  Note that this
	 *           interacts somewhat subtly with user-specified extent hooks,
	 *           since we don't know if *they* are fiddling with
	 *           dumpability (in which case, we don't want to undo whatever
	 *           they're doing).  To deal with this scenario, we:
	 *             - Make dumpable false only for memory allocated with the
	 *               default hooks.
	 *             - Only allow memory to go from non-dumpable to dumpable,
	 *               and only once.
	 *             - Never make the OS call to allow dumping when the
	 *               dumpable bit is already set.
	 *           These three constraints mean that we will never
	 *           accidentally dump user memory that the user meant to set
	 *           nondumpable with their extent hooks.
	 *
	 * zeroed: The zeroed flag is used by extent recycling code to track
	 *         whether memory is zero-filled.
	 *
	 * state: The state flag is an extent_state_t.
	 *
	 * szind: The szind flag indicates usable size class index for
	 *        allocations residing in this extent, regardless of whether the
	 *        extent is a slab.  Extent size and usable size often differ
	 *        even for non-slabs, either due to sz_large_pad or promotion of
	 *        sampled small regions.
	 *
	 * nfree: Number of free regions in slab.
	 *
	 * sn: Serial number (potentially non-unique).
	 *
	 *     Serial numbers may wrap around if !opt_retain, but as long as
	 *     comparison functions fall back on address comparison for equal
	 *     serial numbers, stable (if imperfect) ordering is maintained.
	 *
	 *     Serial numbers may not be unique even in the absence of
	 *     wrap-around, e.g. when splitting an extent and assigning the same
	 *     serial number to both resulting adjacent extents.
	 *
	 * (An illustrative accessor sketch follows this struct definition.)
	 */
	uint64_t		e_bits;
#define MASK(CURRENT_FIELD_WIDTH, CURRENT_FIELD_SHIFT)			\
    (((((uint64_t)0x1U) << (CURRENT_FIELD_WIDTH)) - 1) << (CURRENT_FIELD_SHIFT))

#define EXTENT_BITS_ARENA_WIDTH  MALLOCX_ARENA_BITS
#define EXTENT_BITS_ARENA_SHIFT  0
#define EXTENT_BITS_ARENA_MASK  MASK(EXTENT_BITS_ARENA_WIDTH, EXTENT_BITS_ARENA_SHIFT)

#define EXTENT_BITS_SLAB_WIDTH  1
#define EXTENT_BITS_SLAB_SHIFT  (EXTENT_BITS_ARENA_WIDTH + EXTENT_BITS_ARENA_SHIFT)
#define EXTENT_BITS_SLAB_MASK  MASK(EXTENT_BITS_SLAB_WIDTH, EXTENT_BITS_SLAB_SHIFT)

#define EXTENT_BITS_COMMITTED_WIDTH  1
#define EXTENT_BITS_COMMITTED_SHIFT  (EXTENT_BITS_SLAB_WIDTH + EXTENT_BITS_SLAB_SHIFT)
#define EXTENT_BITS_COMMITTED_MASK  MASK(EXTENT_BITS_COMMITTED_WIDTH, EXTENT_BITS_COMMITTED_SHIFT)

#define EXTENT_BITS_DUMPABLE_WIDTH  1
#define EXTENT_BITS_DUMPABLE_SHIFT  (EXTENT_BITS_COMMITTED_WIDTH + EXTENT_BITS_COMMITTED_SHIFT)
#define EXTENT_BITS_DUMPABLE_MASK  MASK(EXTENT_BITS_DUMPABLE_WIDTH, EXTENT_BITS_DUMPABLE_SHIFT)

#define EXTENT_BITS_ZEROED_WIDTH  1
#define EXTENT_BITS_ZEROED_SHIFT  (EXTENT_BITS_DUMPABLE_WIDTH + EXTENT_BITS_DUMPABLE_SHIFT)
#define EXTENT_BITS_ZEROED_MASK  MASK(EXTENT_BITS_ZEROED_WIDTH, EXTENT_BITS_ZEROED_SHIFT)

#define EXTENT_BITS_STATE_WIDTH  2
#define EXTENT_BITS_STATE_SHIFT  (EXTENT_BITS_ZEROED_WIDTH + EXTENT_BITS_ZEROED_SHIFT)
#define EXTENT_BITS_STATE_MASK  MASK(EXTENT_BITS_STATE_WIDTH, EXTENT_BITS_STATE_SHIFT)

#define EXTENT_BITS_SZIND_WIDTH  LG_CEIL_NSIZES
#define EXTENT_BITS_SZIND_SHIFT  (EXTENT_BITS_STATE_WIDTH + EXTENT_BITS_STATE_SHIFT)
#define EXTENT_BITS_SZIND_MASK  MASK(EXTENT_BITS_SZIND_WIDTH, EXTENT_BITS_SZIND_SHIFT)

#define EXTENT_BITS_NFREE_WIDTH  (LG_SLAB_MAXREGS + 1)
#define EXTENT_BITS_NFREE_SHIFT  (EXTENT_BITS_SZIND_WIDTH + EXTENT_BITS_SZIND_SHIFT)
#define EXTENT_BITS_NFREE_MASK  MASK(EXTENT_BITS_NFREE_WIDTH, EXTENT_BITS_NFREE_SHIFT)

#define EXTENT_BITS_SN_SHIFT  (EXTENT_BITS_NFREE_WIDTH + EXTENT_BITS_NFREE_SHIFT)
#define EXTENT_BITS_SN_MASK  (UINT64_MAX << EXTENT_BITS_SN_SHIFT)

	/* Pointer to the extent that this structure is responsible for. */
	void			*e_addr;

	union {
		/*
		 * Extent size and serial number associated with the extent
		 * structure (different from the serial number for the extent
		 * at e_addr).
		 *
		 * ssssssss [...] ssssssss ssssnnnn nnnnnnnn
		 */
		size_t			e_size_esn;
	#define EXTENT_SIZE_MASK	((size_t)~(PAGE-1))
	#define EXTENT_ESN_MASK		((size_t)PAGE-1)
		/* Base extent size, which may not be a multiple of PAGE. */
		size_t			e_bsize;
	};

	/*
	 * List linkage, used by a variety of lists:
	 * - bin_t's slabs_full
	 * - extents_t's LRU
	 * - stashed dirty extents
	 * - arena's large allocations
	 */
	ql_elm(extent_t)	ql_link;

	/*
	 * Linkage for per size class sn/address-ordered heaps, and
	 * for extent_avail.
	 */
	phn(extent_t)		ph_link;

	union {
		/* Small region slab metadata. */
		arena_slab_data_t	e_slab_data;

		/*
		 * Profile counters, used for large objects.  Points to a
		 * prof_tctx_t.
		 */
		atomic_p_t		e_prof_tctx;
	};
};
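
/*
 * Illustrative sketch (editor's addition, not part of jemalloc): how the
 * width/shift/mask macros above are intended to be used to read and write
 * one field packed into e_bits.  The real accessors live elsewhere in
 * jemalloc; the *_sketch names here are hypothetical.  As a worked example,
 * MASK(2, 30) evaluates to 0xc0000000: a field two bits wide, shifted up 30
 * bit positions.
 */
static inline unsigned
extent_nfree_get_sketch(const extent_t *extent) {
	/* Isolate the nfree bits, then shift them down to bit 0. */
	return (unsigned)((extent->e_bits & EXTENT_BITS_NFREE_MASK) >>
	    EXTENT_BITS_NFREE_SHIFT);
}

static inline void
extent_nfree_set_sketch(extent_t *extent, unsigned nfree) {
	/* Clear the old nfree bits, then OR the new value into place. */
	extent->e_bits = (extent->e_bits & ~EXTENT_BITS_NFREE_MASK) |
	    ((uint64_t)nfree << EXTENT_BITS_NFREE_SHIFT);
}
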
typedef ql_head(extent_t) extent_list_t;
typedef ph(extent_t) extent_tree_t;
typedef ph(extent_t) extent_heap_t;
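
/*
 * Illustrative sketch (editor's addition): decoding the e_size_esn union
 * field via the masks above.  Because an extent's size is always a multiple
 * of PAGE, the low lg(PAGE) bits are free to carry the structure's serial
 * number (esn); the *_sketch names are hypothetical.
 */
static inline size_t
extent_size_get_sketch(const extent_t *extent) {
	/* High bits hold the page-aligned size. */
	return (extent->e_size_esn & EXTENT_SIZE_MASK);
}

static inline size_t
extent_esn_get_sketch(const extent_t *extent) {
	/* Low bits hold the extent-structure serial number. */
	return (extent->e_size_esn & EXTENT_ESN_MASK);
}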

/* Quantized collection of extents, with built-in LRU queue. */
struct extents_s {
	malloc_mutex_t		mtx;

	/*
	 * Quantized per size class heaps of extents.
	 *
	 * Synchronization: mtx.
	 */
	extent_heap_t		heaps[NPSIZES+1];

	/*
	 * Bitmap for which set bits correspond to non-empty heaps.
	 *
	 * Synchronization: mtx.
	 */
	bitmap_t		bitmap[BITMAP_GROUPS(NPSIZES+1)];

	/*
	 * LRU of all extents in heaps.
	 *
	 * Synchronization: mtx.
	 */
	extent_list_t		lru;

	/*
	 * Page sum for all extents in heaps.
	 *
	 * The synchronization here is a little tricky.  Modifications to npages
	 * must hold mtx, but reads need not (though, a reader who sees npages
	 * without holding the mutex can't assume anything about the rest of the
	 * state of the extents_t).
	 */
	atomic_zu_t		npages;

	/* All stored extents must be in the same state. */
	extent_state_t		state;

	/*
	 * If true, delay coalescing until eviction; otherwise coalesce during
	 * deallocation.
	 */
	bool			delay_coalesce;
};
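
/*
 * Illustrative sketch (editor's addition): how the bitmap above can let a
 * lookup skip empty heaps.  Per the field comment, a set bit marks a
 * non-empty heap, so starting from the requested page-size index we scan
 * upward for the first set bit.  This hypothetical helper assumes a plain
 * one-bit-per-heap encoding and ignores locking; jemalloc's real lookup
 * goes through its bitmap API (which has its own internal representation)
 * while holding mtx.
 */
static inline size_t
extents_first_nonempty_sketch(const extents_t *extents, size_t pind) {
	for (size_t i = pind; i < NPSIZES + 1; i++) {
		size_t group = i / (sizeof(bitmap_t) * 8);
		size_t bit = i % (sizeof(bitmap_t) * 8);
		if (extents->bitmap[group] & ((bitmap_t)1 << bit)) {
			return i;	/* heaps[i] is non-empty. */
		}
	}
	return NPSIZES + 1;	/* Sentinel: all heaps at or above pind empty. */
}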

#endif /* JEMALLOC_INTERNAL_EXTENT_STRUCTS_H */