/******************************************************************************/
#ifdef JEMALLOC_H_TYPES

typedef struct tcache_bin_stats_s tcache_bin_stats_t;
typedef struct malloc_bin_stats_s malloc_bin_stats_t;
typedef struct malloc_large_stats_s malloc_large_stats_t;
typedef struct malloc_huge_stats_s malloc_huge_stats_t;
typedef struct arena_stats_s arena_stats_t;
typedef struct chunk_stats_s chunk_stats_t;

#endif /* JEMALLOC_H_TYPES */
/******************************************************************************/
#ifdef JEMALLOC_H_STRUCTS

struct tcache_bin_stats_s {
	/*
	 * Number of allocation requests that corresponded to the size of this
	 * bin.
	 */
	uint64_t	nrequests;
};

struct malloc_bin_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the bin.  Note that tcache may allocate an object, then recycle it
	 * many times, resulting in many increments to nrequests, but only one
	 * increment each to nmalloc and ndalloc.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to the size of this
	 * bin.  This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t	nrequests;

	/*
	 * Current number of regions of this size class, including regions
	 * currently cached by tcache.
	 */
	size_t		curregs;

	/* Number of tcache fills from this bin. */
	uint64_t	nfills;

	/* Number of tcache flushes to this bin. */
	uint64_t	nflushes;

	/* Total number of runs created for this bin's size class. */
	uint64_t	nruns;

	/*
	 * Total number of runs reused by extracting them from the runs tree
	 * for this bin's size class.
	 */
	uint64_t	reruns;

	/* Current number of runs in this bin. */
	size_t		curruns;
};
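
/*
 * Illustrative sketch, not part of this header: consumers normally read
 * these counters through the mallctl() interface rather than via the
 * structs directly, using the documented "stats.arenas.<i>.bins.<j>.*"
 * names (error handling omitted).  Because tcache merges only
 * periodically, an "epoch" refresh is needed first:
 *
 *	uint64_t epoch = 1, nrequests;
 *	size_t curregs, sz;
 *
 *	// Take a fresh snapshot of the statistics.
 *	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
 *
 *	sz = sizeof(nrequests);
 *	mallctl("stats.arenas.0.bins.0.nrequests", &nrequests, &sz, NULL, 0);
 *	sz = sizeof(curregs);
 *	mallctl("stats.arenas.0.bins.0.curregs", &curregs, &sz, NULL, 0);
 */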

struct malloc_large_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.  Note that tcache may allocate an object, then recycle it
	 * many times, resulting in many increments to nrequests, but only one
	 * increment each to nmalloc and ndalloc.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;

	/*
	 * Number of allocation requests that correspond to this size class.
	 * This includes requests served by tcache, though tcache only
	 * periodically merges into this counter.
	 */
	uint64_t	nrequests;

	/*
	 * Current number of runs of this size class, including runs currently
	 * cached by tcache.
	 */
	size_t		curruns;
};

struct malloc_huge_stats_s {
	/*
	 * Total number of allocation/deallocation requests served directly by
	 * the arena.
	 */
	uint64_t	nmalloc;
	uint64_t	ndalloc;

	/* Current number of (multi-)chunk allocations of this size class. */
	size_t		curhchunks;
};

struct arena_stats_s {
	/* Number of bytes currently mapped. */
	size_t		mapped;

	/*
	 * Total number of purge sweeps, total number of madvise calls made,
	 * and total pages purged in order to keep dirty unused memory under
	 * control.
	 */
	uint64_t	npurge;
	uint64_t	nmadvise;
	uint64_t	purged;

	/*
	 * Number of bytes currently mapped purely for metadata purposes, and
	 * number of bytes currently allocated for internal metadata.
	 */
	size_t		metadata_mapped;
	size_t		metadata_allocated; /* Protected via atomic_*_z(). */

	/* Per-size-category statistics. */
	size_t		allocated_large;
	uint64_t	nmalloc_large;
	uint64_t	ndalloc_large;
	uint64_t	nrequests_large;

	size_t		allocated_huge;
	uint64_t	nmalloc_huge;
	uint64_t	ndalloc_huge;

	/* One element for each large size class. */
	malloc_large_stats_t	*lstats;

	/* One element for each huge size class. */
	malloc_huge_stats_t	*hstats;
};
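
/*
 * Illustrative sketch, assuming (as the increments elsewhere in the arena
 * code suggest) that the arena-wide large totals track the sums over the
 * per-size-class lstats entries; nlclasses is jemalloc's count of large
 * size classes:
 *
 *	uint64_t nmalloc_sum = 0;
 *	unsigned i;
 *
 *	for (i = 0; i < nlclasses; i++)
 *		nmalloc_sum += astats->lstats[i].nmalloc;
 *	assert(nmalloc_sum == astats->nmalloc_large);
 */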

#endif /* JEMALLOC_H_STRUCTS */
/******************************************************************************/
#ifdef JEMALLOC_H_EXTERNS

extern bool	opt_stats_print;

extern size_t	stats_cactive;

void	stats_print(void (*write)(void *, const char *), void *cbopaque,
    const char *opts);
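
/*
 * Illustrative sketch, not part of this header: stats_print() emits its
 * report as a sequence of strings through the supplied write callback, so
 * the caller chooses the destination; the public malloc_stats_print()
 * wrapper shares this signature.  Passing NULL for opts selects the full
 * report:
 *
 *	static void
 *	write_cb(void *cbopaque, const char *s)
 *	{
 *		fputs(s, (FILE *)cbopaque);
 *	}
 *
 *	stats_print(write_cb, (void *)stderr, NULL);
 */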

#endif /* JEMALLOC_H_EXTERNS */
/******************************************************************************/
#ifdef JEMALLOC_H_INLINES

#ifndef JEMALLOC_ENABLE_INLINE
size_t	stats_cactive_get(void);
void	stats_cactive_add(size_t size);
void	stats_cactive_sub(size_t size);
#endif

#if (defined(JEMALLOC_ENABLE_INLINE) || defined(JEMALLOC_STATS_C_))
JEMALLOC_INLINE size_t
stats_cactive_get(void)
{

	return (atomic_read_z(&stats_cactive));
}

JEMALLOC_INLINE void
stats_cactive_add(size_t size)
{
	UNUSED size_t cactive;

	/* Sizes must be non-zero multiples of the chunk size. */
	assert(size > 0);
	assert((size & chunksize_mask) == 0);

	cactive = atomic_add_z(&stats_cactive, size);
	/* Detect size_t wraparound of the counter. */
	assert(cactive - size < cactive);
}

JEMALLOC_INLINE void
stats_cactive_sub(size_t size)
{
	UNUSED size_t cactive;

	/* Sizes must be non-zero multiples of the chunk size. */
	assert(size > 0);
	assert((size & chunksize_mask) == 0);

	cactive = atomic_sub_z(&stats_cactive, size);
	/* Detect underflow (subtracting more than was ever added). */
	assert(cactive + size > cactive);
}
#endif
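
/*
 * Illustrative sketch, not part of this header: the chunk-alignment
 * assertions above exist because cactive is maintained at chunk
 * granularity.  A caller mapping n chunks would account for them as
 * follows (chunksize is the global chunk size):
 *
 *	stats_cactive_add(n * chunksize);	// on allocation
 *	...
 *	stats_cactive_sub(n * chunksize);	// on deallocation
 *
 * stats_cactive_get() is the reader side; the counter backs the
 * "stats.cactive" mallctl.
 */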

#endif /* JEMALLOC_H_INLINES */
/******************************************************************************/