/* jemalloc unit test: size-class packing within bin runs/slabs. */
      1 #include "test/jemalloc_test.h"
      2 
      3 /*
      4  * Size class that is a divisor of the page size, ideally 4+ regions per run.
      5  */
      6 #if LG_PAGE <= 14
      7 #define SZ	(ZU(1) << (LG_PAGE - 2))
      8 #else
      9 #define SZ	ZU(4096)
     10 #endif
     11 
     12 /*
     13  * Number of slabs to consume at high water mark.  Should be at least 2 so that
     14  * if mmap()ed memory grows downward, downward growth of mmap()ed memory is
     15  * tested.
     16  */
     17 #define NSLABS	8
     18 
     19 static unsigned
     20 binind_compute(void) {
     21 	size_t sz;
     22 	unsigned nbins, i;
     23 
     24 	sz = sizeof(nbins);
     25 	assert_d_eq(mallctl("arenas.nbins", (void *)&nbins, &sz, NULL, 0), 0,
     26 	    "Unexpected mallctl failure");
     27 
     28 	for (i = 0; i < nbins; i++) {
     29 		size_t mib[4];
     30 		size_t miblen = sizeof(mib)/sizeof(size_t);
     31 		size_t size;
     32 
     33 		assert_d_eq(mallctlnametomib("arenas.bin.0.size", mib,
     34 		    &miblen), 0, "Unexpected mallctlnametomb failure");
     35 		mib[2] = (size_t)i;
     36 
     37 		sz = sizeof(size);
     38 		assert_d_eq(mallctlbymib(mib, miblen, (void *)&size, &sz, NULL,
     39 		    0), 0, "Unexpected mallctlbymib failure");
     40 		if (size == SZ) {
     41 			return i;
     42 		}
     43 	}
     44 
     45 	test_fail("Unable to compute nregs_per_run");
     46 	return 0;
     47 }
     48 
     49 static size_t
     50 nregs_per_run_compute(void) {
     51 	uint32_t nregs;
     52 	size_t sz;
     53 	unsigned binind = binind_compute();
     54 	size_t mib[4];
     55 	size_t miblen = sizeof(mib)/sizeof(size_t);
     56 
     57 	assert_d_eq(mallctlnametomib("arenas.bin.0.nregs", mib, &miblen), 0,
     58 	    "Unexpected mallctlnametomb failure");
     59 	mib[2] = (size_t)binind;
     60 	sz = sizeof(nregs);
     61 	assert_d_eq(mallctlbymib(mib, miblen, (void *)&nregs, &sz, NULL,
     62 	    0), 0, "Unexpected mallctlbymib failure");
     63 	return nregs;
     64 }
     65 
     66 static unsigned
     67 arenas_create_mallctl(void) {
     68 	unsigned arena_ind;
     69 	size_t sz;
     70 
     71 	sz = sizeof(arena_ind);
     72 	assert_d_eq(mallctl("arenas.create", (void *)&arena_ind, &sz, NULL, 0),
     73 	    0, "Error in arenas.create");
     74 
     75 	return arena_ind;
     76 }
     77 
     78 static void
     79 arena_reset_mallctl(unsigned arena_ind) {
     80 	size_t mib[3];
     81 	size_t miblen = sizeof(mib)/sizeof(size_t);
     82 
     83 	assert_d_eq(mallctlnametomib("arena.0.reset", mib, &miblen), 0,
     84 	    "Unexpected mallctlnametomib() failure");
     85 	mib[1] = (size_t)arena_ind;
     86 	assert_d_eq(mallctlbymib(mib, miblen, NULL, NULL, NULL, 0), 0,
     87 	    "Unexpected mallctlbymib() failure");
     88 }
     89 
     90 TEST_BEGIN(test_pack) {
     91 	bool prof_enabled;
     92 	size_t sz = sizeof(prof_enabled);
     93 	if (mallctl("opt.prof", (void *)&prof_enabled, &sz, NULL, 0) == 0) {
     94 		test_skip_if(prof_enabled);
     95 	}
     96 
     97 	unsigned arena_ind = arenas_create_mallctl();
     98 	size_t nregs_per_run = nregs_per_run_compute();
     99 	size_t nregs = nregs_per_run * NSLABS;
    100 	VARIABLE_ARRAY(void *, ptrs, nregs);
    101 	size_t i, j, offset;
    102 
    103 	/* Fill matrix. */
    104 	for (i = offset = 0; i < NSLABS; i++) {
    105 		for (j = 0; j < nregs_per_run; j++) {
    106 			void *p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
    107 			    MALLOCX_TCACHE_NONE);
    108 			assert_ptr_not_null(p,
    109 			    "Unexpected mallocx(%zu, MALLOCX_ARENA(%u) |"
    110 			    " MALLOCX_TCACHE_NONE) failure, run=%zu, reg=%zu",
    111 			    SZ, arena_ind, i, j);
    112 			ptrs[(i * nregs_per_run) + j] = p;
    113 		}
    114 	}
    115 
    116 	/*
    117 	 * Free all but one region of each run, but rotate which region is
    118 	 * preserved, so that subsequent allocations exercise the within-run
    119 	 * layout policy.
    120 	 */
    121 	offset = 0;
    122 	for (i = offset = 0;
    123 	    i < NSLABS;
    124 	    i++, offset = (offset + 1) % nregs_per_run) {
    125 		for (j = 0; j < nregs_per_run; j++) {
    126 			void *p = ptrs[(i * nregs_per_run) + j];
    127 			if (offset == j) {
    128 				continue;
    129 			}
    130 			dallocx(p, MALLOCX_ARENA(arena_ind) |
    131 			    MALLOCX_TCACHE_NONE);
    132 		}
    133 	}
    134 
    135 	/*
    136 	 * Logically refill matrix, skipping preserved regions and verifying
    137 	 * that the matrix is unmodified.
    138 	 */
    139 	offset = 0;
    140 	for (i = offset = 0;
    141 	    i < NSLABS;
    142 	    i++, offset = (offset + 1) % nregs_per_run) {
    143 		for (j = 0; j < nregs_per_run; j++) {
    144 			void *p;
    145 
    146 			if (offset == j) {
    147 				continue;
    148 			}
    149 			p = mallocx(SZ, MALLOCX_ARENA(arena_ind) |
    150 			    MALLOCX_TCACHE_NONE);
    151 			assert_ptr_eq(p, ptrs[(i * nregs_per_run) + j],
    152 			    "Unexpected refill discrepancy, run=%zu, reg=%zu\n",
    153 			    i, j);
    154 		}
    155 	}
    156 
    157 	/* Clean up. */
    158 	arena_reset_mallctl(arena_ind);
    159 }
    160 TEST_END
    161 
    162 int
    163 main(void) {
    164 	return test(
    165 	    test_pack);
    166 }
    167