#include <time.h>
#include <stdlib.h>
#include <stdint.h>
#include <assert.h>
#include <string.h>
#include <stdio.h>

#include "../memcheck.h"

// Test the VALGRIND_CREATE_MEMPOOL_EXT features: the VALGRIND_MEMPOOL_METAPOOL
// and VALGRIND_MEMPOOL_AUTO_FREE flags.
// Also show that, without these flags, a custom allocator that:
// - allocates a MEMPOOL,
// - uses ITSELF to get large blocks to populate the pool (so these are marked
//   as MALLOCLIKE blocks),
// - then hands out MALLOCLIKE blocks carved from these pool blocks
// was not previously supported by the 'loose model' for mempools in memcheck,
// because memcheck spotted these (correctly) as overlapping blocks (test case
// 5 below).
// The VALGRIND_MEMPOOL_METAPOOL flag says not to treat these as overlaps.
//
// Also, when one of these metapool blocks is freed, memcheck does not
// auto-free the MALLOCLIKE blocks allocated from the metapool, and therefore
// reports them as leaks. When VALGRIND_MEMPOOL_AUTO_FREE is passed, no such
// leaks are reported. This is for custom allocators that destroy a pool
// without freeing the objects allocated from it, because that is the defined
// behaviour of the allocator.
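
// For reference, the condensed client-request sequence this metapool-style
// allocator issues ('pool', 'block', 'obj' and 'n' are placeholder names):
//
//   VALGRIND_CREATE_MEMPOOL_EXT(pool, 0 /*rzB*/, 0 /*is_zeroed*/,
//            VALGRIND_MEMPOOL_METAPOOL | VALGRIND_MEMPOOL_AUTO_FREE);
//   VALGRIND_MEMPOOL_ALLOC(pool, block, size);  // large block backing the pool
//   VALGRIND_MALLOCLIKE_BLOCK(obj, n, 0, 0);    // object carved out of 'block'
//   ...
//   VALGRIND_MEMPOOL_FREE(pool, block);   // AUTO_FREE also releases the objs
//   VALGRIND_DESTROY_MEMPOOL(pool);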

struct pool
{
  size_t allocated;
  size_t used;
  uint8_t *buf;
};

struct cell
{
  struct cell *next;
  char x[16 - sizeof(void*)];
};
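
// With the padding in 'x', a cell is 16 bytes regardless of pointer size,
// so every test allocation is a small fixed-size chunk.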

static struct pool _PlainPool, *PlainPool = &_PlainPool;
static struct pool _MetaPool,  *MetaPool  = &_MetaPool;

#define N 10
#define POOL_BLOCK_SIZE   4096
#define NOISE_SIZE        256

// For easy testing, the plain mempool uses N allocations and the metapool
// 2 * N, so 10 reported leaks must come from the plain pool and 20 from
// the metapool.

static int    MetaPoolFlags = 0;
static int    CleanupBeforeExit = 0;
static int    GenerateNoise = 0;
static int    NoiseCounter = 0;

static struct cell *cells_plain[2 * N];
static struct cell *cells_meta[2 * N];

static unsigned char *noise[3 * N];

static char   PlainBlock[POOL_BLOCK_SIZE];
static char   MetaBlock[POOL_BLOCK_SIZE];

static void create_meta_pool (void)
{
   VALGRIND_CREATE_MEMPOOL_EXT(MetaPool, 0, 0, MetaPoolFlags);
   VALGRIND_MEMPOOL_ALLOC(MetaPool, MetaBlock, POOL_BLOCK_SIZE);

   MetaPool->buf = (uint8_t *) MetaBlock;
   MetaPool->allocated = POOL_BLOCK_SIZE;
   MetaPool->used = 0;

   /* A pool-block is expected to have metadata, and the valgrind core
      treats a MALLOCLIKE_BLOCK that starts at the same address as a
      MEMPOOLBLOCK as being that MEMPOOLBLOCK, hence never as a leak.
      Introduce some such simulated metadata.
   */

   MetaPool->buf  += sizeof(uint8_t);
   MetaPool->used += sizeof(uint8_t);
}

static void create_plain_pool (void)
{
   VALGRIND_CREATE_MEMPOOL(PlainPool, 0, 0);

   PlainPool->buf = (uint8_t *) PlainBlock;
   PlainPool->allocated = POOL_BLOCK_SIZE;
   PlainPool->used = 0;

   /* Same simulated metadata overhead */
   PlainPool->buf  += sizeof(uint8_t);
   PlainPool->used += sizeof(uint8_t);
}

static void *allocate_meta_style (struct pool *p, size_t n)
{
  void *a = p->buf + p->used;
  assert(p->used + n < p->allocated);

  // Simulate a custom allocator that allocates memory either directly for
  // the application or for a custom memory pool: all are marked as MALLOCLIKE.
  VALGRIND_MALLOCLIKE_BLOCK(a, n, 0, 0);
  p->used += n;

  return a;
}
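
// Each block handed out above lies inside MetaBlock, which is itself
// registered via VALGRIND_MEMPOOL_ALLOC: exactly the overlap that the
// METAPOOL flag permits (and that is fatal without it, see case 5 below).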

static void *allocate_plain_style (struct pool *p, size_t n)
{
  void *a = p->buf + p->used;
  assert(p->used + n < p->allocated);

  // And this is a custom allocator that knows it is allocating from a pool.
  VALGRIND_MEMPOOL_ALLOC(p, a, n);
  p->used += n;

  return a;
}
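
// Unlike the meta-style blocks, these MEMPOOL_ALLOC'd items are destroyed
// automatically when their pool is destroyed (see case 0 below).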

/* flags */

static void set_flags ( int n )
{
  switch (n) {
     // Case 0: No special flags. VALGRIND_CREATE_MEMPOOL_EXT is the same as
     // VALGRIND_CREATE_MEMPOOL.
     // When the mempools are destroyed, the metapool's MALLOCLIKE blocks
     // leak because auto-free is missing. Must show 2*N (20) leaks.
     // The VALGRIND_MEMPOOL_ALLOC items from the plain pool are automatically
     // destroyed. CleanupBeforeExit means the metapool is freed and destroyed
     // (simulating an app that cleans up before it exits); when false, the
     // app simply exits with the pool unaltered.
     case 0:
        MetaPoolFlags     = 0;
        CleanupBeforeExit = 1;
        break;

     // Case 1: VALGRIND_MEMPOOL_METAPOOL, no auto-free.
     // Without an explicit free, these MALLOCLIKE_BLOCK blocks are considered
     // leaks. So this case should show the same as case 0: 20 leaks.
     case 1:
        MetaPoolFlags     = VALGRIND_MEMPOOL_METAPOOL;
        CleanupBeforeExit = 1;
        break;

     // Case 2: same as case 1, but now the MALLOCLIKE blocks are auto-freed.
     // Must show 0 leaks.
     case 2:
        MetaPoolFlags = VALGRIND_MEMPOOL_METAPOOL | VALGRIND_MEMPOOL_AUTO_FREE;
        CleanupBeforeExit = 1;
        break;

     // Case 3: just auto-free, not marked with the metapool flag.
     // Note: this is incorrect usage, and will cause valgrind to abort when
     // the pool is created (so it is not exercised during regression testing).
     case 3:
        MetaPoolFlags     = VALGRIND_MEMPOOL_AUTO_FREE;
        CleanupBeforeExit = 1;
        break;

     // Case 4: no auto-free, no cleanup. Leaves overlapping blocks that
     // valgrind would detect, but they are ignored because of the METAPOOL
     // flag. So: no crash, no problems, but 20 leaks.
     case 4:
        MetaPoolFlags     = VALGRIND_MEMPOOL_METAPOOL;
        CleanupBeforeExit = 0;
        break;

     // Case 5: main reason for the VALGRIND_MEMPOOL_METAPOOL flag. When it
     // is not specified and the application has a memory pool containing
     // overlapping MALLOCLIKE allocations, the overlapping block(s) cause a
     // fatal error. The METAPOOL flag allows the overlap; this test must
     // show that without the flag, a fatal error occurs.
     case 5:
        MetaPoolFlags     = 0;
        CleanupBeforeExit = 0;
        break;

     // Case 6: test the VG_(HT_remove_at_Iter)() function, which removes a
     // chunk from a hash list without the need to reset the iterator. The
     // pool is auto-freed, and the best test of the function (besides the
     // cases already done above) is to allocate lots of other chunks that
     // are NOT part of the pool, so the MC_Alloc lists contain other stuff.
     // That will make the iterator find stuff AND skip stuff.
     case 6:
        MetaPoolFlags     = VALGRIND_MEMPOOL_METAPOOL | VALGRIND_MEMPOOL_AUTO_FREE;
        CleanupBeforeExit = 1;
        GenerateNoise     = 1;
        break;

     default:
        assert(0);
  }
}
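
// Expected results per case, as described above: cases 0, 1 and 4 show 20
// leaks; cases 2 and 6 show none; case 3 makes valgrind abort at pool
// creation; case 5 triggers a fatal overlapping-block error.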

static void GenerateNoisyBit (void)
{
   // If HT_remove_at_Iter were to mess up the administration, the wrong
   // blocks might be deleted from the list, making accesses to these noise
   // blocks invalid. So fill the 256-byte blocks with easily checked
   // contents.

   noise[NoiseCounter] = malloc(NOISE_SIZE);
   assert(noise[NoiseCounter] != NULL);
   memset(noise[NoiseCounter], (unsigned char) (NoiseCounter % 256),
          NOISE_SIZE);
   NoiseCounter++;
}

static void CheckNoiseContents (void)
{
   int i;

   for (i = 0; i < NoiseCounter; i++) {
      unsigned char Check = (unsigned char) (i % 256);
      int j;

      for (j = 0; j < NOISE_SIZE; j++) {
         assert(noise[i][j] == Check);
      }
   }
}

int main( int argc, char** argv )
{
   int arg;
   size_t i;

   assert(argc == 2 || argc == 3);
   assert(argv[1]);
   assert(strlen(argv[1]) == 1);
   assert(argv[1][0] >= '0' && argv[1][0] <= '9');
   arg = atoi( argv[1] );
   set_flags( arg );

   create_plain_pool();
   create_meta_pool();

   // N plain allocs
   for (i = 0; i < N; ++i) {
      cells_plain[i] = allocate_plain_style(PlainPool, sizeof(struct cell));

      if (GenerateNoise)
         GenerateNoisyBit();
   }

   // 2*N meta allocs
   for (i = 0; i < 2 * N; ++i) {
      cells_meta[i] = allocate_meta_style(MetaPool, sizeof(struct cell));

      if (GenerateNoise)
         GenerateNoisyBit();
   }

   // Leak the memory from the pools by losing the pointers.
   for (i = 0; i < N; ++i) {
      cells_plain[i] = NULL;
   }

   for (i = 0; i < 2 * N; ++i) {
      cells_meta[i] = NULL;
   }

   if (GenerateNoise)
      CheckNoiseContents();

   // This must free the MALLOCLIKE allocations from the pool when
   // VALGRIND_MEMPOOL_AUTO_FREE is set for the pool, and report leaks
   // when it is not.

   if (CleanupBeforeExit) {
      VALGRIND_MEMPOOL_FREE(MetaPool, MetaBlock);

      if (GenerateNoise)
         CheckNoiseContents();

      VALGRIND_DESTROY_MEMPOOL(MetaPool);

      if (GenerateNoise)
         CheckNoiseContents();
   }

   // Cleanup.
   VALGRIND_DESTROY_MEMPOOL(PlainPool);

   if (GenerateNoise)
      CheckNoiseContents();

   // Try to trigger an error in the bookkeeping by freeing the noise bits.
   // Valgrind should report no leaks, and zero memory in use. If the new
   // HT_remove_at_Iter function corrupted the bookkeeping in any way, this
   // should bring it out!
   if (GenerateNoise) {
      for (i = 0; i < NoiseCounter; i++)
         free(noise[i]);
   }

   // Perf test
   if (argc == 3) {
      struct pool perf_plain_pool;
      void *perf_plain_block;
      struct pool perf_meta_pool;
      void *perf_meta_block;
      size_t pool_block_size;
      int n;
      int nr_elts = atoi( argv[2] );
      time_t dnow;
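      // tprintf: printf plus a trailing timestamp, so the duration of each
      // perf phase can be read from the run's output.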
#define tprintf(...) (dnow = time(NULL),          \
                      printf(__VA_ARGS__),        \
                      printf(" %s", ctime(&dnow)))

      pool_block_size = nr_elts * sizeof(struct cell) + sizeof(uint8_t) + 1;
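      // Room for nr_elts cells plus the one byte of simulated metadata; the
      // extra +1 keeps the strict '<' assert in the allocators holding for
      // the final element.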

      // Create perf meta pool
      VALGRIND_CREATE_MEMPOOL_EXT
         (&perf_meta_pool, 0, 0,
          VALGRIND_MEMPOOL_METAPOOL | VALGRIND_MEMPOOL_AUTO_FREE);
      perf_meta_block = malloc(pool_block_size);

      VALGRIND_MEMPOOL_ALLOC(&perf_meta_pool, perf_meta_block,
                             pool_block_size);

      perf_meta_pool.buf = (uint8_t *) perf_meta_block;
      perf_meta_pool.allocated = pool_block_size;
      perf_meta_pool.used = 0;

      perf_meta_pool.buf  += sizeof(uint8_t);
      perf_meta_pool.used += sizeof(uint8_t);

      // Create perf plain pool
      VALGRIND_CREATE_MEMPOOL(&perf_plain_pool, 0, 0);
      perf_plain_block = malloc(pool_block_size);

      perf_plain_pool.buf = (uint8_t *) perf_plain_block;
      perf_plain_pool.allocated = pool_block_size;
      perf_plain_pool.used = 0;

      perf_plain_pool.buf  += sizeof(uint8_t);
      perf_plain_pool.used += sizeof(uint8_t);

      tprintf("allocating %d elts", nr_elts);
      for (n = 0; n < nr_elts; n++) {
         (void) allocate_meta_style (&perf_meta_pool, sizeof(struct cell));
         (void) allocate_plain_style (&perf_plain_pool, sizeof(struct cell));
      }

      tprintf("freeing mempool");
      VALGRIND_MEMPOOL_FREE(&perf_meta_pool, perf_meta_block);
      tprintf("destroying mempool");
      VALGRIND_DESTROY_MEMPOOL(&perf_meta_pool);
      tprintf("done");
   }

   return 0;
}