//===-- sanitizer_allocator_test.cc ---------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file is a part of ThreadSanitizer/AddressSanitizer runtime.
// Tests for sanitizer_allocator.h.
//
//===----------------------------------------------------------------------===//
#include "sanitizer_common/sanitizer_allocator.h"
#include "sanitizer_common/sanitizer_allocator_internal.h"
#include "sanitizer_common/sanitizer_common.h"

#include "sanitizer_test_utils.h"

#include "gtest/gtest.h"

#include <stdlib.h>
#include <string.h>  // For memset().
#include <pthread.h>
#include <algorithm>
#include <vector>
#include <set>

// Too slow for debug build
#if TSAN_DEBUG == 0

#if SANITIZER_WORDSIZE == 64
static const uptr kAllocatorSpace = 0x700000000000ULL;
static const uptr kAllocatorSize  = 0x010000000000ULL;  // 1T.
static const u64 kAddressSpaceSize = 1ULL << 47;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap> Allocator64;

typedef SizeClassAllocator64<
  kAllocatorSpace, kAllocatorSize, 16, CompactSizeClassMap> Allocator64Compact;
#else
static const u64 kAddressSpaceSize = 1ULL << 32;
#endif

static const uptr kRegionSizeLog = FIRST_32_SECOND_64(20, 24);
static const uptr kFlatByteMapSize = kAddressSpaceSize >> kRegionSizeLog;

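// A 32-bit-style allocator covering [0, kAddressSpaceSize), keeping one byte
// of per-region state for every 2^kRegionSizeLog bytes in a FlatByteMap.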
typedef SizeClassAllocator32<
  0, kAddressSpaceSize,
  /*kMetadataSize*/16,
  CompactSizeClassMap,
  kRegionSizeLog,
  FlatByteMap<kFlatByteMapSize> >
  Allocator32Compact;

template <class SizeClassMap>
void TestSizeClassMap() {
  typedef SizeClassMap SCMap;
  // SCMap::Print();
  SCMap::Validate();
}

TEST(SanitizerCommon, DefaultSizeClassMap) {
  TestSizeClassMap<DefaultSizeClassMap>();
}

TEST(SanitizerCommon, CompactSizeClassMap) {
  TestSizeClassMap<CompactSizeClassMap>();
}

TEST(SanitizerCommon, InternalSizeClassMap) {
  TestSizeClassMap<InternalSizeClassMap>();
}

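// Generic torture test: allocates chunks of many size classes over several
// rounds, verifies block boundaries and metadata round-trips, frees
// everything, and checks that total memory usage does not grow across rounds.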
template <class Allocator>
void TestSizeClassAllocator() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  uptr last_total_allocated = 0;
  for (int i = 0; i < 3; i++) {
    // Allocate a bunch of chunks.
    for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
      uptr size = sizes[s];
      if (!a->CanAllocate(size, 1)) continue;
      // printf("s = %ld\n", size);
      uptr n_iter = std::max((uptr)6, 8000000 / size);
      // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
      for (uptr j = 0; j < n_iter; j++) {
        uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
        char *x = (char*)cache.Allocate(a, class_id0);
        x[0] = 0;
        x[size - 1] = 0;
        x[size / 2] = 0;
        allocated.push_back(x);
        CHECK_EQ(x, a->GetBlockBegin(x));
        CHECK_EQ(x, a->GetBlockBegin(x + size - 1));
        CHECK(a->PointerIsMine(x));
        CHECK(a->PointerIsMine(x + size - 1));
        CHECK(a->PointerIsMine(x + size / 2));
        CHECK_GE(a->GetActuallyAllocatedSize(x), size);
        uptr class_id = a->GetSizeClass(x);
        CHECK_EQ(class_id, Allocator::SizeClassMapT::ClassID(size));
        uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
        metadata[0] = reinterpret_cast<uptr>(x) + 1;
        metadata[1] = 0xABCD;
      }
    }
    // Deallocate all.
    for (uptr i = 0; i < allocated.size(); i++) {
      void *x = allocated[i];
      uptr *metadata = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(metadata[0], reinterpret_cast<uptr>(x) + 1);
      CHECK_EQ(metadata[1], 0xABCD);
      cache.Deallocate(a, a->GetSizeClass(x), x);
    }
    allocated.clear();
    uptr total_allocated = a->TotalMemoryUsed();
    if (last_total_allocated == 0)
      last_total_allocated = total_allocated;
    CHECK_EQ(last_total_allocated, total_allocated);
  }

  // Check that GetBlockBegin never crashes.
  for (uptr x = 0, step = kAddressSpaceSize / 100000;
       x < kAddressSpaceSize - step; x += step)
    if (a->PointerIsMine(reinterpret_cast<void *>(x)))
      Ident(a->GetBlockBegin(reinterpret_cast<void *>(x)));

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64) {
  TestSizeClassAllocator<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64Compact) {
  TestSizeClassAllocator<Allocator64Compact>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Compact) {
  TestSizeClassAllocator<Allocator32Compact>();
}

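// Checks that GetMetaData() is stable: the metadata pointer recorded at
// allocation time must match what repeated lookups return afterwards.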
template <class Allocator>
void SizeClassAllocatorMetadataStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 1 << 13;
  void *allocated[kNumAllocs];
  void *meta[kNumAllocs];
  for (uptr i = 0; i < kNumAllocs; i++) {
    void *x = cache.Allocate(a, 1 + i % 50);
    allocated[i] = x;
    meta[i] = a->GetMetaData(x);
  }
  // Get metadata kNumAllocs^2 times.
  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    uptr idx = i % kNumAllocs;
    void *m = a->GetMetaData(allocated[idx]);
    EXPECT_EQ(m, meta[idx]);
  }
  for (uptr i = 0; i < kNumAllocs; i++) {
    cache.Deallocate(a, 1 + i % 50, allocated[i]);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64>();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator64Compact>();
}
#endif  // SANITIZER_WORDSIZE == 64

TEST(SanitizerCommon, SizeClassAllocator32CompactMetadataStress) {
  SizeClassAllocatorMetadataStress<Allocator32Compact>();
}

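// Repeatedly allocates chunks of the largest size class and checks that
// GetBlockBegin() maps each chunk back to its own beginning, even once the
// total exceeds 8G (guarding against arithmetic overflow in the computation).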
template <class Allocator>
void SizeClassAllocatorGetBlockBeginStress() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  uptr max_size_class = Allocator::kNumClasses - 1;
  uptr size = Allocator::SizeClassMapT::Size(max_size_class);
  u64 G8 = 1ULL << 33;
  // Make sure we correctly compute GetBlockBegin() w/o overflow.
  for (size_t i = 0; i <= G8 / size; i++) {
    void *x = cache.Allocate(a, max_size_class);
    void *beg = a->GetBlockBegin(x);
    // if ((i & (i - 1)) == 0)
    //   fprintf(stderr, "[%zd] %p %p\n", i, x, beg);
    EXPECT_EQ(x, beg);
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64GetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64>();
}
TEST(SanitizerCommon, SizeClassAllocator64CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator64Compact>();
}
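// The 32-bit allocator variant also runs only on 64-bit hosts: the stress
// loop above allocates on the order of 8G of memory.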
TEST(SanitizerCommon, SizeClassAllocator32CompactGetBlockBegin) {
  SizeClassAllocatorGetBlockBeginStress<Allocator32Compact>();
}
#endif  // SANITIZER_WORDSIZE == 64

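// Instrumented callback for the tests below: counts OnMap()/OnUnmap() events
// so tests can check exactly when the allocators touch the OS.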
struct TestMapUnmapCallback {
  static int map_count, unmap_count;
  void OnMap(uptr p, uptr size) const { map_count++; }
  void OnUnmap(uptr p, uptr size) const { unmap_count++; }
};
int TestMapUnmapCallback::map_count;
int TestMapUnmapCallback::unmap_count;

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator64<
      kAllocatorSpace, kAllocatorSize, 16, DefaultSizeClassMap,
      TestMapUnmapCallback> Allocator64WithCallBack;
  Allocator64WithCallBack *a = new Allocator64WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);  // Allocator state.
  SizeClassAllocatorLocalCache<Allocator64WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 3);  // State + alloc + metadata.
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);  // The whole thing.
  delete a;
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32MapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  typedef SizeClassAllocator32<
      0, kAddressSpaceSize,
      /*kMetadataSize*/16,
      CompactSizeClassMap,
      kRegionSizeLog,
      FlatByteMap<kFlatByteMapSize>,
      TestMapUnmapCallback>
    Allocator32WithCallBack;
  Allocator32WithCallBack *a = new Allocator32WithCallBack;
  a->Init();
  EXPECT_EQ(TestMapUnmapCallback::map_count, 0);
  SizeClassAllocatorLocalCache<Allocator32WithCallBack> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  a->AllocateBatch(&stats, &cache, 32);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a->TestOnlyUnmap();
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
  delete a;
  // fprintf(stderr, "Map: %d Unmap: %d\n",
  //         TestMapUnmapCallback::map_count,
  //         TestMapUnmapCallback::unmap_count);
}

TEST(SanitizerCommon, LargeMmapAllocatorMapUnmapCallback) {
  TestMapUnmapCallback::map_count = 0;
  TestMapUnmapCallback::unmap_count = 0;
  LargeMmapAllocator<TestMapUnmapCallback> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();
  void *x = a.Allocate(&stats, 1 << 20, 1);
  EXPECT_EQ(TestMapUnmapCallback::map_count, 1);
  a.Deallocate(&stats, x);
  EXPECT_EQ(TestMapUnmapCallback::unmap_count, 1);
}

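// Keeps allocating batches of a large size class until the allocator's
// address range is exhausted; expected to die with the "Out of memory" check.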
template <class Allocator>
void FailInAssertionOnOOM() {
  Allocator a;
  a.Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);
  AllocatorStats stats;
  stats.Init();
  for (int i = 0; i < 1000000; i++) {
    a.AllocateBatch(&stats, &cache, 52);
  }

  a.TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Overflow) {
  EXPECT_DEATH(FailInAssertionOnOOM<Allocator64>(), "Out of memory");
}
#endif

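// End-to-end exercise of the secondary allocator: plain allocations, metadata
// round-trips, aligned allocations, and a GetBlockBegin() boundary condition.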
TEST(SanitizerCommon, LargeMmapAllocator) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const int kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 4000;
  // Allocate some.
  for (int i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
    CHECK(a.PointerIsMine(allocated[i]));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all.
  for (int i = 0; i < kNumAllocs; i++) {
    char *p = allocated[i];
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  // Check that none are left.
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Allocate some more, also add metadata.
  for (int i = 0; i < kNumAllocs; i++) {
    char *x = (char *)a.Allocate(&stats, size, 1);
    CHECK_GE(a.GetActuallyAllocatedSize(x), size);
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(x));
    *meta = i;
    allocated[i] = x;
  }
  for (int i = 0; i < kNumAllocs * kNumAllocs; i++) {
    char *p = allocated[i % kNumAllocs];
    CHECK(a.PointerIsMine(p));
    CHECK(a.PointerIsMine(p + 2000));
  }
  CHECK_GT(a.TotalMemoryUsed(), size * kNumAllocs);
  // Deallocate all in reverse order.
  for (int i = 0; i < kNumAllocs; i++) {
    int idx = kNumAllocs - i - 1;
    char *p = allocated[idx];
    uptr *meta = reinterpret_cast<uptr*>(a.GetMetaData(p));
    CHECK_EQ(*meta, idx);
    CHECK(a.PointerIsMine(p));
    a.Deallocate(&stats, p);
  }
  CHECK_EQ(a.TotalMemoryUsed(), 0);

  // Test alignments.
  uptr max_alignment = SANITIZER_WORDSIZE == 64 ? (1 << 28) : (1 << 24);
  for (uptr alignment = 8; alignment <= max_alignment; alignment *= 2) {
    const uptr kNumAlignedAllocs = 100;
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      uptr size = ((i % 10) + 1) * 4096;
      char *p = allocated[i] = (char *)a.Allocate(&stats, size, alignment);
      CHECK_EQ(p, a.GetBlockBegin(p));
      CHECK_EQ(p, a.GetBlockBegin(p + size - 1));
      CHECK_EQ(p, a.GetBlockBegin(p + size / 2));
      CHECK_EQ(0, (uptr)allocated[i] % alignment);
      p[0] = p[size - 1] = 0;
    }
    for (uptr i = 0; i < kNumAlignedAllocs; i++) {
      a.Deallocate(&stats, allocated[i]);
    }
  }

  // Regression test for boundary condition in GetBlockBegin().
  uptr page_size = GetPageSizeCached();
  char *p = (char *)a.Allocate(&stats, page_size, 1);
  CHECK_EQ(p, a.GetBlockBegin(p));
  CHECK_EQ(p, (char *)a.GetBlockBegin(p + page_size - 1));
  CHECK_NE(p, (char *)a.GetBlockBegin(p + page_size));
  a.Deallocate(&stats, p);
}

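// Exercises the primary+secondary combination behind a per-thread cache:
// impossible sizes must fail cleanly (return null), and a mixed allocation
// pattern must round-trip metadata and leave nothing behind each iteration.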
template <class PrimaryAllocator, class SecondaryAllocator,
          class AllocatorCache>
void TestCombinedAllocator() {
  typedef
      CombinedAllocator<PrimaryAllocator, AllocatorCache, SecondaryAllocator>
      Allocator;
  Allocator *a = new Allocator;
  a->Init();

  AllocatorCache cache;
  memset(&cache, 0, sizeof(cache));
  a->InitCache(&cache);

  EXPECT_EQ(a->Allocate(&cache, -1, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, -1, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1024, 1024), (void*)0);
  EXPECT_EQ(a->Allocate(&cache, (uptr)-1 - 1023, 1024), (void*)0);

  const uptr kNumAllocs = 100000;
  const uptr kNumIter = 10;
  for (uptr iter = 0; iter < kNumIter; iter++) {
    std::vector<void*> allocated;
    for (uptr i = 0; i < kNumAllocs; i++) {
      uptr size = (i % (1 << 14)) + 1;
      if ((i % 1024) == 0)
        size = 1 << (10 + (i % 14));
      void *x = a->Allocate(&cache, size, 1);
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_EQ(*meta, 0);
      *meta = size;
      allocated.push_back(x);
    }

    std::random_shuffle(allocated.begin(), allocated.end());

    for (uptr i = 0; i < kNumAllocs; i++) {
      void *x = allocated[i];
      uptr *meta = reinterpret_cast<uptr*>(a->GetMetaData(x));
      CHECK_NE(*meta, 0);
      CHECK(a->PointerIsMine(x));
      *meta = 0;
      a->Deallocate(&cache, x);
    }
    allocated.clear();
    a->SwallowCache(&cache);
  }
  a->DestroyCache(&cache);
  a->TestOnlyUnmap();
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, CombinedAllocator64) {
  TestCombinedAllocator<Allocator64,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64> > ();
}

TEST(SanitizerCommon, CombinedAllocator64Compact) {
  TestCombinedAllocator<Allocator64Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator64Compact> > ();
}
#endif

TEST(SanitizerCommon, CombinedAllocator32Compact) {
  TestCombinedAllocator<Allocator32Compact,
      LargeMmapAllocator<>,
      SizeClassAllocatorLocalCache<Allocator32Compact> > ();
}

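// Verifies that a local cache drains fully back to the allocator: after
// Drain(), repeated allocate/deallocate rounds must not change total usage.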
template <class AllocatorCache>
void TestSizeClassAllocatorLocalCache() {
  AllocatorCache cache;
  typedef typename AllocatorCache::Allocator Allocator;
  Allocator *a = new Allocator();

  a->Init();
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  const uptr kNumAllocs = 10000;
  const int kNumIter = 100;
  uptr saved_total = 0;
  for (int class_id = 1; class_id <= 5; class_id++) {
    for (int it = 0; it < kNumIter; it++) {
      void *allocated[kNumAllocs];
      for (uptr i = 0; i < kNumAllocs; i++) {
        allocated[i] = cache.Allocate(a, class_id);
      }
      for (uptr i = 0; i < kNumAllocs; i++) {
        cache.Deallocate(a, class_id, allocated[i]);
      }
      cache.Drain(a);
      uptr total_allocated = a->TotalMemoryUsed();
      if (it)
        CHECK_EQ(saved_total, total_allocated);
      saved_total = total_allocated;
    }
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64LocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64> >();
}

TEST(SanitizerCommon, SizeClassAllocator64CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator64Compact> >();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32CompactLocalCache) {
  TestSizeClassAllocatorLocalCache<
      SizeClassAllocatorLocalCache<Allocator32Compact> >();
}

#if SANITIZER_WORDSIZE == 64
typedef SizeClassAllocatorLocalCache<Allocator64> AllocatorCache;
static AllocatorCache static_allocator_cache;

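// Worker for AllocatorLeakTest below: touches the static cache from a fresh
// thread and drains it, so repeated threads must not leak allocator memory.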
void *AllocatorLeakTestWorker(void *arg) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator *a = (Allocator*)(arg);
  static_allocator_cache.Allocate(a, 10);
  static_allocator_cache.Drain(a);
  return 0;
}

TEST(SanitizerCommon, AllocatorLeakTest) {
  typedef AllocatorCache::Allocator Allocator;
  Allocator a;
  a.Init();
  uptr total_used_memory = 0;
  for (int i = 0; i < 100; i++) {
    pthread_t t;
    EXPECT_EQ(0, pthread_create(&t, 0, AllocatorLeakTestWorker, &a));
    EXPECT_EQ(0, pthread_join(t, 0));
    if (i == 0)
      total_used_memory = a.TotalMemoryUsed();
    EXPECT_EQ(a.TotalMemoryUsed(), total_used_memory);
  }

  a.TestOnlyUnmap();
}

// Struct which is allocated to pass info to new threads.  The new thread frees
// it.
struct NewThreadParams {
  AllocatorCache *thread_cache;
  AllocatorCache::Allocator *allocator;
  uptr class_id;
};

// Called in a new thread.  Just frees its argument.
static void *DeallocNewThreadWorker(void *arg) {
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(arg);
  params->thread_cache->Deallocate(params->allocator, params->class_id, params);
  return NULL;
}

// The allocator cache is supposed to be POD and zero initialized.  We should
// be able to call Deallocate on a zeroed cache, and it will self-initialize.
TEST(Allocator, AllocatorCacheDeallocNewThread) {
  AllocatorCache::Allocator allocator;
  allocator.Init();
  AllocatorCache main_cache;
  AllocatorCache child_cache;
  memset(&main_cache, 0, sizeof(main_cache));
  memset(&child_cache, 0, sizeof(child_cache));

  uptr class_id = DefaultSizeClassMap::ClassID(sizeof(NewThreadParams));
  NewThreadParams *params = reinterpret_cast<NewThreadParams*>(
      main_cache.Allocate(&allocator, class_id));
  params->thread_cache = &child_cache;
  params->allocator = &allocator;
  params->class_id = class_id;
  pthread_t t;
  EXPECT_EQ(0, pthread_create(&t, 0, DeallocNewThreadWorker, params));
  EXPECT_EQ(0, pthread_join(t, 0));
}
#endif

TEST(Allocator, Basic) {
  char *p = (char*)InternalAlloc(10);
  EXPECT_NE(p, (char*)0);
  char *p2 = (char*)InternalAlloc(20);
  EXPECT_NE(p2, (char*)0);
  EXPECT_NE(p2, p);
  InternalFree(p);
  InternalFree(p2);
}

TEST(Allocator, Stress) {
  const int kCount = 1000;
  char *ptrs[kCount];
  unsigned rnd = 42;
  for (int i = 0; i < kCount; i++) {
    uptr sz = my_rand_r(&rnd) % 1000;
    char *p = (char*)InternalAlloc(sz);
    EXPECT_NE(p, (char*)0);
    ptrs[i] = p;
  }
  for (int i = 0; i < kCount; i++) {
    InternalFree(ptrs[i]);
  }
}

TEST(Allocator, InternalAllocFailure) {
  EXPECT_DEATH(Ident(InternalAlloc(10 << 20)),
               "Unexpected mmap in InternalAllocator!");
}

TEST(Allocator, ScopedBuffer) {
  const int kSize = 512;
  {
    InternalScopedBuffer<int> int_buf(kSize);
    EXPECT_EQ(sizeof(int) * kSize, int_buf.size());  // NOLINT
  }
  InternalScopedBuffer<char> char_buf(kSize);
  EXPECT_EQ(sizeof(char) * kSize, char_buf.size());  // NOLINT
  internal_memset(char_buf.data(), 'c', kSize);
  for (int i = 0; i < kSize; i++) {
    EXPECT_EQ('c', char_buf[i]);
  }
}

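// ForEachChunk() iteration tests: every chunk we allocated must be reported
// by the allocator while its lock is held.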
void IterationTestCallback(uptr chunk, void *arg) {
  reinterpret_cast<std::set<uptr> *>(arg)->insert(chunk);
}

template <class Allocator>
void TestSizeClassAllocatorIteration() {
  Allocator *a = new Allocator;
  a->Init();
  SizeClassAllocatorLocalCache<Allocator> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  static const uptr sizes[] = {1, 16, 30, 40, 100, 1000, 10000,
    50000, 60000, 100000, 120000, 300000, 500000, 1000000, 2000000};

  std::vector<void *> allocated;

  // Allocate a bunch of chunks.
  for (uptr s = 0; s < ARRAY_SIZE(sizes); s++) {
    uptr size = sizes[s];
    if (!a->CanAllocate(size, 1)) continue;
    // printf("s = %ld\n", size);
    uptr n_iter = std::max((uptr)6, 80000 / size);
    // fprintf(stderr, "size: %ld iter: %ld\n", size, n_iter);
    for (uptr j = 0; j < n_iter; j++) {
      uptr class_id0 = Allocator::SizeClassMapT::ClassID(size);
      void *x = cache.Allocate(a, class_id0);
      allocated.push_back(x);
    }
  }

  std::set<uptr> reported_chunks;
  a->ForceLock();
  a->ForEachChunk(IterationTestCallback, &reported_chunks);
  a->ForceUnlock();

  for (uptr i = 0; i < allocated.size(); i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }

  a->TestOnlyUnmap();
  delete a;
}

#if SANITIZER_WORDSIZE == 64
TEST(SanitizerCommon, SizeClassAllocator64Iteration) {
  TestSizeClassAllocatorIteration<Allocator64>();
}
#endif

TEST(SanitizerCommon, SizeClassAllocator32Iteration) {
  TestSizeClassAllocatorIteration<Allocator32Compact>();
}

TEST(SanitizerCommon, LargeMmapAllocatorIteration) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1000;
  char *allocated[kNumAllocs];
  static const uptr size = 40;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++)
    allocated[i] = (char *)a.Allocate(&stats, size, 1);

  std::set<uptr> reported_chunks;
  a.ForceLock();
  a.ForEachChunk(IterationTestCallback, &reported_chunks);
  a.ForceUnlock();

  for (uptr i = 0; i < kNumAllocs; i++) {
    // Don't use EXPECT_NE. Reporting the first mismatch is enough.
    ASSERT_NE(reported_chunks.find(reinterpret_cast<uptr>(allocated[i])),
              reported_chunks.end());
  }
  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

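// GetBlockBeginFastLocked() must find the owning block for interior (and
// slightly preceding) pointers, and must return null for addresses near the
// bottom and top of the address space that were never allocated.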
TEST(SanitizerCommon, LargeMmapAllocatorBlockBegin) {
  LargeMmapAllocator<> a;
  a.Init();
  AllocatorStats stats;
  stats.Init();

  static const uptr kNumAllocs = 1024;
  static const uptr kNumExpectedFalseLookups = 10000000;
  char *allocated[kNumAllocs];
  static const uptr size = 4096;
  // Allocate some.
  for (uptr i = 0; i < kNumAllocs; i++) {
    allocated[i] = (char *)a.Allocate(&stats, size, 1);
  }

  for (uptr i = 0; i < kNumAllocs * kNumAllocs; i++) {
    // if ((i & (i - 1)) == 0) fprintf(stderr, "[%zd]\n", i);
    char *p1 = allocated[i % kNumAllocs];
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size / 2));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 + size - 1));
    EXPECT_EQ(p1, a.GetBlockBeginFastLocked(p1 - 100));
  }

  for (uptr i = 0; i < kNumExpectedFalseLookups; i++) {
    void *p = reinterpret_cast<void *>(i % 1024);
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
    p = reinterpret_cast<void *>(~0L - (i % 1024));
    EXPECT_EQ((void *)0, a.GetBlockBeginFastLocked(p));
  }

  for (uptr i = 0; i < kNumAllocs; i++)
    a.Deallocate(&stats, allocated[i]);
}

#if SANITIZER_WORDSIZE == 64
// Regression test for out-of-memory condition in PopulateFreeList().
TEST(SanitizerCommon, SizeClassAllocator64PopulateFreeListOOM) {
  // In a world where regions are small and chunks are huge...
  typedef SizeClassMap<63, 128, 16> SpecialSizeClassMap;
  typedef SizeClassAllocator64<kAllocatorSpace, kAllocatorSize, 0,
                               SpecialSizeClassMap> SpecialAllocator64;
  const uptr kRegionSize =
      kAllocatorSize / SpecialSizeClassMap::kNumClassesRounded;
  SpecialAllocator64 *a = new SpecialAllocator64;
  a->Init();
  SizeClassAllocatorLocalCache<SpecialAllocator64> cache;
  memset(&cache, 0, sizeof(cache));
  cache.Init(0);

  // ...one man is on a mission to overflow a region with a series of
  // successive allocations.
  const uptr kClassID = 107;
  const uptr kAllocationSize = SpecialSizeClassMap::Size(kClassID);
  ASSERT_LT(2 * kAllocationSize, kRegionSize);
  ASSERT_GT(3 * kAllocationSize, kRegionSize);
  cache.Allocate(a, kClassID);
  EXPECT_DEATH(cache.Allocate(a, kClassID) && cache.Allocate(a, kClassID),
               "The process has exhausted");
  a->TestOnlyUnmap();
  delete a;
}
#endif

#endif  // #if TSAN_DEBUG == 0