Home | History | Annotate | Download | only in tests
      1 //===-- asan_noinst_test.cc -----------------------------------------------===//
      2 //
      3 //                     The LLVM Compiler Infrastructure
      4 //
      5 // This file is distributed under the University of Illinois Open Source
      6 // License. See LICENSE.TXT for details.
      7 //
      8 //===----------------------------------------------------------------------===//
      9 //
     10 // This file is a part of AddressSanitizer, an address sanity checker.
     11 //
     12 // This test file should be compiled w/o asan instrumentation.
     13 //===----------------------------------------------------------------------===//
     14 
#include "asan_allocator.h"
#include "asan_internal.h"
#include "asan_mapping.h"
#include "asan_test_utils.h"

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>  // for memset()

#include <algorithm>
#include <limits>
#include <random>
#include <vector>
     27 
     28 
// Sanity check that death tests work at all in this uninstrumented binary:
// any abnormal exit is accepted (empty message pattern).
TEST(AddressSanitizer, InternalSimpleDeathTest) {
  EXPECT_DEATH(exit(1), "");
}
     32 
     33 static void MallocStress(size_t n) {
     34   u32 seed = my_rand();
     35   StackTrace stack1;
     36   stack1.trace[0] = 0xa123;
     37   stack1.trace[1] = 0xa456;
     38   stack1.size = 2;
     39 
     40   StackTrace stack2;
     41   stack2.trace[0] = 0xb123;
     42   stack2.trace[1] = 0xb456;
     43   stack2.size = 2;
     44 
     45   StackTrace stack3;
     46   stack3.trace[0] = 0xc123;
     47   stack3.trace[1] = 0xc456;
     48   stack3.size = 2;
     49 
     50   std::vector<void *> vec;
     51   for (size_t i = 0; i < n; i++) {
     52     if ((i % 3) == 0) {
     53       if (vec.empty()) continue;
     54       size_t idx = my_rand_r(&seed) % vec.size();
     55       void *ptr = vec[idx];
     56       vec[idx] = vec.back();
     57       vec.pop_back();
     58       __asan::asan_free(ptr, &stack1, __asan::FROM_MALLOC);
     59     } else {
     60       size_t size = my_rand_r(&seed) % 1000 + 1;
     61       switch ((my_rand_r(&seed) % 128)) {
     62         case 0: size += 1024; break;
     63         case 1: size += 2048; break;
     64         case 2: size += 4096; break;
     65       }
     66       size_t alignment = 1 << (my_rand_r(&seed) % 10 + 1);
     67       char *ptr = (char*)__asan::asan_memalign(alignment, size,
     68                                                &stack2, __asan::FROM_MALLOC);
     69       EXPECT_EQ(size, __asan::asan_malloc_usable_size(ptr, &stack2));
     70       vec.push_back(ptr);
     71       ptr[0] = 0;
     72       ptr[size-1] = 0;
     73       ptr[size/2] = 0;
     74     }
     75   }
     76   for (size_t i = 0; i < vec.size(); i++)
     77     __asan::asan_free(vec[i], &stack3, __asan::FROM_MALLOC);
     78 }
     79 
     80 
// Single-threaded allocator smoke test; reduced workload on low-memory
// configurations.
TEST(AddressSanitizer, NoInstMallocTest) {
  MallocStress(ASAN_LOW_MEMORY ? 300000 : 1000000);
}
     84 
     85 TEST(AddressSanitizer, ThreadedMallocStressTest) {
     86   const int kNumThreads = 4;
     87   const int kNumIterations = (ASAN_LOW_MEMORY) ? 10000 : 100000;
     88   pthread_t t[kNumThreads];
     89   for (int i = 0; i < kNumThreads; i++) {
     90     PTHREAD_CREATE(&t[i], 0, (void* (*)(void *x))MallocStress,
     91         (void*)kNumIterations);
     92   }
     93   for (int i = 0; i < kNumThreads; i++) {
     94     PTHREAD_JOIN(t[i], 0);
     95   }
     96 }
     97 
// Debug helper: dump to stderr the shadow bytes covering the address range
// [ptr - 32, ptr + size + 32).  A '.' marks each boundary of the user
// region; a shadow byte is printed only when the shadow address changes,
// i.e. once per group of application bytes sharing a shadow byte.
static void PrintShadow(const char *tag, uptr ptr, size_t size) {
  fprintf(stderr, "%s shadow: %lx size % 3ld: ", tag, (long)ptr, (long)size);
  uptr prev_shadow = 0;
  for (sptr i = -32; i < (sptr)size + 32; i++) {
    uptr shadow = __asan::MemToShadow(ptr + i);
    if (i == 0 || i == (sptr)size)
      fprintf(stderr, ".");
    if (shadow != prev_shadow) {
      prev_shadow = shadow;
      // Dereferences the shadow address directly; valid only inside the
      // asan runtime's address space layout.
      fprintf(stderr, "%02x", (int)*(u8*)shadow);
    }
  }
  fprintf(stderr, "\n");
}
    112 
// Manual-inspection helper (disabled by default): print the shadow of a
// heap block right after allocation ("m") and right after deallocation
// ("f").  The second PrintShadow intentionally uses the stale pointer
// VALUE to look up shadow memory; the freed block itself is not accessed.
TEST(AddressSanitizer, DISABLED_InternalPrintShadow) {
  for (size_t size = 1; size <= 513; size++) {
    char *ptr = new char[size];
    PrintShadow("m", (uptr)ptr, size);
    delete [] ptr;
    PrintShadow("f", (uptr)ptr, size);
  }
}
    121 
// Fixed pool of fake program-counter values used to exercise stack-trace
// compression.  The 64-bit-sized values are compiled in only on 64-bit
// targets; the 32-bit values are always present.  Repeated entries are
// intentional (real stacks contain duplicate PCs).
static uptr pc_array[] = {
#if SANITIZER_WORDSIZE == 64
  0x7effbf756068ULL,
  0x7effbf75e5abULL,
  0x7effc0625b7cULL,
  0x7effc05b8997ULL,
  0x7effbf990577ULL,
  0x7effbf990c56ULL,
  0x7effbf992f3cULL,
  0x7effbf950c22ULL,
  0x7effc036dba0ULL,
  0x7effc03638a3ULL,
  0x7effc035be4aULL,
  0x7effc0539c45ULL,
  0x7effc0539a65ULL,
  0x7effc03db9b3ULL,
  0x7effc03db100ULL,
  0x7effc037c7b8ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc038b777ULL,
  0x7effc038021cULL,
  0x7effc037c7d1ULL,
  0x7effc037bfffULL,
  0x7effc0520d26ULL,
  0x7effc009ddffULL,
  0x7effbf90bb50ULL,
  0x7effbdddfa69ULL,
  0x7effbdde1fe2ULL,
  0x7effbdde2424ULL,
  0x7effbdde27b3ULL,
  0x7effbddee53bULL,
  0x7effbdde1988ULL,
  0x7effbdde0904ULL,
  0x7effc106ce0dULL,
  0x7effbcc3fa04ULL,
  0x7effbcc3f6a4ULL,
  0x7effbcc3e726ULL,
  0x7effbcc40852ULL,
  0x7effb681ec4dULL,
#endif  // SANITIZER_WORDSIZE
  0xB0B5E768,
  0x7B682EC1,
  0x367F9918,
  0xAE34E13,
  0xBA0C6C6,
  0x13250F46,
  0xA0D6A8AB,
  0x2B07C1A8,
  0x6C844F4A,
  0x2321B53,
  0x1F3D4F8F,
  0x3FE2924B,
  0xB7A2F568,
  0xBD23950A,
  0x61020930,
  0x33E7970C,
  0x405998A1,
  0x59F3551D,
  0x350E3028,
  0xBC55A28D,
  0x361F3AED,
  0xBEAD0F73,
  0xAEF28479,
  0x757E971F,
  0xAEBA450,
  0x43AD22F5,
  0x8C2C50C4,
  0x7AD8A2E1,
  0x69EE4EE8,
  0xC08DFF,
  0x4BA6538,
  0x3708AB2,
  0xC24B6475,
  0x7C8890D7,
  0x6662495F,
  0x9B641689,
  0xD3596B,
  0xA1049569,
  0x44CBC16,
  0x4D39C39F
};
    211 
    212 void CompressStackTraceTest(size_t n_iter) {
    213   u32 seed = my_rand();
    214   const size_t kNumPcs = ARRAY_SIZE(pc_array);
    215   u32 compressed[2 * kNumPcs];
    216 
    217   for (size_t iter = 0; iter < n_iter; iter++) {
    218     std::random_shuffle(pc_array, pc_array + kNumPcs);
    219     StackTrace stack0, stack1;
    220     stack0.CopyFrom(pc_array, kNumPcs);
    221     stack0.size = std::max((size_t)1, (size_t)(my_rand_r(&seed) % stack0.size));
    222     size_t compress_size =
    223       std::max((size_t)2, (size_t)my_rand_r(&seed) % (2 * kNumPcs));
    224     size_t n_frames =
    225       StackTrace::CompressStack(&stack0, compressed, compress_size);
    226     Ident(n_frames);
    227     assert(n_frames <= stack0.size);
    228     StackTrace::UncompressStack(&stack1, compressed, compress_size);
    229     assert(stack1.size == n_frames);
    230     for (size_t i = 0; i < stack1.size; i++) {
    231       assert(stack0.trace[i] == stack1.trace[i]);
    232     }
    233   }
    234 }
    235 
// 10000 randomized compress/uncompress round trips.
TEST(AddressSanitizer, CompressStackTraceTest) {
  CompressStackTraceTest(10000);
}
    239 
    240 void CompressStackTraceBenchmark(size_t n_iter) {
    241   const size_t kNumPcs = ARRAY_SIZE(pc_array);
    242   u32 compressed[2 * kNumPcs];
    243   std::random_shuffle(pc_array, pc_array + kNumPcs);
    244 
    245   StackTrace stack0;
    246   stack0.CopyFrom(pc_array, kNumPcs);
    247   stack0.size = kNumPcs;
    248   for (size_t iter = 0; iter < n_iter; iter++) {
    249     size_t compress_size = kNumPcs;
    250     size_t n_frames =
    251       StackTrace::CompressStack(&stack0, compressed, compress_size);
    252     Ident(n_frames);
    253   }
    254 }
    255 
// ~16M compressions; intended for manual timing runs.
TEST(AddressSanitizer, CompressStackTraceBenchmark) {
  CompressStackTraceBenchmark(1 << 24);
}
    259 
// A freed chunk must sit in quarantine before its address can be reused.
// Allocate/free same-sized blocks until the original address comes back:
// that should take many iterations (quarantine is large) but still happen
// before the safety cap.
TEST(AddressSanitizer, QuarantineTest) {
  StackTrace stack;
  stack.trace[0] = 0x890;
  stack.size = 1;

  const int size = 1024;
  void *p = __asan::asan_malloc(size, &stack);
  __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
  size_t i;
  size_t max_i = 1 << 30;
  for (i = 0; i < max_i; i++) {
    void *p1 = __asan::asan_malloc(size, &stack);
    __asan::asan_free(p1, &stack, __asan::FROM_MALLOC);
    // Only pointer values are compared; the freed memory is never touched.
    if (p1 == p) break;
  }
  // Reuse must be delayed (quarantine working) but finite.
  EXPECT_GE(i, 10000U);
  EXPECT_LT(i, max_i);
}
    278 
    279 void *ThreadedQuarantineTestWorker(void *unused) {
    280   (void)unused;
    281   u32 seed = my_rand();
    282   StackTrace stack;
    283   stack.trace[0] = 0x890;
    284   stack.size = 1;
    285 
    286   for (size_t i = 0; i < 1000; i++) {
    287     void *p = __asan::asan_malloc(1 + (my_rand_r(&seed) % 4000), &stack);
    288     __asan::asan_free(p, &stack, __asan::FROM_MALLOC);
    289   }
    290   return NULL;
    291 }
    292 
// Check that the thread local allocators are flushed when threads are
// destroyed.  Runs 3000 sequential worker threads and verifies heap
// growth relative to the starting size stays bounded; leaked per-thread
// caches would accumulate far past the 320 MB limit.
TEST(AddressSanitizer, ThreadedQuarantineTest) {
  const int n_threads = 3000;
  size_t mmaped1 = __asan_get_heap_size();
  for (int i = 0; i < n_threads; i++) {
    pthread_t t;
    PTHREAD_CREATE(&t, NULL, ThreadedQuarantineTestWorker, 0);
    PTHREAD_JOIN(t, 0);
    size_t mmaped2 = __asan_get_heap_size();
    // NOTE(review): assumes the heap never shrinks below mmaped1; if it
    // did, the unsigned subtraction would wrap and fail — confirm.
    EXPECT_LT(mmaped2 - mmaped1, 320U * (1 << 20));
  }
}
    306 
    307 void *ThreadedOneSizeMallocStress(void *unused) {
    308   (void)unused;
    309   StackTrace stack;
    310   stack.trace[0] = 0x890;
    311   stack.size = 1;
    312   const size_t kNumMallocs = 1000;
    313   for (int iter = 0; iter < 1000; iter++) {
    314     void *p[kNumMallocs];
    315     for (size_t i = 0; i < kNumMallocs; i++) {
    316       p[i] = __asan::asan_malloc(32, &stack);
    317     }
    318     for (size_t i = 0; i < kNumMallocs; i++) {
    319       __asan::asan_free(p[i], &stack, __asan::FROM_MALLOC);
    320     }
    321   }
    322   return NULL;
    323 }
    324 
// Run the one-size malloc stress from four concurrent threads.
TEST(AddressSanitizer, ThreadedOneSizeMallocStressTest) {
  const int kNumThreads = 4;
  pthread_t t[kNumThreads];
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_CREATE(&t[i], 0, ThreadedOneSizeMallocStress, 0);
  }
  for (int i = 0; i < kNumThreads; i++) {
    PTHREAD_JOIN(t[i], 0);
  }
}
    335 
// memset() aimed at shadow/gap memory must die with a report naming the
// region that was hit (low shadow, shadow gap, or high shadow).
TEST(AddressSanitizer, MemsetWildAddressTest) {
  using __asan::kHighMemEnd;
  typedef void*(*memset_p)(void*, int, size_t);
  // Prevent inlining of memset().
  volatile memset_p libc_memset = (memset_p)memset;
  // On layouts where the low shadow is empty (kLowShadowEnd == 0) the
  // address falls in the shadow gap instead.
  EXPECT_DEATH(libc_memset((void*)(kLowShadowBeg + 200), 0, 100),
               (kLowShadowEnd == 0) ? "unknown-crash.*shadow gap"
                                    : "unknown-crash.*low shadow");
  EXPECT_DEATH(libc_memset((void*)(kShadowGapBeg + 200), 0, 100),
               "unknown-crash.*shadow gap");
  EXPECT_DEATH(libc_memset((void*)(kHighShadowBeg + 200), 0, 100),
               "unknown-crash.*high shadow");
}
    349 
    350 TEST(AddressSanitizerInterface, GetEstimatedAllocatedSize) {
    351   EXPECT_EQ(0U, __asan_get_estimated_allocated_size(0));
    352   const size_t sizes[] = { 1, 30, 1<<30 };
    353   for (size_t i = 0; i < 3; i++) {
    354     EXPECT_EQ(sizes[i], __asan_get_estimated_allocated_size(sizes[i]));
    355   }
    356 }
    357 
// Fragment of the report asan prints when __asan_get_allocated_size is
// called on a pointer the allocator does not own.
static const char* kGetAllocatedSizeErrorMsg =
  "attempting to call __asan_get_allocated_size()";
    360 
// Full ownership/size contract: live blocks, wild addresses, interior
// pointers, NULL, freed blocks, and zero-sized allocations.
TEST(AddressSanitizerInterface, GetAllocatedSizeAndOwnershipTest) {
  const size_t kArraySize = 100;
  char *array = Ident((char*)malloc(kArraySize));
  int *int_ptr = Ident(new int);

  // Allocated memory is owned by allocator. Allocated size should be
  // equal to requested size.
  EXPECT_EQ(true, __asan_get_ownership(array));
  EXPECT_EQ(kArraySize, __asan_get_allocated_size(array));
  EXPECT_EQ(true, __asan_get_ownership(int_ptr));
  EXPECT_EQ(sizeof(int), __asan_get_allocated_size(int_ptr));

  // We cannot call GetAllocatedSize from the memory we didn't map,
  // and from the interior pointers (not returned by previous malloc).
  void *wild_addr = (void*)0x1;
  EXPECT_FALSE(__asan_get_ownership(wild_addr));
  EXPECT_DEATH(__asan_get_allocated_size(wild_addr), kGetAllocatedSizeErrorMsg);
  EXPECT_FALSE(__asan_get_ownership(array + kArraySize / 2));
  EXPECT_DEATH(__asan_get_allocated_size(array + kArraySize / 2),
               kGetAllocatedSizeErrorMsg);

  // NULL is not owned, but is a valid argument for __asan_get_allocated_size().
  EXPECT_FALSE(__asan_get_ownership(NULL));
  EXPECT_EQ(0U, __asan_get_allocated_size(NULL));

  // When memory is freed, it's not owned, and call to GetAllocatedSize
  // is forbidden.  (These calls pass only the pointer value; the freed
  // memory itself is never dereferenced.)
  free(array);
  EXPECT_FALSE(__asan_get_ownership(array));
  EXPECT_DEATH(__asan_get_allocated_size(array), kGetAllocatedSizeErrorMsg);
  delete int_ptr;

  void *zero_alloc = Ident(malloc(0));
  if (zero_alloc != 0) {
    // If malloc(0) is not null, this pointer is owned and should have valid
    // allocated size.
    EXPECT_TRUE(__asan_get_ownership(zero_alloc));
    // Allocated size is 0 or 1 depending on the allocator used.
    EXPECT_LT(__asan_get_allocated_size(zero_alloc), 2U);
  }
  free(zero_alloc);
}
    403 
    404 TEST(AddressSanitizerInterface, GetCurrentAllocatedBytesTest) {
    405   size_t before_malloc, after_malloc, after_free;
    406   char *array;
    407   const size_t kMallocSize = 100;
    408   before_malloc = __asan_get_current_allocated_bytes();
    409 
    410   array = Ident((char*)malloc(kMallocSize));
    411   after_malloc = __asan_get_current_allocated_bytes();
    412   EXPECT_EQ(before_malloc + kMallocSize, after_malloc);
    413 
    414   free(array);
    415   after_free = __asan_get_current_allocated_bytes();
    416   EXPECT_EQ(before_malloc, after_free);
    417 }
    418 
// Deliberately double-frees a heap block so that the death/exit-code tests
// below can trigger a deterministic asan error.  Ident() keeps the
// compiler from folding the two deletes together.
static void DoDoubleFree() {
  int *x = Ident(new int);
  delete Ident(x);
  delete Ident(x);
}
    424 
// Heap size must return to its baseline after each huge alloc+free cycle,
// showing large chunks are unmapped rather than cached.
TEST(AddressSanitizerInterface, GetHeapSizeTest) {
  // asan_allocator2 does not keep huge chunks in free list, but unmaps them.
  // The chunk should be greater than the quarantine size,
  // otherwise it will be stuck in quarantine instead of being unmaped.
  static const size_t kLargeMallocSize = (1 << 28) + 1;  // 256M
  free(Ident(malloc(kLargeMallocSize)));  // Drain quarantine.
  uptr old_heap_size = __asan_get_heap_size();
  for (int i = 0; i < 3; i++) {
    // fprintf(stderr, "allocating %zu bytes:\n", kLargeMallocSize);
    free(Ident(malloc(kLargeMallocSize)));
    EXPECT_EQ(old_heap_size, __asan_get_heap_size());
  }
}
    438 
// Parameters for ManyThreadsWithStatsStressTest: the malloc sizes cycled
// by each worker, iterations per worker, and the thread count (reduced on
// 32-bit targets).
static const size_t kManyThreadsMallocSizes[] = {5, 1UL<<10, 1UL<<14, 357};
static const size_t kManyThreadsIterations = 250;
static const size_t kManyThreadsNumThreads =
  (SANITIZER_WORDSIZE == 32) ? 40 : 200;
    443 
    444 void *ManyThreadsWithStatsWorker(void *arg) {
    445   (void)arg;
    446   for (size_t iter = 0; iter < kManyThreadsIterations; iter++) {
    447     for (size_t size_index = 0; size_index < 4; size_index++) {
    448       free(Ident(malloc(kManyThreadsMallocSizes[size_index])));
    449     }
    450   }
    451   // Just one large allocation.
    452   free(Ident(malloc(1 << 20)));
    453   return 0;
    454 }
    455 
    456 TEST(AddressSanitizerInterface, ManyThreadsWithStatsStressTest) {
    457   size_t before_test, after_test, i;
    458   pthread_t threads[kManyThreadsNumThreads];
    459   before_test = __asan_get_current_allocated_bytes();
    460   for (i = 0; i < kManyThreadsNumThreads; i++) {
    461     PTHREAD_CREATE(&threads[i], 0,
    462                    (void* (*)(void *x))ManyThreadsWithStatsWorker, (void*)i);
    463   }
    464   for (i = 0; i < kManyThreadsNumThreads; i++) {
    465     PTHREAD_JOIN(threads[i], 0);
    466   }
    467   after_test = __asan_get_current_allocated_bytes();
    468   // ASan stats also reflect memory usage of internal ASan RTL structs,
    469   // so we can't check for equality here.
    470   EXPECT_LT(after_test, before_test + (1UL<<20));
    471 }
    472 
// __asan_set_error_exit_code installs a new exit code and returns the
// previous one; each triggered error must exit with the code currently in
// effect, and the original code is restored at the end.
TEST(AddressSanitizerInterface, ExitCode) {
  int original_exit_code = __asan_set_error_exit_code(7);
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(7), "");
  EXPECT_EQ(7, __asan_set_error_exit_code(8));
  EXPECT_EXIT(DoDoubleFree(), ::testing::ExitedWithCode(8), "");
  EXPECT_EQ(8, __asan_set_error_exit_code(original_exit_code));
  EXPECT_EXIT(DoDoubleFree(),
              ::testing::ExitedWithCode(original_exit_code), "");
}
    482 
    483 static void MyDeathCallback() {
    484   fprintf(stderr, "MyDeathCallback\n");
    485 }
    486 
// The installed death callback must run as part of error reporting; the
// callback is removed afterwards so other tests are unaffected.
TEST(AddressSanitizerInterface, DeathCallbackTest) {
  __asan_set_death_callback(MyDeathCallback);
  EXPECT_DEATH(DoDoubleFree(), "MyDeathCallback");
  __asan_set_death_callback(NULL);
}
    492 
// Report fragment printed for accesses to manually poisoned memory.
static const char* kUseAfterPoisonErrorMessage = "use-after-poison";

// Query (via the runtime, without actually loading from the address)
// whether ptr[offset] is addressable / poisoned.
#define GOOD_ACCESS(ptr, offset)  \
    EXPECT_FALSE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))

#define BAD_ACCESS(ptr, offset) \
    EXPECT_TRUE(__asan::AddressIsPoisoned((uptr)(ptr + offset)))
    500 
// Basic manual poisoning: poison a middle slice of a heap block, verify
// its boundaries, check that reporting names it use-after-poison, then
// unpoison and verify the slice is addressable again.
TEST(AddressSanitizerInterface, SimplePoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // poison array[40..80)
  __asan_poison_memory_region(array + 40, 40);
  GOOD_ACCESS(array, 39);
  GOOD_ACCESS(array, 80);
  BAD_ACCESS(array, 40);
  BAD_ACCESS(array, 60);
  BAD_ACCESS(array, 79);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, (uptr)(array + 40), true, 1),
               kUseAfterPoisonErrorMessage);
  __asan_unpoison_memory_region(array + 40, 40);
  // access previously poisoned memory.
  GOOD_ACCESS(array, 40);
  GOOD_ACCESS(array, 79);
  free(array);
}
    518 
// Poison/unpoison calls over overlapping ranges: the last operation on a
// byte wins, and unpoisoning a middle slice splits the poisoned region.
TEST(AddressSanitizerInterface, OverlappingPoisonMemoryRegionTest) {
  char *array = Ident((char*)malloc(120));
  // Poison [0..40) and [80..120)
  __asan_poison_memory_region(array, 40);
  __asan_poison_memory_region(array + 80, 40);
  BAD_ACCESS(array, 20);
  GOOD_ACCESS(array, 60);
  BAD_ACCESS(array, 100);
  // Poison whole array - [0..120)
  __asan_poison_memory_region(array, 120);
  BAD_ACCESS(array, 60);
  // Unpoison [24..96)
  __asan_unpoison_memory_region(array + 24, 72);
  BAD_ACCESS(array, 23);
  GOOD_ACCESS(array, 24);
  GOOD_ACCESS(array, 60);
  GOOD_ACCESS(array, 95);
  BAD_ACCESS(array, 96);
  free(array);
}
    539 
    540 TEST(AddressSanitizerInterface, PushAndPopWithPoisoningTest) {
    541   // Vector of capacity 20
    542   char *vec = Ident((char*)malloc(20));
    543   __asan_poison_memory_region(vec, 20);
    544   for (size_t i = 0; i < 7; i++) {
    545     // Simulate push_back.
    546     __asan_unpoison_memory_region(vec + i, 1);
    547     GOOD_ACCESS(vec, i);
    548     BAD_ACCESS(vec, i + 1);
    549   }
    550   for (size_t i = 7; i > 0; i--) {
    551     // Simulate pop_back.
    552     __asan_poison_memory_region(vec + i - 1, 1);
    553     BAD_ACCESS(vec, i - 1);
    554     if (i > 1) GOOD_ACCESS(vec, i - 2);
    555   }
    556   free(vec);
    557 }
    558 
// Globals glob1..glob100000 (declared elsewhere in the test suite — not
// visible in this file) must be addressable through their last byte and
// poisoned immediately past it; larger globals get larger redzones.
TEST(AddressSanitizerInterface, GlobalRedzones) {
  GOOD_ACCESS(glob1, 1 - 1);
  GOOD_ACCESS(glob2, 2 - 1);
  GOOD_ACCESS(glob3, 3 - 1);
  GOOD_ACCESS(glob4, 4 - 1);
  GOOD_ACCESS(glob5, 5 - 1);
  GOOD_ACCESS(glob6, 6 - 1);
  GOOD_ACCESS(glob7, 7 - 1);
  GOOD_ACCESS(glob8, 8 - 1);
  GOOD_ACCESS(glob9, 9 - 1);
  GOOD_ACCESS(glob10, 10 - 1);
  GOOD_ACCESS(glob11, 11 - 1);
  GOOD_ACCESS(glob12, 12 - 1);
  GOOD_ACCESS(glob13, 13 - 1);
  GOOD_ACCESS(glob14, 14 - 1);
  GOOD_ACCESS(glob15, 15 - 1);
  GOOD_ACCESS(glob16, 16 - 1);
  GOOD_ACCESS(glob17, 17 - 1);
  GOOD_ACCESS(glob1000, 1000 - 1);
  GOOD_ACCESS(glob10000, 10000 - 1);
  GOOD_ACCESS(glob100000, 100000 - 1);

  // One byte past the end of each global must be poisoned (redzone).
  BAD_ACCESS(glob1, 1);
  BAD_ACCESS(glob2, 2);
  BAD_ACCESS(glob3, 3);
  BAD_ACCESS(glob4, 4);
  BAD_ACCESS(glob5, 5);
  BAD_ACCESS(glob6, 6);
  BAD_ACCESS(glob7, 7);
  BAD_ACCESS(glob8, 8);
  BAD_ACCESS(glob9, 9);
  BAD_ACCESS(glob10, 10);
  BAD_ACCESS(glob11, 11);
  BAD_ACCESS(glob12, 12);
  BAD_ACCESS(glob13, 13);
  BAD_ACCESS(glob14, 14);
  BAD_ACCESS(glob15, 15);
  BAD_ACCESS(glob16, 16);
  BAD_ACCESS(glob17, 17);
  BAD_ACCESS(glob1000, 1000);
  BAD_ACCESS(glob1000, 1100);  // Redzone is at least 101 bytes.
  BAD_ACCESS(glob10000, 10000);
  BAD_ACCESS(glob10000, 11000);  // Redzone is at least 1001 bytes.
  BAD_ACCESS(glob100000, 100000);
  BAD_ACCESS(glob100000, 110000);  // Redzone is at least 10001 bytes.
}
    605 
    606 // Make sure that each aligned block of size "2^granularity" doesn't have
    607 // "true" value before "false" value.
    608 static void MakeShadowValid(bool *shadow, int length, int granularity) {
    609   bool can_be_poisoned = true;
    610   for (int i = length - 1; i >= 0; i--) {
    611     if (!shadow[i])
    612       can_be_poisoned = false;
    613     if (!can_be_poisoned)
    614       shadow[i] = false;
    615     if (i % (1 << granularity) == 0) {
    616       can_be_poisoned = true;
    617     }
    618   }
    619 }
    620 
// Exhaustively poison/unpoison every pair of subranges of a 24-byte block
// and compare the runtime's per-byte answers against a model bitmap
// (expected[]) normalized with MakeShadowValid at granularity 2^3, so the
// model obeys the same chunk constraints as real shadow memory.
TEST(AddressSanitizerInterface, PoisoningStressTest) {
  const size_t kSize = 24;
  bool expected[kSize];
  char *arr = Ident((char*)malloc(kSize));
  for (size_t l1 = 0; l1 < kSize; l1++) {
    for (size_t s1 = 1; l1 + s1 <= kSize; s1++) {
      for (size_t l2 = 0; l2 < kSize; l2++) {
        for (size_t s2 = 1; l2 + s2 <= kSize; s2++) {
          // Poison [l1, l1+s1), [l2, l2+s2) and check result.
          __asan_unpoison_memory_region(arr, kSize);
          __asan_poison_memory_region(arr + l1, s1);
          __asan_poison_memory_region(arr + l2, s2);
          memset(expected, false, kSize);
          memset(expected + l1, true, s1);
          // Normalize after each model update, matching the order in which
          // the runtime applied the two poison calls.
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, true, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
          // Unpoison [l1, l1+s1) and [l2, l2+s2) and check result.
          __asan_poison_memory_region(arr, kSize);
          __asan_unpoison_memory_region(arr + l1, s1);
          __asan_unpoison_memory_region(arr + l2, s2);
          memset(expected, true, kSize);
          memset(expected + l1, false, s1);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          memset(expected + l2, false, s2);
          MakeShadowValid(expected, kSize, /*granularity*/ 3);
          for (size_t i = 0; i < kSize; i++) {
            ASSERT_EQ(expected[i], __asan_address_is_poisoned(arr + i));
          }
        }
      }
    }
  }
}
    658 
// __asan_region_is_poisoned(beg, size) returns 0 when the whole region is
// addressable, otherwise the address of the first poisoned byte.  Probe
// every [beg, end) window over the allocation plus up to 16 redzone bytes.
TEST(AddressSanitizerInterface, PoisonedRegion) {
  size_t rz = 16;
  for (size_t size = 1; size <= 64; size++) {
    char *p = new char[size];
    uptr x = reinterpret_cast<uptr>(p);
    for (size_t beg = 0; beg < size + rz; beg++) {
      for (size_t end = beg; end < size + rz; end++) {
        uptr first_poisoned = __asan_region_is_poisoned(x + beg, end - beg);
        if (beg == end) {
          // Empty region: never reported as poisoned.
          EXPECT_FALSE(first_poisoned);
        } else if (beg < size && end <= size) {
          // Fully inside the allocation: clean.
          EXPECT_FALSE(first_poisoned);
        } else if (beg >= size) {
          // Starts in the redzone: first poisoned byte is the start.
          EXPECT_EQ(x + beg, first_poisoned);
        } else {
          // Straddles the end: first poisoned byte is just past the object.
          EXPECT_GT(end, size);
          EXPECT_EQ(x + size, first_poisoned);
        }
      }
    }
    delete [] p;
  }
}
    682 
// This is a performance benchmark for manual runs.
// asan's memset interceptor calls mem_is_zero for the entire shadow region.
// the profile should look like this:
//     89.10%   [.] __memset_sse2
//     10.50%   [.] __sanitizer::mem_is_zero
// I.e. mem_is_zero should consume ~ SHADOW_GRANULARITY less CPU cycles
// than memset itself.
TEST(AddressSanitizerInterface, DISABLED_StressLargeMemset) {
  size_t size = 1 << 20;
  char *x = new char[size];
  // Ident() forces a call through the interceptor instead of an inlined
  // memset.
  for (int i = 0; i < 100000; i++)
    Ident(memset)(x, 0, size);
  delete [] x;
}
    697 
// Same here, but we run memset with small sizes (interceptor overhead
// dominates instead of the shadow scan).
TEST(AddressSanitizerInterface, DISABLED_StressSmallMemset) {
  size_t size = 32;
  char *x = new char[size];
  for (int i = 0; i < 100000000; i++)
    Ident(memset)(x, 0, size);
  delete [] x;
}
    706 
// Report fragments printed for out-of-bounds manual poison/unpoison calls.
static const char *kInvalidPoisonMessage = "invalid-poison-memory-range";
static const char *kInvalidUnpoisonMessage = "invalid-unpoison-memory-range";
    709 
// Poison/unpoison requests extending outside an owned allocation must die
// with the corresponding invalid-range report.  (Disabled by default.)
TEST(AddressSanitizerInterface, DISABLED_InvalidPoisonAndUnpoisonCallsTest) {
  char *array = Ident((char*)malloc(120));
  __asan_unpoison_memory_region(array, 120);
  // Try to unpoison not owned memory
  EXPECT_DEATH(__asan_unpoison_memory_region(array, 121),
               kInvalidUnpoisonMessage);
  EXPECT_DEATH(__asan_unpoison_memory_region(array - 1, 120),
               kInvalidUnpoisonMessage);

  __asan_poison_memory_region(array, 120);
  // Try to poison not owned memory.
  EXPECT_DEATH(__asan_poison_memory_region(array, 121), kInvalidPoisonMessage);
  EXPECT_DEATH(__asan_poison_memory_region(array - 1, 120),
               kInvalidPoisonMessage);
  free(array);
}
    726 
// Error-report callback for SetErrorReportCallbackTest: writes the report
// to stderr (fd 2) bracketed by literal "ABCDEF" markers, then exits
// immediately so the death test can match the framing.
static void ErrorReportCallbackOneToZ(const char *report) {
  int report_len = strlen(report);
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  ASSERT_EQ(report_len, write(2, report, report_len));
  ASSERT_EQ(6, write(2, "ABCDEF", 6));
  // _exit (not exit): skip atexit handlers and flushes inside the dying
  // process.
  _exit(1);
}
    734 
// The installed callback must receive the full report text: expect the
// ABCDEF markers surrounding a standard AddressSanitizer WRITE report.
TEST(AddressSanitizerInterface, SetErrorReportCallbackTest) {
  __asan_set_error_report_callback(ErrorReportCallbackOneToZ);
  EXPECT_DEATH(__asan_report_error(0, 0, 0, 0, true, 1),
               ASAN_PCRE_DOTALL "ABCDEF.*AddressSanitizer.*WRITE.*ABCDEF");
  __asan_set_error_report_callback(NULL);
}
    741 
    742 TEST(AddressSanitizerInterface, GetOwnershipStressTest) {
    743   std::vector<char *> pointers;
    744   std::vector<size_t> sizes;
    745   const size_t kNumMallocs = 1 << 9;
    746   for (size_t i = 0; i < kNumMallocs; i++) {
    747     size_t size = i * 100 + 1;
    748     pointers.push_back((char*)malloc(size));
    749     sizes.push_back(size);
    750   }
    751   for (size_t i = 0; i < 4000000; i++) {
    752     EXPECT_FALSE(__asan_get_ownership(&pointers));
    753     EXPECT_FALSE(__asan_get_ownership((void*)0x1234));
    754     size_t idx = i % kNumMallocs;
    755     EXPECT_TRUE(__asan_get_ownership(pointers[idx]));
    756     EXPECT_EQ(sizes[idx], __asan_get_allocated_size(pointers[idx]));
    757   }
    758   for (size_t i = 0, n = pointers.size(); i < n; i++)
    759     free(pointers[i]);
    760 }
    761 
    762 TEST(AddressSanitizerInterface, CallocOverflow) {
    763   size_t kArraySize = 4096;
    764   volatile size_t kMaxSizeT = std::numeric_limits<size_t>::max();
    765   volatile size_t kArraySize2 = kMaxSizeT / kArraySize + 10;
    766   void *p = calloc(kArraySize, kArraySize2);  // Should return 0.
    767   EXPECT_EQ(0L, Ident(p));
    768 }
    769 
// Regression test: 112 * 43878406 overflows a 32-bit size_t, so calloc
// must return NULL.  Meaningful only on 32-bit targets.
TEST(AddressSanitizerInterface, CallocOverflow2) {
#if SANITIZER_WORDSIZE == 32
  size_t kArraySize = 112;
  volatile size_t kArraySize2 = 43878406;
  void *p = calloc(kArraySize, kArraySize2);  // Should return 0.
  EXPECT_EQ(0L, Ident(p));
#endif
}
    778 
    779 TEST(AddressSanitizerInterface, CallocReturnsZeroMem) {
    780   size_t sizes[] = {16, 1000, 10000, 100000, 2100000};
    781   for (size_t s = 0; s < ARRAY_SIZE(sizes); s++) {
    782     size_t size = sizes[s];
    783     for (size_t iter = 0; iter < 5; iter++) {
    784       char *x = Ident((char*)calloc(1, size));
    785       EXPECT_EQ(x[0], 0);
    786       EXPECT_EQ(x[size - 1], 0);
    787       EXPECT_EQ(x[size / 2], 0);
    788       EXPECT_EQ(x[size / 3], 0);
    789       EXPECT_EQ(x[size / 4], 0);
    790       memset(x, 0x42, size);
    791       free(Ident(x));
    792       free(Ident(malloc(Ident(1 << 27))));  // Try to drain the quarantine.
    793     }
    794   }
    795 }
    796