// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <assert.h>
#include <stdint.h>    // for uintptr_t
#include <stdio.h>
#include <stdlib.h>
#include <string.h>    // for memset()

#include <algorithm>   // for min()
#include <new>         // for std::bad_alloc and std::set_new_handler

#include "base/atomicops.h"
#include "testing/gtest/include/gtest/gtest.h"
// Number of bits in a size_t.
static const int kSizeBits = 8 * sizeof(size_t);
// The maximum size of a size_t.
static const size_t kMaxSize = ~static_cast<size_t>(0);
// Maximum positive size of a size_t if it were signed.
static const size_t kMaxSignedSize = ((size_t(1) << (kSizeBits-1)) - 1);
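// (0x7fffffff on 32-bit platforms, 0x7fffffffffffffff on 64-bit platforms.)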
// An allocation size which is not too big to be reasonable.
static const size_t kNotTooBig = 100000;
// An allocation size which is just too big.
static const size_t kTooBig = ~static_cast<size_t>(0);

namespace {

using std::min;

// Fill a buffer of the specified size with a predetermined pattern
static void Fill(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    buffer[i] = (i & 0xff);
  }
}

// Check that the specified buffer has the predetermined pattern
// generated by Fill()
static bool Valid(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    if (buffer[i] != (i & 0xff)) {
      return false;
    }
  }
  return true;
}

// Check that a buffer is completely zeroed.
static bool IsZeroed(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    if (buffer[i] != 0) {
      return false;
    }
  }
  return true;
}

// Check alignment
static void CheckAlignment(void* p, int align) {
  EXPECT_EQ(0, reinterpret_cast<uintptr_t>(p) & (align-1));
}

// Return the next interesting size/delta to check.  Returns -1 if no more.
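// For example, starting from 100 the sequence continues 127, 128, 129, 255,
// 256, 257, ..., 131071, 131072, 131073, and then -1.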
static int NextSize(int size) {
  if (size < 100)
    return size+1;

  if (size < 100000) {
    // Find next power of two
    int power = 1;
    while (power < size)
      power <<= 1;

    // Yield (power-1, power, power+1)
    if (size < power-1)
      return power-1;

    if (size == power-1)
      return power;

    assert(size == power);
    return power+1;
  } else {
    return -1;
  }
}
     84 
     85 template <class AtomicType>
     86 static void TestAtomicIncrement() {
     87   // For now, we just test single threaded execution
     88 
     89   // use a guard value to make sure the NoBarrier_AtomicIncrement doesn't go
     90   // outside the expected address bounds.  This is in particular to
     91   // test that some future change to the asm code doesn't cause the
     92   // 32-bit NoBarrier_AtomicIncrement to do the wrong thing on 64-bit machines.
  struct {
    AtomicType prev_word;
    AtomicType count;
    AtomicType next_word;
  } s;

  AtomicType prev_word_value, next_word_value;
  memset(&prev_word_value, 0xFF, sizeof(AtomicType));
  memset(&next_word_value, 0xEE, sizeof(AtomicType));

  s.prev_word = prev_word_value;
  s.count = 0;
  s.next_word = next_word_value;

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 1), 1);
  EXPECT_EQ(s.count, 1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 2), 3);
  EXPECT_EQ(s.count, 3);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 3), 6);
  EXPECT_EQ(s.count, 6);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -3), 3);
  EXPECT_EQ(s.count, 3);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -2), 1);
  EXPECT_EQ(s.count, 1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), 0);
  EXPECT_EQ(s.count, 0);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), -1);
  EXPECT_EQ(s.count, -1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -4), -5);
  EXPECT_EQ(s.count, -5);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 5), 0);
  EXPECT_EQ(s.count, 0);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);
}


#define NUM_BITS(T) (sizeof(T) * 8)


template <class AtomicType>
static void TestCompareAndSwap() {
  AtomicType value = 0;
  AtomicType prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 1);
  EXPECT_EQ(1, value);
  EXPECT_EQ(0, prev);

  // Use a test value that has non-zero bits in both halves, mainly to
  // exercise the 64-bit implementation on 32-bit platforms.
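  // (0x4000000B for Atomic32, 0x400000000000000B for Atomic64.)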
  const AtomicType k_test_val = (static_cast<uint64_t>(1) <<
                                 (NUM_BITS(AtomicType) - 2)) + 11;
  value = k_test_val;
  prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 5);
  EXPECT_EQ(k_test_val, value);
  EXPECT_EQ(k_test_val, prev);

  value = k_test_val;
  prev = base::subtle::NoBarrier_CompareAndSwap(&value, k_test_val, 5);
  EXPECT_EQ(5, value);
  EXPECT_EQ(k_test_val, prev);
}


template <class AtomicType>
static void TestAtomicExchange() {
  AtomicType value = 0;
  // NoBarrier_AtomicExchange returns the previous value.
  AtomicType old_value = base::subtle::NoBarrier_AtomicExchange(&value, 1);
  EXPECT_EQ(1, value);
  EXPECT_EQ(0, old_value);

  // Use a test value that has non-zero bits in both halves, mainly to
  // exercise the 64-bit implementation on 32-bit platforms.
  const AtomicType k_test_val = (static_cast<uint64_t>(1) <<
                                 (NUM_BITS(AtomicType) - 2)) + 11;
  value = k_test_val;
  old_value = base::subtle::NoBarrier_AtomicExchange(&value, k_test_val);
  EXPECT_EQ(k_test_val, value);
  EXPECT_EQ(k_test_val, old_value);

  value = k_test_val;
  old_value = base::subtle::NoBarrier_AtomicExchange(&value, 5);
  EXPECT_EQ(5, value);
  EXPECT_EQ(k_test_val, old_value);
}


template <class AtomicType>
static void TestAtomicIncrementBounds() {
  // Test increment at the half-width boundary of the atomic type.  This
  // primarily exercises the 32-bit boundary of a 64-bit atomic type.
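  // For example, with a 64-bit type the increment below carries the value
  // from 0xffffffff to 0x100000000.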
  AtomicType test_val = static_cast<uint64_t>(1) << (NUM_BITS(AtomicType) / 2);
  AtomicType value = test_val - 1;
  AtomicType new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1);
  EXPECT_EQ(test_val, value);
  EXPECT_EQ(value, new_value);

  base::subtle::NoBarrier_AtomicIncrement(&value, -1);
  EXPECT_EQ(test_val - 1, value);
}

// This is a simple sanity check to make sure that values are correct.
// It does not test atomicity.
template <class AtomicType>
static void TestStore() {
  const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
  const AtomicType kVal2 = static_cast<AtomicType>(-1);

  AtomicType value;

  base::subtle::NoBarrier_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::NoBarrier_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);

  base::subtle::Acquire_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::Acquire_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);

  base::subtle::Release_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::Release_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);
}

// This is a simple sanity check to make sure that values are correct.
// It does not test atomicity.
template <class AtomicType>
static void TestLoad() {
  const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
  const AtomicType kVal2 = static_cast<AtomicType>(-1);

  AtomicType value;

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::NoBarrier_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::NoBarrier_Load(&value));

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::Acquire_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::Acquire_Load(&value));

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::Release_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::Release_Load(&value));
}

template <class AtomicType>
static void TestAtomicOps() {
  TestCompareAndSwap<AtomicType>();
  TestAtomicExchange<AtomicType>();
  TestAtomicIncrementBounds<AtomicType>();
  TestStore<AtomicType>();
  TestLoad<AtomicType>();
}

static void TestCalloc(size_t n, size_t s, bool ok) {
  char* p = reinterpret_cast<char*>(calloc(n, s));
  if (!ok) {
    EXPECT_EQ(NULL, p) << "calloc(n, s) should not succeed";
  } else {
    EXPECT_NE(reinterpret_cast<void*>(NULL), p) <<
        "calloc(n, s) should succeed";
    for (size_t i = 0; i < n*s; i++) {
      EXPECT_EQ('\0', p[i]);
    }
    free(p);
  }
}


// A global test counter for the number of times the NewHandler is called.
static int news_handled = 0;
static void TestNewHandler() {
  ++news_handled;
  throw std::bad_alloc();
}

// Because we compile without exceptions, we expect these will not throw.
static void TestOneNewWithoutExceptions(void* (*func)(size_t),
                                        bool should_throw) {
  // success test
  try {
    void* ptr = (*func)(kNotTooBig);
    EXPECT_NE(reinterpret_cast<void*>(NULL), ptr) <<
        "allocation should not have failed.";
  } catch(...) {
    ADD_FAILURE() << "allocation threw unexpected exception.";
  }

  // failure test
  try {
    void* rv = (*func)(kTooBig);
    EXPECT_EQ(NULL, rv);
    EXPECT_FALSE(should_throw) << "allocation should have thrown.";
  } catch(...) {
    EXPECT_TRUE(should_throw) << "allocation threw unexpected exception.";
  }
}

static void TestNothrowNew(void* (*func)(size_t)) {
  news_handled = 0;

  // test without new_handler:
  std::new_handler saved_handler = std::set_new_handler(0);
  TestOneNewWithoutExceptions(func, false);

  // test with new_handler:
  std::set_new_handler(TestNewHandler);
  TestOneNewWithoutExceptions(func, true);
  EXPECT_EQ(news_handled, 1) << "nothrow new_handler was not called.";
  std::set_new_handler(saved_handler);
}

}  // namespace

//-----------------------------------------------------------------------------

TEST(Atomics, AtomicIncrementWord) {
  TestAtomicIncrement<AtomicWord>();
}

TEST(Atomics, AtomicIncrement32) {
  TestAtomicIncrement<Atomic32>();
}

TEST(Atomics, AtomicOpsWord) {
  TestAtomicOps<AtomicWord>();
}

TEST(Atomics, AtomicOps32) {
  TestAtomicOps<Atomic32>();
}

TEST(Allocators, Malloc) {
  // Try allocating data with a bunch of alignments and sizes
  for (int size = 1; size < 1048576; size *= 2) {
    unsigned char* ptr = reinterpret_cast<unsigned char*>(malloc(size));
    CheckAlignment(ptr, 2);  // Should be 2 byte aligned
    Fill(ptr, size);
    EXPECT_TRUE(Valid(ptr, size));
    free(ptr);
  }
}

TEST(Allocators, Calloc) {
  TestCalloc(0, 0, true);
  TestCalloc(0, 1, true);
  TestCalloc(1, 1, true);
  TestCalloc(1<<10, 0, true);
  TestCalloc(1<<20, 0, true);
  TestCalloc(0, 1<<10, true);
  TestCalloc(0, 1<<20, true);
  TestCalloc(1<<20, 2, true);
  TestCalloc(2, 1<<20, true);
  TestCalloc(1000, 1000, true);

  TestCalloc(kMaxSize, 2, false);
  TestCalloc(2, kMaxSize, false);
  TestCalloc(kMaxSize, kMaxSize, false);

  TestCalloc(kMaxSignedSize, 3, false);
  TestCalloc(3, kMaxSignedSize, false);
  TestCalloc(kMaxSignedSize, kMaxSignedSize, false);
}

TEST(Allocators, New) {
  TestNothrowNew(&::operator new);
  TestNothrowNew(&::operator new[]);
}

// This makes sure that reallocing a small number of bytes in either
// direction doesn't cause us to allocate new memory.
TEST(Allocators, Realloc1) {
  int start_sizes[] = { 100, 1000, 10000, 100000 };
  int deltas[] = { 1, -2, 4, -8, 16, -32, 64, -128 };

  for (size_t s = 0; s < sizeof(start_sizes)/sizeof(*start_sizes); ++s) {
    void* p = malloc(start_sizes[s]);
    ASSERT_TRUE(p);
    // The larger the start-size, the larger the non-reallocing delta.
    for (size_t d = 0; d < s*2; ++d) {
      void* new_p = realloc(p, start_sizes[s] + deltas[d]);
      ASSERT_EQ(p, new_p);  // realloc should not allocate new memory
    }
    // Test again, but this time reallocing smaller first.
    for (size_t d = 0; d < s*2; ++d) {
      void* new_p = realloc(p, start_sizes[s] - deltas[d]);
      ASSERT_EQ(p, new_p);  // realloc should not allocate new memory
    }
    free(p);
  }
}

TEST(Allocators, Realloc2) {
  for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
    for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
      unsigned char* src = reinterpret_cast<unsigned char*>(malloc(src_size));
      Fill(src, src_size);
      unsigned char* dst =
          reinterpret_cast<unsigned char*>(realloc(src, dst_size));
      EXPECT_TRUE(Valid(dst, min(src_size, dst_size)));
      Fill(dst, dst_size);
      EXPECT_TRUE(Valid(dst, dst_size));
      if (dst != NULL) free(dst);
    }
  }

  // Now make sure realloc works correctly even when we overflow the
  // packed cache, so some entries are evicted from the cache.
  // The cache has 2^12 entries, keyed by page number.
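  // With 1 << 14 allocations of a page or more each, we should comfortably
  // exceed a 2^12-entry cache.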
  const int kNumEntries = 1 << 14;
  int** p = reinterpret_cast<int**>(malloc(sizeof(*p) * kNumEntries));
  int sum = 0;
  for (int i = 0; i < kNumEntries; i++) {
    // No page size is likely to be bigger than 8192 bytes.
    p[i] = reinterpret_cast<int*>(malloc(8192));
    p[i][1000] = i;              // use memory deep in the heart of p
  }
  for (int i = 0; i < kNumEntries; i++) {
    p[i] = reinterpret_cast<int*>(realloc(p[i], 9000));
  }
  for (int i = 0; i < kNumEntries; i++) {
    sum += p[i][1000];
    free(p[i]);
  }
  // Sum of 0..kNumEntries-1; assumes kNumEntries is even.
  EXPECT_EQ(kNumEntries/2 * (kNumEntries - 1), sum);
  free(p);
}

TEST(Allocators, ReallocZero) {
  // Test that realloc to zero does not return NULL.
  for (int size = 0; size >= 0; size = NextSize(size)) {
    char* ptr = reinterpret_cast<char*>(malloc(size));
    EXPECT_NE(static_cast<char*>(NULL), ptr);
    ptr = reinterpret_cast<char*>(realloc(ptr, 0));
    EXPECT_NE(static_cast<char*>(NULL), ptr);
    if (ptr)
      free(ptr);
  }
}

#ifdef WIN32
// Test recalloc
TEST(Allocators, Recalloc) {
  for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
    for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
      unsigned char* src =
          reinterpret_cast<unsigned char*>(_recalloc(NULL, 1, src_size));
      EXPECT_TRUE(IsZeroed(src, src_size));
      Fill(src, src_size);
      unsigned char* dst =
          reinterpret_cast<unsigned char*>(_recalloc(src, 1, dst_size));
      EXPECT_TRUE(Valid(dst, min(src_size, dst_size)));
      Fill(dst, dst_size);
      EXPECT_TRUE(Valid(dst, dst_size));
      if (dst != NULL)
        free(dst);
    }
  }
}

// Test the Windows-specific _aligned_malloc() and _aligned_free() methods.
TEST(Allocators, AlignedMalloc) {
  // Try allocating data with a bunch of alignments and sizes
  static const int kTestAlignments[] = {8, 16, 256, 4096, 8192, 16384};
  for (int size = 1; size > 0; size = NextSize(size)) {
    for (int i = 0; i < ARRAYSIZE(kTestAlignments); ++i) {
      unsigned char* ptr = static_cast<unsigned char*>(
          _aligned_malloc(size, kTestAlignments[i]));
      CheckAlignment(ptr, kTestAlignments[i]);
      Fill(ptr, size);
      EXPECT_TRUE(Valid(ptr, size));

      // Make a second allocation of the same size and alignment to prevent
      // allocators from passing this test by accident.  Per jar, tcmalloc
      // provides allocations for new (never before seen) sizes out of a thread
      // local heap of a given "size class."  Each time the test requests a new
      // size, it will usually get the first element of a span, which is a
      // 4K aligned allocation.
      unsigned char* ptr2 = static_cast<unsigned char*>(
          _aligned_malloc(size, kTestAlignments[i]));
      CheckAlignment(ptr2, kTestAlignments[i]);
      Fill(ptr2, size);
      EXPECT_TRUE(Valid(ptr2, size));

      // Should never happen, but sanity check just in case.
      ASSERT_NE(ptr, ptr2);
      _aligned_free(ptr);
      _aligned_free(ptr2);
    }
  }
}

#endif


int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}