      1 // Copyright 2015 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #include "base/metrics/persistent_memory_allocator.h"
      6 
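         #include <atomic>  // For std::atomic<Reference> used by DelayedAllocationTest.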
      7 #include <memory>
      8 
      9 #include "base/files/file.h"
     10 #include "base/files/file_util.h"
     11 #include "base/files/memory_mapped_file.h"
     12 #include "base/files/scoped_temp_dir.h"
     13 #include "base/memory/shared_memory.h"
     14 #include "base/metrics/histogram.h"
     15 #include "base/rand_util.h"
     16 #include "base/strings/safe_sprintf.h"
     17 #include "base/strings/stringprintf.h"
     18 #include "base/synchronization/condition_variable.h"
     19 #include "base/synchronization/lock.h"
     20 #include "base/threading/simple_thread.h"
     21 #include "testing/gmock/include/gmock/gmock.h"
     22 
     23 namespace base {
     24 
     25 namespace {
     26 
     27 const uint32_t TEST_MEMORY_SIZE = 1 << 20;   // 1 MiB
     28 const uint32_t TEST_MEMORY_PAGE = 64 << 10;  // 64 KiB
     29 const uint32_t TEST_ID = 12345;
     30 const char TEST_NAME[] = "TestAllocator";
     31 
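         // Truncates or extends the file at |path| to exactly |length| bytes and then
         // verifies that the file on disk really has that size.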
     32 void SetFileLength(const base::FilePath& path, size_t length) {
     33   {
     34     File file(path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE);
     35     DCHECK(file.IsValid());
     36     ASSERT_TRUE(file.SetLength(static_cast<int64_t>(length)));
     37   }
     38 
     39   int64_t actual_length;
     40   DCHECK(GetFileSize(path, &actual_length));
     41   DCHECK_EQ(length, static_cast<size_t>(actual_length));
     42 }
     43 
     44 }  // namespace
     45 
     46 typedef PersistentMemoryAllocator::Reference Reference;
     47 
     48 class PersistentMemoryAllocatorTest : public testing::Test {
     49  public:
      50   // This can't be statically initialized because its value isn't defined
     51   // in the PersistentMemoryAllocator header file. Instead, it's simply set
     52   // in the constructor.
     53   uint32_t kAllocAlignment;
     54 
     55   struct TestObject1 {
     56     static constexpr uint32_t kPersistentTypeId = 1;
     57     static constexpr size_t kExpectedInstanceSize = 4 + 1 + 3;
     58     int32_t onething;
     59     char oranother;
     60   };
     61 
     62   struct TestObject2 {
     63     static constexpr uint32_t kPersistentTypeId = 2;
     64     static constexpr size_t kExpectedInstanceSize = 8 + 4 + 4 + 8 + 8;
     65     int64_t thiis;
     66     int32_t that;
     67     float andthe;
     68     double other;
     69     char thing[8];
     70   };
     71 
     72   PersistentMemoryAllocatorTest() {
     73     kAllocAlignment = GetAllocAlignment();
     74     mem_segment_.reset(new char[TEST_MEMORY_SIZE]);
     75   }
     76 
     77   void SetUp() override {
     78     allocator_.reset();
     79     ::memset(mem_segment_.get(), 0, TEST_MEMORY_SIZE);
     80     allocator_.reset(new PersistentMemoryAllocator(
     81         mem_segment_.get(), TEST_MEMORY_SIZE, TEST_MEMORY_PAGE,
     82         TEST_ID, TEST_NAME, false));
     83   }
     84 
     85   void TearDown() override {
     86     allocator_.reset();
     87   }
     88 
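           // Returns the number of allocations reachable through iteration, i.e. those
           // that have been passed to MakeIterable().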
     89   unsigned CountIterables() {
     90     PersistentMemoryAllocator::Iterator iter(allocator_.get());
     91     uint32_t type;
     92     unsigned count = 0;
     93     while (iter.GetNext(&type) != 0) {
     94       ++count;
     95     }
     96     return count;
     97   }
     98 
     99   static uint32_t GetAllocAlignment() {
    100     return PersistentMemoryAllocator::kAllocAlignment;
    101   }
    102 
    103  protected:
    104   std::unique_ptr<char[]> mem_segment_;
    105   std::unique_ptr<PersistentMemoryAllocator> allocator_;
    106 };
    107 
    108 TEST_F(PersistentMemoryAllocatorTest, AllocateAndIterate) {
    109   allocator_->CreateTrackingHistograms(allocator_->Name());
    110 
    111   std::string base_name(TEST_NAME);
    112   EXPECT_EQ(TEST_ID, allocator_->Id());
    113   EXPECT_TRUE(allocator_->used_histogram_);
    114   EXPECT_EQ("UMA.PersistentAllocator." + base_name + ".UsedPct",
    115             allocator_->used_histogram_->histogram_name());
    116   EXPECT_EQ(PersistentMemoryAllocator::MEMORY_INITIALIZED,
    117             allocator_->GetMemoryState());
    118 
    119   // Get base memory info for later comparison.
    120   PersistentMemoryAllocator::MemoryInfo meminfo0;
    121   allocator_->GetMemoryInfo(&meminfo0);
    122   EXPECT_EQ(TEST_MEMORY_SIZE, meminfo0.total);
    123   EXPECT_GT(meminfo0.total, meminfo0.free);
    124 
    125   // Validate allocation of test object and make sure it can be referenced
    126   // and all metadata looks correct.
    127   TestObject1* obj1 = allocator_->New<TestObject1>();
    128   ASSERT_TRUE(obj1);
    129   Reference block1 = allocator_->GetAsReference(obj1);
    130   ASSERT_NE(0U, block1);
    131   EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject1>(block1));
    132   EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject2>(block1));
    133   EXPECT_LE(sizeof(TestObject1), allocator_->GetAllocSize(block1));
    134   EXPECT_GT(sizeof(TestObject1) + kAllocAlignment,
    135             allocator_->GetAllocSize(block1));
    136   PersistentMemoryAllocator::MemoryInfo meminfo1;
    137   allocator_->GetMemoryInfo(&meminfo1);
    138   EXPECT_EQ(meminfo0.total, meminfo1.total);
    139   EXPECT_GT(meminfo0.free, meminfo1.free);
    140 
    141   // Verify that pointers can be turned back into references and that invalid
    142   // addresses return null.
    143   char* memory1 = allocator_->GetAsArray<char>(block1, 1, 1);
    144   ASSERT_TRUE(memory1);
    145   EXPECT_EQ(block1, allocator_->GetAsReference(memory1, 0));
    146   EXPECT_EQ(block1, allocator_->GetAsReference(memory1, 1));
    147   EXPECT_EQ(0U, allocator_->GetAsReference(memory1, 2));
    148   EXPECT_EQ(0U, allocator_->GetAsReference(memory1 + 1, 0));
    149   EXPECT_EQ(0U, allocator_->GetAsReference(memory1 + 16, 0));
    150   EXPECT_EQ(0U, allocator_->GetAsReference(nullptr, 0));
    151   EXPECT_EQ(0U, allocator_->GetAsReference(&base_name, 0));
    152 
    153   // Ensure that the test-object can be made iterable.
    154   PersistentMemoryAllocator::Iterator iter1a(allocator_.get());
    155   EXPECT_EQ(0U, iter1a.GetLast());
    156   uint32_t type;
    157   EXPECT_EQ(0U, iter1a.GetNext(&type));
    158   allocator_->MakeIterable(block1);
    159   EXPECT_EQ(block1, iter1a.GetNext(&type));
    160   EXPECT_EQ(1U, type);
    161   EXPECT_EQ(block1, iter1a.GetLast());
    162   EXPECT_EQ(0U, iter1a.GetNext(&type));
    163   EXPECT_EQ(block1, iter1a.GetLast());
    164 
    165   // Create second test-object and ensure everything is good and it cannot
    166   // be confused with test-object of another type.
    167   TestObject2* obj2 = allocator_->New<TestObject2>();
    168   ASSERT_TRUE(obj2);
    169   Reference block2 = allocator_->GetAsReference(obj2);
    170   ASSERT_NE(0U, block2);
    171   EXPECT_NE(nullptr, allocator_->GetAsObject<TestObject2>(block2));
    172   EXPECT_EQ(nullptr, allocator_->GetAsObject<TestObject1>(block2));
    173   EXPECT_LE(sizeof(TestObject2), allocator_->GetAllocSize(block2));
    174   EXPECT_GT(sizeof(TestObject2) + kAllocAlignment,
    175             allocator_->GetAllocSize(block2));
    176   PersistentMemoryAllocator::MemoryInfo meminfo2;
    177   allocator_->GetMemoryInfo(&meminfo2);
    178   EXPECT_EQ(meminfo1.total, meminfo2.total);
    179   EXPECT_GT(meminfo1.free, meminfo2.free);
    180 
    181   // Ensure that second test-object can also be made iterable.
    182   allocator_->MakeIterable(obj2);
    183   EXPECT_EQ(block2, iter1a.GetNext(&type));
    184   EXPECT_EQ(2U, type);
    185   EXPECT_EQ(block2, iter1a.GetLast());
    186   EXPECT_EQ(0U, iter1a.GetNext(&type));
    187   EXPECT_EQ(block2, iter1a.GetLast());
    188 
    189   // Check that the iterator can be reset to the beginning.
    190   iter1a.Reset();
    191   EXPECT_EQ(0U, iter1a.GetLast());
    192   EXPECT_EQ(block1, iter1a.GetNext(&type));
    193   EXPECT_EQ(block1, iter1a.GetLast());
    194   EXPECT_EQ(block2, iter1a.GetNext(&type));
    195   EXPECT_EQ(block2, iter1a.GetLast());
    196   EXPECT_EQ(0U, iter1a.GetNext(&type));
    197 
    198   // Check that the iterator can be reset to an arbitrary location.
    199   iter1a.Reset(block1);
    200   EXPECT_EQ(block1, iter1a.GetLast());
    201   EXPECT_EQ(block2, iter1a.GetNext(&type));
    202   EXPECT_EQ(block2, iter1a.GetLast());
    203   EXPECT_EQ(0U, iter1a.GetNext(&type));
    204 
    205   // Check that iteration can begin after an arbitrary location.
    206   PersistentMemoryAllocator::Iterator iter1b(allocator_.get(), block1);
    207   EXPECT_EQ(block2, iter1b.GetNext(&type));
    208   EXPECT_EQ(0U, iter1b.GetNext(&type));
    209 
     210   // Ensure nothing has gone noticeably wrong.
    211   EXPECT_FALSE(allocator_->IsFull());
    212   EXPECT_FALSE(allocator_->IsCorrupt());
    213 
    214   // Check the internal histogram record of used memory.
    215   allocator_->UpdateTrackingHistograms();
    216   std::unique_ptr<HistogramSamples> used_samples(
    217       allocator_->used_histogram_->SnapshotSamples());
    218   EXPECT_TRUE(used_samples);
    219   EXPECT_EQ(1, used_samples->TotalCount());
    220 
    221   // Check that an object's type can be changed.
    222   EXPECT_EQ(2U, allocator_->GetType(block2));
    223   allocator_->ChangeType(block2, 3, 2, false);
    224   EXPECT_EQ(3U, allocator_->GetType(block2));
    225   allocator_->New<TestObject2>(block2, 3, false);
    226   EXPECT_EQ(2U, allocator_->GetType(block2));
    227 
    228   // Create second allocator (read/write) using the same memory segment.
    229   std::unique_ptr<PersistentMemoryAllocator> allocator2(
    230       new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
    231                                     TEST_MEMORY_PAGE, 0, "", false));
    232   EXPECT_EQ(TEST_ID, allocator2->Id());
    233   EXPECT_FALSE(allocator2->used_histogram_);
    234 
    235   // Ensure that iteration and access through second allocator works.
    236   PersistentMemoryAllocator::Iterator iter2(allocator2.get());
    237   EXPECT_EQ(block1, iter2.GetNext(&type));
    238   EXPECT_EQ(block2, iter2.GetNext(&type));
    239   EXPECT_EQ(0U, iter2.GetNext(&type));
    240   EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject1>(block1));
    241   EXPECT_NE(nullptr, allocator2->GetAsObject<TestObject2>(block2));
    242 
    243   // Create a third allocator (read-only) using the same memory segment.
    244   std::unique_ptr<const PersistentMemoryAllocator> allocator3(
    245       new PersistentMemoryAllocator(mem_segment_.get(), TEST_MEMORY_SIZE,
    246                                     TEST_MEMORY_PAGE, 0, "", true));
    247   EXPECT_EQ(TEST_ID, allocator3->Id());
    248   EXPECT_FALSE(allocator3->used_histogram_);
    249 
    250   // Ensure that iteration and access through third allocator works.
    251   PersistentMemoryAllocator::Iterator iter3(allocator3.get());
    252   EXPECT_EQ(block1, iter3.GetNext(&type));
    253   EXPECT_EQ(block2, iter3.GetNext(&type));
    254   EXPECT_EQ(0U, iter3.GetNext(&type));
    255   EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject1>(block1));
    256   EXPECT_NE(nullptr, allocator3->GetAsObject<TestObject2>(block2));
    257 
    258   // Ensure that GetNextOfType works.
    259   PersistentMemoryAllocator::Iterator iter1c(allocator_.get());
    260   EXPECT_EQ(block2, iter1c.GetNextOfType<TestObject2>());
    261   EXPECT_EQ(0U, iter1c.GetNextOfType(2));
    262 
    263   // Ensure that GetNextOfObject works.
    264   PersistentMemoryAllocator::Iterator iter1d(allocator_.get());
    265   EXPECT_EQ(obj2, iter1d.GetNextOfObject<TestObject2>());
    266   EXPECT_EQ(nullptr, iter1d.GetNextOfObject<TestObject2>());
    267 
    268   // Ensure that deleting an object works.
    269   allocator_->Delete(obj2);
    270   PersistentMemoryAllocator::Iterator iter1z(allocator_.get());
    271   EXPECT_EQ(nullptr, iter1z.GetNextOfObject<TestObject2>());
    272 
    273   // Ensure that the memory state can be set.
    274   allocator_->SetMemoryState(PersistentMemoryAllocator::MEMORY_DELETED);
    275   EXPECT_EQ(PersistentMemoryAllocator::MEMORY_DELETED,
    276             allocator_->GetMemoryState());
    277 }
    278 
    279 TEST_F(PersistentMemoryAllocatorTest, PageTest) {
    280   // This allocation will go into the first memory page.
    281   Reference block1 = allocator_->Allocate(TEST_MEMORY_PAGE / 2, 1);
    282   EXPECT_LT(0U, block1);
    283   EXPECT_GT(TEST_MEMORY_PAGE, block1);
    284 
     285   // This allocation won't fit in the same page as the previous block.
    286   Reference block2 =
    287       allocator_->Allocate(TEST_MEMORY_PAGE - 2 * kAllocAlignment, 2);
    288   EXPECT_EQ(TEST_MEMORY_PAGE, block2);
    289 
    290   // This allocation will also require a new page.
    291   Reference block3 = allocator_->Allocate(2 * kAllocAlignment + 99, 3);
    292   EXPECT_EQ(2U * TEST_MEMORY_PAGE, block3);
    293 }
    294 
    295 // A simple thread that takes an allocator and repeatedly allocates random-
    296 // sized chunks from it until no more can be done.
    297 class AllocatorThread : public SimpleThread {
    298  public:
    299   AllocatorThread(const std::string& name,
    300                   void* base,
    301                   uint32_t size,
    302                   uint32_t page_size)
    303       : SimpleThread(name, Options()),
    304         count_(0),
    305         iterable_(0),
    306         allocator_(base, size, page_size, 0, std::string(), false) {}
    307 
    308   void Run() override {
    309     for (;;) {
    310       uint32_t size = RandInt(1, 99);
    311       uint32_t type = RandInt(100, 999);
    312       Reference block = allocator_.Allocate(size, type);
    313       if (!block)
    314         break;
    315 
    316       count_++;
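               // Make roughly half of the blocks iterable so that later iteration has
               // something to count.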
    317       if (RandInt(0, 1)) {
    318         allocator_.MakeIterable(block);
    319         iterable_++;
    320       }
    321     }
    322   }
    323 
    324   unsigned iterable() { return iterable_; }
    325   unsigned count() { return count_; }
    326 
    327  private:
    328   unsigned count_;
    329   unsigned iterable_;
    330   PersistentMemoryAllocator allocator_;
    331 };
    332 
    333 // Test parallel allocation/iteration and ensure consistency across all
    334 // instances.
    335 TEST_F(PersistentMemoryAllocatorTest, ParallelismTest) {
    336   void* memory = mem_segment_.get();
    337   AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
    338   AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
    339   AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
    340   AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
    341   AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
    342 
    343   t1.Start();
    344   t2.Start();
    345   t3.Start();
    346   t4.Start();
    347   t5.Start();
    348 
     349   unsigned last_count = 0;
     350   do {
     351     unsigned count = CountIterables();
     352     EXPECT_LE(last_count, count);
             last_count = count;  // The iterable count must never shrink.
     353   } while (!allocator_->IsCorrupt() && !allocator_->IsFull());
    354 
    355   t1.Join();
    356   t2.Join();
    357   t3.Join();
    358   t4.Join();
    359   t5.Join();
    360 
    361   EXPECT_FALSE(allocator_->IsCorrupt());
    362   EXPECT_TRUE(allocator_->IsFull());
    363   EXPECT_EQ(CountIterables(),
    364             t1.iterable() + t2.iterable() + t3.iterable() + t4.iterable() +
    365             t5.iterable());
    366 }
    367 
    368 // A simple thread that counts objects by iterating through an allocator.
    369 class CounterThread : public SimpleThread {
    370  public:
    371   CounterThread(const std::string& name,
    372                 PersistentMemoryAllocator::Iterator* iterator,
    373                 Lock* lock,
    374                 ConditionVariable* condition,
    375                 bool* wake_up)
    376       : SimpleThread(name, Options()),
    377         iterator_(iterator),
    378         lock_(lock),
    379         condition_(condition),
    380         count_(0),
    381         wake_up_(wake_up) {}
    382 
    383   void Run() override {
    384     // Wait so all threads can start at approximately the same time.
    385     // Best performance comes from releasing a single worker which then
    386     // releases the next, etc., etc.
    387     {
    388       AutoLock autolock(*lock_);
    389 
    390       // Before calling Wait(), make sure that the wake up condition
    391       // has not already passed.  Also, since spurious signal events
    392       // are possible, check the condition in a while loop to make
    393       // sure that the wake up condition is met when this thread
    394       // returns from the Wait().
    395       // See usage comments in src/base/synchronization/condition_variable.h.
    396       while (!*wake_up_) {
    397         condition_->Wait();
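                 // Relay the signal so the next waiting thread is also released.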
    398         condition_->Signal();
    399       }
    400     }
    401 
    402     uint32_t type;
    403     while (iterator_->GetNext(&type) != 0) {
    404       ++count_;
    405     }
    406   }
    407 
    408   unsigned count() { return count_; }
    409 
    410  private:
    411   PersistentMemoryAllocator::Iterator* iterator_;
    412   Lock* lock_;
    413   ConditionVariable* condition_;
    414   unsigned count_;
    415   bool* wake_up_;
    416 
    417   DISALLOW_COPY_AND_ASSIGN(CounterThread);
    418 };
    419 
    420 // Ensure that parallel iteration returns the same number of objects as
    421 // single-threaded iteration.
    422 TEST_F(PersistentMemoryAllocatorTest, IteratorParallelismTest) {
    423   // Fill the memory segment with random allocations.
    424   unsigned iterable_count = 0;
    425   for (;;) {
    426     uint32_t size = RandInt(1, 99);
    427     uint32_t type = RandInt(100, 999);
    428     Reference block = allocator_->Allocate(size, type);
    429     if (!block)
    430       break;
    431     allocator_->MakeIterable(block);
    432     ++iterable_count;
    433   }
    434   EXPECT_FALSE(allocator_->IsCorrupt());
    435   EXPECT_TRUE(allocator_->IsFull());
    436   EXPECT_EQ(iterable_count, CountIterables());
    437 
    438   PersistentMemoryAllocator::Iterator iter(allocator_.get());
    439   Lock lock;
    440   ConditionVariable condition(&lock);
    441   bool wake_up = false;
    442 
    443   CounterThread t1("t1", &iter, &lock, &condition, &wake_up);
    444   CounterThread t2("t2", &iter, &lock, &condition, &wake_up);
    445   CounterThread t3("t3", &iter, &lock, &condition, &wake_up);
    446   CounterThread t4("t4", &iter, &lock, &condition, &wake_up);
    447   CounterThread t5("t5", &iter, &lock, &condition, &wake_up);
    448 
    449   t1.Start();
    450   t2.Start();
    451   t3.Start();
    452   t4.Start();
    453   t5.Start();
    454 
    455   // Take the lock and set the wake up condition to true.  This helps to
    456   // avoid a race condition where the Signal() event is called before
    457   // all the threads have reached the Wait() and thus never get woken up.
    458   {
    459     AutoLock autolock(lock);
    460     wake_up = true;
    461   }
    462 
    463   // This will release all the waiting threads.
    464   condition.Signal();
    465 
    466   t1.Join();
    467   t2.Join();
    468   t3.Join();
    469   t4.Join();
    470   t5.Join();
    471 
    472   EXPECT_EQ(iterable_count,
    473             t1.count() + t2.count() + t3.count() + t4.count() + t5.count());
    474 
    475 #if 0
     476   // These checks ensure that the threads don't run sequentially. They
     477   // shouldn't be enabled in general because they could make the test flaky
     478   // simply by chance, but they are useful during development to confirm that
     479   // the test is working correctly.
    480   EXPECT_NE(iterable_count, t1.count());
    481   EXPECT_NE(iterable_count, t2.count());
    482   EXPECT_NE(iterable_count, t3.count());
    483   EXPECT_NE(iterable_count, t4.count());
    484   EXPECT_NE(iterable_count, t5.count());
    485 #endif
    486 }
    487 
    488 TEST_F(PersistentMemoryAllocatorTest, DelayedAllocationTest) {
    489   std::atomic<Reference> ref1, ref2;
    490   ref1.store(0, std::memory_order_relaxed);
    491   ref2.store(0, std::memory_order_relaxed);
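           // da1 lazily creates a type-1001 allocation on first Get(). da2a and da2b
           // share |ref2| and therefore the same underlying type-2002 allocation; da2b
           // merely returns a pointer 5 bytes into it (verified at the end of the test).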
    492   DelayedPersistentAllocation da1(allocator_.get(), &ref1, 1001, 100, true);
    493   DelayedPersistentAllocation da2a(allocator_.get(), &ref2, 2002, 200, 0, true);
    494   DelayedPersistentAllocation da2b(allocator_.get(), &ref2, 2002, 200, 5, true);
    495 
    496   // Nothing should yet have been allocated.
    497   uint32_t type;
    498   PersistentMemoryAllocator::Iterator iter(allocator_.get());
    499   EXPECT_EQ(0U, iter.GetNext(&type));
    500 
    501   // Do first delayed allocation and check that a new persistent object exists.
    502   EXPECT_EQ(0U, da1.reference());
    503   void* mem1 = da1.Get();
    504   ASSERT_TRUE(mem1);
    505   EXPECT_NE(0U, da1.reference());
    506   EXPECT_EQ(allocator_->GetAsReference(mem1, 1001),
    507             ref1.load(std::memory_order_relaxed));
    508   EXPECT_NE(0U, iter.GetNext(&type));
    509   EXPECT_EQ(1001U, type);
    510   EXPECT_EQ(0U, iter.GetNext(&type));
    511 
    512   // Do second delayed allocation and check.
    513   void* mem2a = da2a.Get();
    514   ASSERT_TRUE(mem2a);
    515   EXPECT_EQ(allocator_->GetAsReference(mem2a, 2002),
    516             ref2.load(std::memory_order_relaxed));
    517   EXPECT_NE(0U, iter.GetNext(&type));
    518   EXPECT_EQ(2002U, type);
    519   EXPECT_EQ(0U, iter.GetNext(&type));
    520 
     521   // The third allocation should just return an offset into the second one.
    522   void* mem2b = da2b.Get();
    523   ASSERT_TRUE(mem2b);
    524   EXPECT_EQ(0U, iter.GetNext(&type));
    525   EXPECT_EQ(reinterpret_cast<uintptr_t>(mem2a) + 5,
    526             reinterpret_cast<uintptr_t>(mem2b));
    527 }
    528 
     529 // This test doesn't verify anything other than that it doesn't crash. Its goal
     530 // is to find coding errors that aren't otherwise tested for, much like a
     531 // "fuzzer" would.
     532 // This test is supposed to fail on the TSAN bot (crbug.com/579867).
    533 #if defined(THREAD_SANITIZER)
    534 #define MAYBE_CorruptionTest DISABLED_CorruptionTest
    535 #else
    536 #define MAYBE_CorruptionTest CorruptionTest
    537 #endif
    538 TEST_F(PersistentMemoryAllocatorTest, MAYBE_CorruptionTest) {
    539   char* memory = mem_segment_.get();
    540   AllocatorThread t1("t1", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
    541   AllocatorThread t2("t2", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
    542   AllocatorThread t3("t3", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
    543   AllocatorThread t4("t4", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
    544   AllocatorThread t5("t5", memory, TEST_MEMORY_SIZE, TEST_MEMORY_PAGE);
    545 
    546   t1.Start();
    547   t2.Start();
    548   t3.Start();
    549   t4.Start();
    550   t5.Start();
    551 
    552   do {
    553     size_t offset = RandInt(0, TEST_MEMORY_SIZE - 1);
    554     char value = RandInt(0, 255);
    555     memory[offset] = value;
    556   } while (!allocator_->IsCorrupt() && !allocator_->IsFull());
    557 
    558   t1.Join();
    559   t2.Join();
    560   t3.Join();
    561   t4.Join();
    562   t5.Join();
    563 
    564   CountIterables();
    565 }
    566 
    567 // Attempt to cause crashes or loops by expressly creating dangerous conditions.
    568 TEST_F(PersistentMemoryAllocatorTest, MaliciousTest) {
    569   Reference block1 = allocator_->Allocate(sizeof(TestObject1), 1);
    570   Reference block2 = allocator_->Allocate(sizeof(TestObject1), 2);
    571   Reference block3 = allocator_->Allocate(sizeof(TestObject1), 3);
    572   Reference block4 = allocator_->Allocate(sizeof(TestObject1), 3);
    573   Reference block5 = allocator_->Allocate(sizeof(TestObject1), 3);
    574   allocator_->MakeIterable(block1);
    575   allocator_->MakeIterable(block2);
    576   allocator_->MakeIterable(block3);
    577   allocator_->MakeIterable(block4);
    578   allocator_->MakeIterable(block5);
    579   EXPECT_EQ(5U, CountIterables());
    580   EXPECT_FALSE(allocator_->IsCorrupt());
    581 
     582   // Create a loop in the iterable list and ensure it doesn't hang. The return
     583   // value from CountIterables() in these cases is unpredictable. If there is a
     584   // failure, the call will hang and the test will be killed for taking too long.
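           // The fourth 32-bit word of a block's header is the reference of the next
           // iterable block (initially block5 here); overwriting it splices a cycle
           // into the iteration list.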
    585   uint32_t* header4 = (uint32_t*)(mem_segment_.get() + block4);
    586   EXPECT_EQ(block5, header4[3]);
    587   header4[3] = block4;
    588   CountIterables();  // loop: 1-2-3-4-4
    589   EXPECT_TRUE(allocator_->IsCorrupt());
    590 
    591   // Test where loop goes back to previous block.
    592   header4[3] = block3;
    593   CountIterables();  // loop: 1-2-3-4-3
    594 
    595   // Test where loop goes back to the beginning.
    596   header4[3] = block1;
    597   CountIterables();  // loop: 1-2-3-4-1
    598 }
    599 
    600 
    601 //----- LocalPersistentMemoryAllocator -----------------------------------------
    602 
    603 TEST(LocalPersistentMemoryAllocatorTest, CreationTest) {
    604   LocalPersistentMemoryAllocator allocator(TEST_MEMORY_SIZE, 42, "");
    605   EXPECT_EQ(42U, allocator.Id());
    606   EXPECT_NE(0U, allocator.Allocate(24, 1));
    607   EXPECT_FALSE(allocator.IsFull());
    608   EXPECT_FALSE(allocator.IsCorrupt());
    609 }
    610 
    611 
    612 //----- SharedPersistentMemoryAllocator ----------------------------------------
    613 
    614 TEST(SharedPersistentMemoryAllocatorTest, CreationTest) {
    615   SharedMemoryHandle shared_handle_1;
    616   SharedMemoryHandle shared_handle_2;
    617 
    618   PersistentMemoryAllocator::MemoryInfo meminfo1;
    619   Reference r123, r456, r789;
    620   {
    621     std::unique_ptr<SharedMemory> shmem1(new SharedMemory());
    622     ASSERT_TRUE(shmem1->CreateAndMapAnonymous(TEST_MEMORY_SIZE));
    623     SharedPersistentMemoryAllocator local(std::move(shmem1), TEST_ID, "",
    624                                           false);
    625     EXPECT_FALSE(local.IsReadonly());
    626     r123 = local.Allocate(123, 123);
    627     r456 = local.Allocate(456, 456);
    628     r789 = local.Allocate(789, 789);
    629     local.MakeIterable(r123);
    630     local.ChangeType(r456, 654, 456, false);
    631     local.MakeIterable(r789);
    632     local.GetMemoryInfo(&meminfo1);
    633     EXPECT_FALSE(local.IsFull());
    634     EXPECT_FALSE(local.IsCorrupt());
    635 
    636     shared_handle_1 = local.shared_memory()->handle().Duplicate();
    637     ASSERT_TRUE(shared_handle_1.IsValid());
    638     shared_handle_2 = local.shared_memory()->handle().Duplicate();
    639     ASSERT_TRUE(shared_handle_2.IsValid());
    640   }
    641 
    642   // Read-only test.
    643   std::unique_ptr<SharedMemory> shmem2(new SharedMemory(shared_handle_1,
    644                                                         /*readonly=*/true));
    645   ASSERT_TRUE(shmem2->Map(TEST_MEMORY_SIZE));
    646 
    647   SharedPersistentMemoryAllocator shalloc2(std::move(shmem2), 0, "", true);
    648   EXPECT_TRUE(shalloc2.IsReadonly());
    649   EXPECT_EQ(TEST_ID, shalloc2.Id());
    650   EXPECT_FALSE(shalloc2.IsFull());
    651   EXPECT_FALSE(shalloc2.IsCorrupt());
    652 
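           // Only r123 and r789 were made iterable; r456 had its type changed but was
           // never added to the iteration list, so iteration skips it.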
    653   PersistentMemoryAllocator::Iterator iter2(&shalloc2);
    654   uint32_t type;
    655   EXPECT_EQ(r123, iter2.GetNext(&type));
    656   EXPECT_EQ(r789, iter2.GetNext(&type));
    657   EXPECT_EQ(0U, iter2.GetNext(&type));
    658 
    659   EXPECT_EQ(123U, shalloc2.GetType(r123));
    660   EXPECT_EQ(654U, shalloc2.GetType(r456));
    661   EXPECT_EQ(789U, shalloc2.GetType(r789));
    662 
    663   PersistentMemoryAllocator::MemoryInfo meminfo2;
    664   shalloc2.GetMemoryInfo(&meminfo2);
    665   EXPECT_EQ(meminfo1.total, meminfo2.total);
    666   EXPECT_EQ(meminfo1.free, meminfo2.free);
    667 
    668   // Read/write test.
    669   std::unique_ptr<SharedMemory> shmem3(new SharedMemory(shared_handle_2,
    670                                                         /*readonly=*/false));
    671   ASSERT_TRUE(shmem3->Map(TEST_MEMORY_SIZE));
    672 
    673   SharedPersistentMemoryAllocator shalloc3(std::move(shmem3), 0, "", false);
    674   EXPECT_FALSE(shalloc3.IsReadonly());
    675   EXPECT_EQ(TEST_ID, shalloc3.Id());
    676   EXPECT_FALSE(shalloc3.IsFull());
    677   EXPECT_FALSE(shalloc3.IsCorrupt());
    678 
    679   PersistentMemoryAllocator::Iterator iter3(&shalloc3);
    680   EXPECT_EQ(r123, iter3.GetNext(&type));
    681   EXPECT_EQ(r789, iter3.GetNext(&type));
    682   EXPECT_EQ(0U, iter3.GetNext(&type));
    683 
    684   EXPECT_EQ(123U, shalloc3.GetType(r123));
    685   EXPECT_EQ(654U, shalloc3.GetType(r456));
    686   EXPECT_EQ(789U, shalloc3.GetType(r789));
    687 
    688   PersistentMemoryAllocator::MemoryInfo meminfo3;
    689   shalloc3.GetMemoryInfo(&meminfo3);
    690   EXPECT_EQ(meminfo1.total, meminfo3.total);
    691   EXPECT_EQ(meminfo1.free, meminfo3.free);
    692 
    693   // Interconnectivity test.
    694   Reference obj = shalloc3.Allocate(42, 42);
    695   ASSERT_TRUE(obj);
    696   shalloc3.MakeIterable(obj);
    697   EXPECT_EQ(obj, iter2.GetNext(&type));
    698   EXPECT_EQ(42U, type);
    699 
    700   // Clear-on-change test.
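           // The final ChangeType() argument requests clearing: with false the existing
           // data survives the type change, with true the block is zeroed.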
    701   Reference data_ref = shalloc3.Allocate(sizeof(int) * 4, 911);
    702   int* data = shalloc3.GetAsArray<int>(data_ref, 911, 4);
    703   ASSERT_TRUE(data);
    704   data[0] = 0;
    705   data[1] = 1;
    706   data[2] = 2;
    707   data[3] = 3;
    708   ASSERT_TRUE(shalloc3.ChangeType(data_ref, 119, 911, false));
    709   EXPECT_EQ(0, data[0]);
    710   EXPECT_EQ(1, data[1]);
    711   EXPECT_EQ(2, data[2]);
    712   EXPECT_EQ(3, data[3]);
    713   ASSERT_TRUE(shalloc3.ChangeType(data_ref, 191, 119, true));
    714   EXPECT_EQ(0, data[0]);
    715   EXPECT_EQ(0, data[1]);
    716   EXPECT_EQ(0, data[2]);
    717   EXPECT_EQ(0, data[3]);
    718 }
    719 
    720 
    721 #if !defined(OS_NACL)
    722 //----- FilePersistentMemoryAllocator ------------------------------------------
    723 
    724 TEST(FilePersistentMemoryAllocatorTest, CreationTest) {
    725   ScopedTempDir temp_dir;
    726   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
    727   FilePath file_path = temp_dir.GetPath().AppendASCII("persistent_memory");
    728 
    729   PersistentMemoryAllocator::MemoryInfo meminfo1;
    730   Reference r123, r456, r789;
    731   {
    732     LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
    733     EXPECT_FALSE(local.IsReadonly());
    734     r123 = local.Allocate(123, 123);
    735     r456 = local.Allocate(456, 456);
    736     r789 = local.Allocate(789, 789);
    737     local.MakeIterable(r123);
    738     local.ChangeType(r456, 654, 456, false);
    739     local.MakeIterable(r789);
    740     local.GetMemoryInfo(&meminfo1);
    741     EXPECT_FALSE(local.IsFull());
    742     EXPECT_FALSE(local.IsCorrupt());
    743 
    744     File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    745     ASSERT_TRUE(writer.IsValid());
    746     writer.Write(0, (const char*)local.data(), local.used());
    747   }
    748 
    749   std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
    750   mmfile->Initialize(file_path);
    751   EXPECT_TRUE(mmfile->IsValid());
    752   const size_t mmlength = mmfile->length();
    753   EXPECT_GE(meminfo1.total, mmlength);
    754 
    755   FilePersistentMemoryAllocator file(std::move(mmfile), 0, 0, "", false);
    756   EXPECT_FALSE(file.IsReadonly());
    757   EXPECT_EQ(TEST_ID, file.Id());
    758   EXPECT_FALSE(file.IsFull());
    759   EXPECT_FALSE(file.IsCorrupt());
    760 
    761   PersistentMemoryAllocator::Iterator iter(&file);
    762   uint32_t type;
    763   EXPECT_EQ(r123, iter.GetNext(&type));
    764   EXPECT_EQ(r789, iter.GetNext(&type));
    765   EXPECT_EQ(0U, iter.GetNext(&type));
    766 
    767   EXPECT_EQ(123U, file.GetType(r123));
    768   EXPECT_EQ(654U, file.GetType(r456));
    769   EXPECT_EQ(789U, file.GetType(r789));
    770 
    771   PersistentMemoryAllocator::MemoryInfo meminfo2;
    772   file.GetMemoryInfo(&meminfo2);
    773   EXPECT_GE(meminfo1.total, meminfo2.total);
    774   EXPECT_GE(meminfo1.free, meminfo2.free);
    775   EXPECT_EQ(mmlength, meminfo2.total);
    776   EXPECT_EQ(0U, meminfo2.free);
    777 
     778   // There's no way of knowing whether Flush actually does anything, but at
     779   // least verify that it runs without CHECK violations.
    780   file.Flush(false);
    781   file.Flush(true);
    782 }
    783 
    784 TEST(FilePersistentMemoryAllocatorTest, ExtendTest) {
    785   ScopedTempDir temp_dir;
    786   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
    787   FilePath file_path = temp_dir.GetPath().AppendASCII("extend_test");
    788   MemoryMappedFile::Region region = {0, 16 << 10};  // 16KiB maximum size.
    789 
    790   // Start with a small but valid file of persistent data.
    791   ASSERT_FALSE(PathExists(file_path));
    792   {
    793     LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
    794     local.Allocate(1, 1);
    795     local.Allocate(11, 11);
    796 
    797     File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    798     ASSERT_TRUE(writer.IsValid());
    799     writer.Write(0, (const char*)local.data(), local.used());
    800   }
    801   ASSERT_TRUE(PathExists(file_path));
    802   int64_t before_size;
    803   ASSERT_TRUE(GetFileSize(file_path, &before_size));
    804 
    805   // Map it as an extendable read/write file and append to it.
    806   {
    807     std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
    808     mmfile->Initialize(
    809         File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
    810         region, MemoryMappedFile::READ_WRITE_EXTEND);
    811     FilePersistentMemoryAllocator allocator(std::move(mmfile), region.size, 0,
    812                                             "", false);
    813     EXPECT_EQ(static_cast<size_t>(before_size), allocator.used());
    814 
    815     allocator.Allocate(111, 111);
    816     EXPECT_LT(static_cast<size_t>(before_size), allocator.used());
    817   }
    818 
    819   // Validate that append worked.
    820   int64_t after_size;
    821   ASSERT_TRUE(GetFileSize(file_path, &after_size));
    822   EXPECT_LT(before_size, after_size);
    823 
    824   // Verify that it's still an acceptable file.
    825   {
    826     std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
    827     mmfile->Initialize(
    828         File(file_path, File::FLAG_OPEN | File::FLAG_READ | File::FLAG_WRITE),
    829         region, MemoryMappedFile::READ_WRITE_EXTEND);
    830     EXPECT_TRUE(FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, true));
    831     EXPECT_TRUE(
    832         FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, false));
    833   }
    834 }
    835 
    836 TEST(FilePersistentMemoryAllocatorTest, AcceptableTest) {
    837   const uint32_t kAllocAlignment =
    838       PersistentMemoryAllocatorTest::GetAllocAlignment();
    839   ScopedTempDir temp_dir;
    840   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
    841 
    842   LocalPersistentMemoryAllocator local(TEST_MEMORY_SIZE, TEST_ID, "");
    843   local.MakeIterable(local.Allocate(1, 1));
    844   local.MakeIterable(local.Allocate(11, 11));
    845   const size_t minsize = local.used();
    846   std::unique_ptr<char[]> garbage(new char[minsize]);
    847   RandBytes(garbage.get(), minsize);
    848 
    849   std::unique_ptr<MemoryMappedFile> mmfile;
    850   char filename[100];
    851   for (size_t filesize = minsize; filesize > 0; --filesize) {
    852     strings::SafeSPrintf(filename, "memory_%d_A", filesize);
    853     FilePath file_path = temp_dir.GetPath().AppendASCII(filename);
    854     ASSERT_FALSE(PathExists(file_path));
    855     {
    856       File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    857       ASSERT_TRUE(writer.IsValid());
    858       writer.Write(0, (const char*)local.data(), filesize);
    859     }
    860     ASSERT_TRUE(PathExists(file_path));
    861 
     862     // Request read/write access for some sizes that are a multiple of the
    863     // allocator's alignment size. The allocator is strict about file size
    864     // being a multiple of its internal alignment when doing read/write access.
    865     const bool read_only = (filesize % (2 * kAllocAlignment)) != 0;
    866     const uint32_t file_flags =
    867         File::FLAG_OPEN | File::FLAG_READ | (read_only ? 0 : File::FLAG_WRITE);
    868     const MemoryMappedFile::Access map_access =
    869         read_only ? MemoryMappedFile::READ_ONLY : MemoryMappedFile::READ_WRITE;
    870 
    871     mmfile.reset(new MemoryMappedFile());
    872     mmfile->Initialize(File(file_path, file_flags), map_access);
    873     EXPECT_EQ(filesize, mmfile->length());
    874     if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
    875       // Make sure construction doesn't crash. It will, however, cause
     876       // error messages warning about a corrupted memory segment.
    877       FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
    878                                               read_only);
    879       // Also make sure that iteration doesn't crash.
    880       PersistentMemoryAllocator::Iterator iter(&allocator);
    881       uint32_t type_id;
    882       Reference ref;
    883       while ((ref = iter.GetNext(&type_id)) != 0) {
    884         const char* data = allocator.GetAsArray<char>(
    885             ref, 0, PersistentMemoryAllocator::kSizeAny);
    886         uint32_t type = allocator.GetType(ref);
    887         size_t size = allocator.GetAllocSize(ref);
    888         // Ensure compiler can't optimize-out above variables.
    889         (void)data;
    890         (void)type;
    891         (void)size;
    892       }
    893 
    894       // Ensure that short files are detected as corrupt and full files are not.
    895       EXPECT_EQ(filesize != minsize, allocator.IsCorrupt());
    896     } else {
    897       // For filesize >= minsize, the file must be acceptable. This
    898       // else clause (file-not-acceptable) should be reached only if
    899       // filesize < minsize.
    900       EXPECT_LT(filesize, minsize);
    901     }
    902 
    903     strings::SafeSPrintf(filename, "memory_%d_B", filesize);
    904     file_path = temp_dir.GetPath().AppendASCII(filename);
    905     ASSERT_FALSE(PathExists(file_path));
    906     {
    907       File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    908       ASSERT_TRUE(writer.IsValid());
    909       writer.Write(0, (const char*)garbage.get(), filesize);
    910     }
    911     ASSERT_TRUE(PathExists(file_path));
    912 
    913     mmfile.reset(new MemoryMappedFile());
    914     mmfile->Initialize(File(file_path, file_flags), map_access);
    915     EXPECT_EQ(filesize, mmfile->length());
    916     if (FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only)) {
    917       // Make sure construction doesn't crash. It will, however, cause
     918       // error messages warning about a corrupted memory segment.
    919       FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
    920                                               read_only);
    921       EXPECT_TRUE(allocator.IsCorrupt());  // Garbage data so it should be.
    922     } else {
    923       // For filesize >= minsize, the file must be acceptable. This
    924       // else clause (file-not-acceptable) should be reached only if
    925       // filesize < minsize.
    926       EXPECT_GT(minsize, filesize);
    927     }
    928   }
    929 }
    930 
    931 TEST_F(PersistentMemoryAllocatorTest, TruncateTest) {
    932   ScopedTempDir temp_dir;
    933   ASSERT_TRUE(temp_dir.CreateUniqueTempDir());
    934   FilePath file_path = temp_dir.GetPath().AppendASCII("truncate_test");
    935 
    936   // Start with a small but valid file of persistent data. Keep the "used"
    937   // amount for both allocations.
    938   Reference a1_ref;
    939   Reference a2_ref;
    940   size_t a1_used;
    941   size_t a2_used;
    942   ASSERT_FALSE(PathExists(file_path));
    943   {
    944     LocalPersistentMemoryAllocator allocator(TEST_MEMORY_SIZE, TEST_ID, "");
    945     a1_ref = allocator.Allocate(100 << 10, 1);
    946     allocator.MakeIterable(a1_ref);
    947     a1_used = allocator.used();
    948     a2_ref = allocator.Allocate(200 << 10, 11);
    949     allocator.MakeIterable(a2_ref);
    950     a2_used = allocator.used();
    951 
    952     File writer(file_path, File::FLAG_CREATE | File::FLAG_WRITE);
    953     ASSERT_TRUE(writer.IsValid());
    954     writer.Write(0, static_cast<const char*>(allocator.data()),
    955                  allocator.size());
    956   }
    957   ASSERT_TRUE(PathExists(file_path));
    958   EXPECT_LE(a1_used, a2_ref);
    959 
     960   // Truncate the file to progressively shorter lengths and make sure it can
     961   // still be read, both with read-write and read-only access.
    962   for (size_t file_length : {a2_used, a1_used, a1_used / 2}) {
    963     SCOPED_TRACE(StringPrintf("file_length=%zu", file_length));
    964     SetFileLength(file_path, file_length);
    965 
    966     for (bool read_only : {false, true}) {
    967       SCOPED_TRACE(StringPrintf("read_only=%s", read_only ? "true" : "false"));
    968 
    969       std::unique_ptr<MemoryMappedFile> mmfile(new MemoryMappedFile());
    970       mmfile->Initialize(
    971           File(file_path, File::FLAG_OPEN |
    972                               (read_only ? File::FLAG_READ
    973                                          : File::FLAG_READ | File::FLAG_WRITE)),
    974           read_only ? MemoryMappedFile::READ_ONLY
    975                     : MemoryMappedFile::READ_WRITE);
    976       ASSERT_TRUE(
    977           FilePersistentMemoryAllocator::IsFileAcceptable(*mmfile, read_only));
    978 
    979       FilePersistentMemoryAllocator allocator(std::move(mmfile), 0, 0, "",
    980                                               read_only);
    981 
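               // An allocation is visible through iteration only if the truncated file
               // still contains everything written up to and including that allocation.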
    982       PersistentMemoryAllocator::Iterator iter(&allocator);
    983       uint32_t type_id;
    984       EXPECT_EQ(file_length >= a1_used ? a1_ref : 0U, iter.GetNext(&type_id));
    985       EXPECT_EQ(file_length >= a2_used ? a2_ref : 0U, iter.GetNext(&type_id));
    986       EXPECT_EQ(0U, iter.GetNext(&type_id));
    987 
    988       // Ensure that short files are detected as corrupt and full files are not.
    989       EXPECT_EQ(file_length < a2_used, allocator.IsCorrupt());
    990     }
    991 
    992     // Ensure that file length was not adjusted.
    993     int64_t actual_length;
    994     ASSERT_TRUE(GetFileSize(file_path, &actual_length));
    995     EXPECT_EQ(file_length, static_cast<size_t>(actual_length));
    996   }
    997 }
    998 
    999 #endif  // !defined(OS_NACL)
   1000 
   1001 }  // namespace base
   1002