Home | History | Annotate | Download | only in metrics
      1 // Copyright (c) 2015 The Chromium Authors. All rights reserved.
      2 // Use of this source code is governed by a BSD-style license that can be
      3 // found in the LICENSE file.
      4 
      5 #ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
      6 #define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
      7 
      8 #include <stdint.h>
      9 
     10 #include <atomic>
     11 #include <memory>
     12 #include <type_traits>
     13 
     14 #include "base/atomicops.h"
     15 #include "base/base_export.h"
     16 #include "base/files/file_path.h"
     17 #include "base/gtest_prod_util.h"
     18 #include "base/macros.h"
     19 #include "base/strings/string_piece.h"
     20 
     21 namespace base {
     22 
     23 class HistogramBase;
     24 class MemoryMappedFile;
     25 class SharedMemory;
     26 
     27 // Simple allocator for pieces of a memory block that may be persistent
     28 // to some storage or shared across multiple processes. This class resides
     29 // under base/metrics because it was written for that purpose. It is,
     30 // however, fully general-purpose and can be freely moved to base/memory
     31 // if other uses are found.
     32 //
     33 // This class provides for thread-secure (i.e. safe against other threads
     34 // or processes that may be compromised and thus have malicious intent)
     35 // allocation of memory within a designated block and also a mechanism by
     36 // which other threads can learn of these allocations.
     37 //
     38 // There is (currently) no way to release an allocated block of data because
     39 // doing so would risk invalidating pointers held by other processes and
     40 // greatly complicate the allocation algorithm.
     41 //
     42 // Construction of this object can accept new, clean (i.e. zeroed) memory
     43 // or previously initialized memory. In the first case, construction must
     44 // be allowed to complete before letting other allocators attach to the same
     45 // segment. In other words, don't share the segment until at least one
     46 // allocator has been attached to it.
     47 //
     48 // Note that memory not in active use is not accessed so it is possible to
     49 // use virtual memory, including memory-mapped files, as backing storage with
     50 // the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
     51 //
     52 // OBJECTS: Although the allocator can be used in a "malloc" sense, fetching
     53 // character arrays and manipulating that memory manually, the better way is
     54 // generally to use the "object" methods to create and manage allocations. In
     55 // this way the sizing, type-checking, and construction are all automatic. For
     56 // this to work, however, every type of stored object must define two public
     57 // "constexpr" values, kPersistentTypeId and kExpectedInstanceSize, as such:
     58 //
     59 // struct MyPersistentObjectType {
     60 //     // SHA1(MyPersistentObjectType): Increment this if structure changes!
     61 //     static constexpr uint32_t kPersistentTypeId = 0x3E15F6DE + 1;
     62 //
     63 //     // Expected size for 32/64-bit check. Update this if structure changes!
     64 //     static constexpr size_t kExpectedInstanceSize = 20;
     65 //
     66 //     ...
     67 // };
     68 //
     69 // kPersistentTypeId: This value is an arbitrary identifier that allows the
     70 //   identification of these objects in the allocator, including the ability
     71 //   to find them via iteration. The number is arbitrary but using the first
     72 //   four bytes of the SHA1 hash of the type name means that there shouldn't
     73 //   be any conflicts with other types that may also be stored in the memory.
     74 //   The fully qualified name (e.g. base::debug::MyPersistentObjectType) could
     75 //   be used to generate the hash if the type name seems common. Use a command
     76 //   like this to get the hash: echo -n "MyPersistentObjectType" | sha1sum
     77 //   If the structure layout changes, ALWAYS increment this number so that
     78 //   newer versions of the code don't try to interpret persistent data written
     79 //   by older versions with a different layout.
     80 //
     81 // kExpectedInstanceSize: This value is the hard-coded number that matches
     82 //   what sizeof(T) would return. By providing it explicitly, the allocator can
     83 //   verify that the structure is compatible between both 32-bit and 64-bit
     84 //   versions of the code.
     85 //
     86 // Using New manages the memory and then calls the default constructor for the
     87 // object. Given that objects are persistent, no destructor is ever called
     88 // automatically though a caller can explicitly call Delete to destruct it and
     89 // change the type to something indicating it is no longer in use.
     90 //
     91 // Though persistent memory segments are transferrable between programs built
     92 // for different natural word widths, they CANNOT be exchanged between CPUs
      93 // of different endianness. Attempts to do so will simply see the existing data
     94 // as corrupt and refuse to access any of it.
     95 class BASE_EXPORT PersistentMemoryAllocator {
     96  public:
     97   typedef uint32_t Reference;
     98 
     99   // These states are used to indicate the overall condition of the memory
    100   // segment irrespective of what is stored within it. Because the data is
    101   // often persistent and thus needs to be readable by different versions of
    102   // a program, these values are fixed and can never change.
    103   enum MemoryState : uint8_t {
    104     // Persistent memory starts all zeros and so shows "uninitialized".
    105     MEMORY_UNINITIALIZED = 0,
    106 
    107     // The header has been written and the memory is ready for use.
    108     MEMORY_INITIALIZED = 1,
    109 
    110     // The data should be considered deleted. This would be set when the
    111     // allocator is being cleaned up. If file-backed, the file is likely
    112     // to be deleted but since deletion can fail for a variety of reasons,
    113     // having this extra status means a future reader can realize what
    114     // should have happened.
    115     MEMORY_DELETED = 2,
    116 
    117     // Outside code can create states starting with this number; these too
    118     // must also never change between code versions.
    119     MEMORY_USER_DEFINED = 100,
    120   };
    121 
    122   // Iterator for going through all iterable memory records in an allocator.
    123   // Like the allocator itself, iterators are lock-free and thread-secure.
    124   // That means that multiple threads can share an iterator and the same
    125   // reference will not be returned twice.
    126   //
    127   // The order of the items returned by an iterator matches the order in which
    128   // MakeIterable() was called on them. Once an allocation is made iterable,
    129   // it is always such so the only possible difference between successive
    130   // iterations is for more to be added to the end.
    131   //
    132   // Iteration, in general, is tolerant of corrupted memory. It will return
    133   // what it can and stop only when corruption forces it to. Bad corruption
    134   // could cause the same object to be returned many times but it will
    135   // eventually quit.
    136   class BASE_EXPORT Iterator {
    137    public:
    138     // Constructs an iterator on a given |allocator|, starting at the beginning.
    139     // The allocator must live beyond the lifetime of the iterator. This class
    140     // has read-only access to the allocator (hence "const") but the returned
    141     // references can be used on a read/write version, too.
    142     explicit Iterator(const PersistentMemoryAllocator* allocator);
    143 
    144     // As above but resuming from the |starting_after| reference. The first call
    145     // to GetNext() will return the next object found after that reference. The
    146     // reference must be to an "iterable" object; references to non-iterable
    147     // objects (those that never had MakeIterable() called for them) will cause
    148     // a run-time error.
    149     Iterator(const PersistentMemoryAllocator* allocator,
    150              Reference starting_after);
    151 
    152     // Resets the iterator back to the beginning.
    153     void Reset();
    154 
    155     // Resets the iterator, resuming from the |starting_after| reference.
    156     void Reset(Reference starting_after);
    157 
    158     // Returns the previously retrieved reference, or kReferenceNull if none.
    159     // If constructor or reset with a starting_after location, this will return
    160     // that value.
    161     Reference GetLast();
    162 
    163     // Gets the next iterable, storing that type in |type_return|. The actual
    164     // return value is a reference to the allocation inside the allocator or
    165     // zero if there are no more. GetNext() may still be called again at a
    166     // later time to retrieve any new allocations that have been added.
    167     Reference GetNext(uint32_t* type_return);
    168 
    169     // Similar to above but gets the next iterable of a specific |type_match|.
    170     // This should not be mixed with calls to GetNext() because any allocations
    171     // skipped here due to a type mis-match will never be returned by later
    172     // calls to GetNext() meaning it's possible to completely miss entries.
    173     Reference GetNextOfType(uint32_t type_match);
    174 
    175     // As above but works using object type.
    176     template <typename T>
    177     Reference GetNextOfType() {
    178       return GetNextOfType(T::kPersistentTypeId);
    179     }
    180 
    181     // As above but works using objects and returns null if not found.
    182     template <typename T>
    183     const T* GetNextOfObject() {
    184       return GetAsObject<T>(GetNextOfType<T>());
    185     }
    186 
    187     // Converts references to objects. This is a convenience method so that
    188     // users of the iterator don't need to also have their own pointer to the
    189     // allocator over which the iterator runs in order to retrieve objects.
    190     // Because the iterator is not read/write, only "const" objects can be
    191     // fetched. Non-const objects can be fetched using the reference on a
    192     // non-const (external) pointer to the same allocator (or use const_cast
    193     // to remove the qualifier).
    194     template <typename T>
    195     const T* GetAsObject(Reference ref) const {
    196       return allocator_->GetAsObject<T>(ref);
    197     }
    198 
    199     // Similar to GetAsObject() but converts references to arrays of things.
    200     template <typename T>
    201     const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
    202       return allocator_->GetAsArray<T>(ref, type_id, count);
    203     }
    204 
    205     // Convert a generic pointer back into a reference. A null reference will
    206     // be returned if |memory| is not inside the persistent segment or does not
    207     // point to an object of the specified |type_id|.
    208     Reference GetAsReference(const void* memory, uint32_t type_id) const {
    209       return allocator_->GetAsReference(memory, type_id);
    210     }
    211 
    212     // As above but convert an object back into a reference.
    213     template <typename T>
    214     Reference GetAsReference(const T* obj) const {
    215       return allocator_->GetAsReference(obj);
    216     }
    217 
    218    private:
    219     // Weak-pointer to memory allocator being iterated over.
    220     const PersistentMemoryAllocator* allocator_;
    221 
    222     // The last record that was returned.
    223     std::atomic<Reference> last_record_;
    224 
    225     // The number of records found; used for detecting loops.
    226     std::atomic<uint32_t> record_count_;
    227 
    228     DISALLOW_COPY_AND_ASSIGN(Iterator);
    229   };
    230 
    231   // Returned information about the internal state of the heap.
    232   struct MemoryInfo {
    233     size_t total;
    234     size_t free;
    235   };
    236 
    237   enum : Reference {
    238     // A common "null" reference value.
    239     kReferenceNull = 0,
    240   };
    241 
    242   enum : uint32_t {
    243     // A value that will match any type when doing lookups.
    244     kTypeIdAny = 0x00000000,
    245 
    246     // A value indicating that the type is in transition. Work is being done
    247     // on the contents to prepare it for a new type to come.
    248     kTypeIdTransitioning = 0xFFFFFFFF,
    249   };
    250 
    251   enum : size_t {
    252     kSizeAny = 1  // Constant indicating that any array size is acceptable.
    253   };
    254 
    255   // This is the standard file extension (suitable for being passed to the
    256   // AddExtension() method of base::FilePath) for dumps of persistent memory.
    257   static const base::FilePath::CharType kFileExtension[];
    258 
    259   // The allocator operates on any arbitrary block of memory. Creation and
    260   // persisting or sharing of that block with another process is the
    261   // responsibility of the caller. The allocator needs to know only the
    262   // block's |base| address, the total |size| of the block, and any internal
    263   // |page| size (zero if not paged) across which allocations should not span.
    264   // The |id| is an arbitrary value the caller can use to identify a
    265   // particular memory segment. It will only be loaded during the initial
    266   // creation of the segment and can be checked by the caller for consistency.
    267   // The |name|, if provided, is used to distinguish histograms for this
    268   // allocator. Only the primary owner of the segment should define this value;
    269   // other processes can learn it from the shared state. If the underlying
    270   // memory is |readonly| then no changes will be made to it. The resulting
    271   // object should be stored as a "const" pointer.
    272   //
    273   // PersistentMemoryAllocator does NOT take ownership of the memory block.
    274   // The caller must manage it and ensure it stays available throughout the
    275   // lifetime of this object.
    276   //
    277   // Memory segments for sharing must have had an allocator attached to them
    278   // before actually being shared. If the memory segment was just created, it
    279   // should be zeroed before being passed here. If it was an existing segment,
    280   // the values here will be compared to copies stored in the shared segment
    281   // as a guard against corruption.
    282   //
    283   // Make sure that the memory segment is acceptable (see IsMemoryAcceptable()
    284   // method below) before construction if the definition of the segment can
    285   // vary in any way at run-time. Invalid memory segments will cause a crash.
    286   PersistentMemoryAllocator(void* base, size_t size, size_t page_size,
    287                             uint64_t id, base::StringPiece name,
    288                             bool readonly);
    289   virtual ~PersistentMemoryAllocator();
    290 
    291   // Check if memory segment is acceptable for creation of an Allocator. This
    292   // doesn't do any analysis of the data and so doesn't guarantee that the
     293   // contents are valid, just that the parameters won't cause the program to
    294   // abort. The IsCorrupt() method will report detection of data problems
    295   // found during construction and general operation.
    296   static bool IsMemoryAcceptable(const void* data, size_t size,
    297                                  size_t page_size, bool readonly);
    298 
    299   // Get the internal identifier for this persistent memory segment.
    300   uint64_t Id() const;
    301 
    302   // Get the internal name of this allocator (possibly an empty string).
    303   const char* Name() const;
    304 
    305   // Is this segment open only for read?
    306   bool IsReadonly() const { return readonly_; }
    307 
    308   // Manage the saved state of the memory.
    309   void SetMemoryState(uint8_t memory_state);
    310   uint8_t GetMemoryState() const;
    311 
    312   // Create internal histograms for tracking memory use and allocation sizes
    313   // for allocator of |name| (which can simply be the result of Name()). This
     314   // is done separately from construction for situations such as when the
    315   // histograms will be backed by memory provided by this very allocator.
    316   //
    317   // IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
    318   // with the following histograms:
    319   //    UMA.PersistentAllocator.name.Errors
    320   //    UMA.PersistentAllocator.name.UsedPct
    321   void CreateTrackingHistograms(base::StringPiece name);
    322 
    323   // Flushes the persistent memory to any backing store. This typically does
    324   // nothing but is used by the FilePersistentMemoryAllocator to inform the
    325   // OS that all the data should be sent to the disk immediately. This is
    326   // useful in the rare case where something has just been stored that needs
    327   // to survive a hard shutdown of the machine like from a power failure.
    328   // The |sync| parameter indicates if this call should block until the flush
    329   // is complete but is only advisory and may or may not have an effect
    330   // depending on the capabilities of the OS. Synchronous flushes are allowed
     331   // only from threads that are allowed to do I/O.
    332   void Flush(bool sync);
    333 
    334   // Direct access to underlying memory segment. If the segment is shared
    335   // across threads or processes, reading data through these values does
    336   // not guarantee consistency. Use with care. Do not write.
    337   const void* data() const { return const_cast<const char*>(mem_base_); }
    338   size_t length() const { return mem_size_; }
    339   size_t size() const { return mem_size_; }
    340   size_t used() const;
    341 
    342   // Get an object referenced by a |ref|. For safety reasons, the |type_id|
    343   // code and size-of(|T|) are compared to ensure the reference is valid
    344   // and cannot return an object outside of the memory segment. A |type_id| of
    345   // kTypeIdAny (zero) will match any though the size is still checked. NULL is
    346   // returned if any problem is detected, such as corrupted storage or incorrect
    347   // parameters. Callers MUST check that the returned value is not-null EVERY
    348   // TIME before accessing it or risk crashing! Once dereferenced, the pointer
    349   // is safe to reuse forever.
    350   //
    351   // It is essential that the object be of a fixed size. All fields must be of
    352   // a defined type that does not change based on the compiler or the CPU
    353   // natural word size. Acceptable are char, float, double, and (u)intXX_t.
    354   // Unacceptable are int, bool, and wchar_t which are implementation defined
    355   // with regards to their size.
    356   //
    357   // Alignment must also be consistent. A uint64_t after a uint32_t will pad
    358   // differently between 32 and 64 bit architectures. Either put the bigger
    359   // elements first, group smaller elements into blocks the size of larger
    360   // elements, or manually insert padding fields as appropriate for the
    361   // largest architecture, including at the end.
    362   //
     363   // To protect against mistakes, all objects must have the attribute
    364   // |kExpectedInstanceSize| (static constexpr size_t)  that is a hard-coded
    365   // numerical value -- NNN, not sizeof(T) -- that can be tested. If the
    366   // instance size is not fixed, at least one build will fail.
    367   //
    368   // If the size of a structure changes, the type-ID used to recognize it
    369   // should also change so later versions of the code don't try to read
    370   // incompatible structures from earlier versions.
    371   //
    372   // NOTE: Though this method will guarantee that an object of the specified
    373   // type can be accessed without going outside the bounds of the memory
    374   // segment, it makes no guarantees of the validity of the data within the
    375   // object itself. If it is expected that the contents of the segment could
    376   // be compromised with malicious intent, the object must be hardened as well.
    377   //
    378   // Though the persistent data may be "volatile" if it is shared with
    379   // other processes, such is not necessarily the case. The internal
    380   // "volatile" designation is discarded so as to not propagate the viral
    381   // nature of that keyword to the caller. It can add it back, if necessary,
    382   // based on knowledge of how the allocator is being used.
    383   template <typename T>
    384   T* GetAsObject(Reference ref) {
    385     static_assert(std::is_standard_layout<T>::value, "only standard objects");
    386     static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
    387     static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
    388     return const_cast<T*>(reinterpret_cast<volatile T*>(
    389         GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
    390   }
    391   template <typename T>
    392   const T* GetAsObject(Reference ref) const {
    393     static_assert(std::is_standard_layout<T>::value, "only standard objects");
    394     static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
    395     static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
    396     return const_cast<const T*>(reinterpret_cast<const volatile T*>(
    397         GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
    398   }
    399 
    400   // Like GetAsObject but get an array of simple, fixed-size types.
    401   //
    402   // Use a |count| of the required number of array elements, or kSizeAny.
    403   // GetAllocSize() can be used to calculate the upper bound but isn't reliable
    404   // because padding can make space for extra elements that were not written.
    405   //
    406   // Remember that an array of char is a string but may not be NUL terminated.
    407   //
    408   // There are no compile-time or run-time checks to ensure 32/64-bit size
     409   // compatibility when using these accessors. Only use fixed-size types such
    410   // as char, float, double, or (u)intXX_t.
    411   template <typename T>
    412   T* GetAsArray(Reference ref, uint32_t type_id, size_t count) {
    413     static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
    414     return const_cast<T*>(reinterpret_cast<volatile T*>(
    415         GetBlockData(ref, type_id, count * sizeof(T))));
    416   }
    417   template <typename T>
    418   const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
    419     static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
    420     return const_cast<const char*>(reinterpret_cast<const volatile T*>(
    421         GetBlockData(ref, type_id, count * sizeof(T))));
    422   }
    423 
    424   // Get the corresponding reference for an object held in persistent memory.
    425   // If the |memory| is not valid or the type does not match, a kReferenceNull
    426   // result will be returned.
    427   Reference GetAsReference(const void* memory, uint32_t type_id) const;
    428 
    429   // Get the number of bytes allocated to a block. This is useful when storing
    430   // arrays in order to validate the ending boundary. The returned value will
    431   // include any padding added to achieve the required alignment and so could
    432   // be larger than given in the original Allocate() request.
    433   size_t GetAllocSize(Reference ref) const;
    434 
    435   // Access the internal "type" of an object. This generally isn't necessary
    436   // but can be used to "clear" the type and so effectively mark it as deleted
    437   // even though the memory stays valid and allocated. Changing the type is
    438   // an atomic compare/exchange and so requires knowing the existing value.
    439   // It will return false if the existing type is not what is expected.
    440   //
    441   // Changing the type doesn't mean the data is compatible with the new type.
    442   // Passing true for |clear| will zero the memory after the type has been
    443   // changed away from |from_type_id| but before it becomes |to_type_id| meaning
    444   // that it is done in a manner that is thread-safe. Memory is guaranteed to
    445   // be zeroed atomically by machine-word in a monotonically increasing order.
    446   //
    447   // It will likely be necessary to reconstruct the type before it can be used.
    448   // Changing the type WILL NOT invalidate existing pointers to the data, either
    449   // in this process or others, so changing the data structure could have
     450   // unpredictable results. USE WITH CARE!
    451   uint32_t GetType(Reference ref) const;
    452   bool ChangeType(Reference ref,
    453                   uint32_t to_type_id,
    454                   uint32_t from_type_id,
    455                   bool clear);
    456 
    457   // Allocated objects can be added to an internal list that can then be
    458   // iterated over by other processes. If an allocated object can be found
    459   // another way, such as by having its reference within a different object
    460   // that will be made iterable, then this call is not necessary. This always
    461   // succeeds unless corruption is detected; check IsCorrupted() to find out.
    462   // Once an object is made iterable, its position in iteration can never
    463   // change; new iterable objects will always be added after it in the series.
    464   // Changing the type does not alter its "iterable" status.
    465   void MakeIterable(Reference ref);
    466 
    467   // Get the information about the amount of free space in the allocator. The
    468   // amount of free space should be treated as approximate due to extras from
    469   // alignment and metadata. Concurrent allocations from other threads will
    470   // also make the true amount less than what is reported.
    471   void GetMemoryInfo(MemoryInfo* meminfo) const;
    472 
    473   // If there is some indication that the memory has become corrupted,
    474   // calling this will attempt to prevent further damage by indicating to
    475   // all processes that something is not as expected.
    476   void SetCorrupt() const;
    477 
    478   // This can be called to determine if corruption has been detected in the
     479   // segment, possibly by a malicious actor. Once detected, future allocations
    480   // will fail and iteration may not locate all objects.
    481   bool IsCorrupt() const;
    482 
    483   // Flag set if an allocation has failed because the memory segment was full.
    484   bool IsFull() const;
    485 
    486   // Update those "tracking" histograms which do not get updates during regular
    487   // operation, such as how much memory is currently used. This should be
    488   // called before such information is to be displayed or uploaded.
    489   void UpdateTrackingHistograms();
    490 
    491   // While the above works much like malloc & free, these next methods provide
    492   // an "object" interface similar to new and delete.
    493 
    494   // Reserve space in the memory segment of the desired |size| and |type_id|.
    495   // A return value of zero indicates the allocation failed, otherwise the
    496   // returned reference can be used by any process to get a real pointer via
    497   // the GetAsObject() or GetAsArray calls.
    498   Reference Allocate(size_t size, uint32_t type_id);
    499 
    500   // Allocate and construct an object in persistent memory. The type must have
    501   // both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId
    502   // static constexpr fields that are used to ensure compatibility between
    503   // software versions. An optional size parameter can be specified to force
    504   // the allocation to be bigger than the size of the object; this is useful
    505   // when the last field is actually variable length.
    506   template <typename T>
    507   T* New(size_t size) {
    508     if (size < sizeof(T))
    509       size = sizeof(T);
    510     Reference ref = Allocate(size, T::kPersistentTypeId);
    511     void* mem =
    512         const_cast<void*>(GetBlockData(ref, T::kPersistentTypeId, size));
    513     if (!mem)
    514       return nullptr;
    515     DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (ALIGNOF(T) - 1));
    516     return new (mem) T();
    517   }
    518   template <typename T>
    519   T* New() {
    520     return New<T>(sizeof(T));
    521   }
    522 
    523   // Similar to New, above, but construct the object out of an existing memory
    524   // block and of an expected type. If |clear| is true, memory will be zeroed
    525   // before construction. Though this is not standard object behavior, it
    526   // is present to match with new allocations that always come from zeroed
    527   // memory. Anything previously present simply ceases to exist; no destructor
    528   // is called for it so explicitly Delete() the old object first if need be.
    529   // Calling this will not invalidate existing pointers to the object, either
    530   // in this process or others, so changing the object could have unpredictable
    531   // results. USE WITH CARE!
    532   template <typename T>
    533   T* New(Reference ref, uint32_t from_type_id, bool clear) {
    534     DCHECK_LE(sizeof(T), GetAllocSize(ref)) << "alloc not big enough for obj";
    535     // Make sure the memory is appropriate. This won't be used until after
    536     // the type is changed but checking first avoids the possibility of having
    537     // to change the type back.
    538     void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T)));
    539     if (!mem)
    540       return nullptr;
    541     // Ensure the allocator's internal alignment is sufficient for this object.
    542     // This protects against coding errors in the allocator.
    543     DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (ALIGNOF(T) - 1));
    544     // Change the type, clearing the memory if so desired. The new type is
    545     // "transitioning" so that there is no race condition with the construction
    546     // of the object should another thread be simultaneously iterating over
    547     // data. This will "acquire" the memory so no changes get reordered before
    548     // it.
    549     if (!ChangeType(ref, kTypeIdTransitioning, from_type_id, clear))
    550       return nullptr;
    551     // Construct an object of the desired type on this memory, just as if
    552     // New() had been called to create it.
    553     T* obj = new (mem) T();
    554     // Finally change the type to the desired one. This will "release" all of
    555     // the changes above and so provide a consistent view to other threads.
    556     bool success =
    557         ChangeType(ref, T::kPersistentTypeId, kTypeIdTransitioning, false);
    558     DCHECK(success);
    559     return obj;
    560   }
    561 
  // Deletes an object by destructing it and then changing the type to a
  // different value (default 0). The block itself is never released (see
  // class comment); only its type ID changes so iterators skip/reclassify it.
  template <typename T>
  void Delete(T* obj, uint32_t new_type) {
    // Get the reference for the object.
    Reference ref = GetAsReference<T>(obj);
    // First change the type to "transitioning" so there is no race condition
    // where another thread could find the object through iteration while it
    // is being destructed. This will "acquire" the memory so no changes get
    // reordered before it. It will fail if |ref| is invalid.
    if (!ChangeType(ref, kTypeIdTransitioning, T::kPersistentTypeId, false))
      return;
    // Destruct the object.
    obj->~T();
    // Finally change the type to the desired value. This will "release" all
    // the changes above.
    bool success = ChangeType(ref, new_type, kTypeIdTransitioning, false);
    DCHECK(success);
  }
    581   template <typename T>
    582   void Delete(T* obj) {
    583     Delete<T>(obj, 0);
    584   }
    585 
    586   // As above but works with objects allocated from persistent memory.
    587   template <typename T>
    588   Reference GetAsReference(const T* obj) const {
    589     return GetAsReference(obj, T::kPersistentTypeId);
    590   }
    591 
    592   // As above but works with an object allocated from persistent memory.
    593   template <typename T>
    594   void MakeIterable(const T* obj) {
    595     MakeIterable(GetAsReference<T>(obj));
    596   }
    597 
 protected:
  // How the underlying memory block was obtained. Subclasses record this so
  // the correct release mechanism can be applied later (see
  // LocalPersistentMemoryAllocator::DeallocateLocalMemory below).
  enum MemoryType {
    MEM_EXTERNAL,  // Memory supplied by the caller; not owned here.
    MEM_MALLOC,    // Memory from the general heap.
    MEM_VIRTUAL,   // Memory from the virtual-memory system (demand-zero).
    MEM_SHARED,    // Memory backing a SharedMemory segment.
    MEM_FILE,      // Memory backing a memory-mapped file.
  };
    606 
  // Bundles a block's base address with the type of memory behind it so a
  // subclass can pass both to the protected constructor in one argument.
  struct Memory {
    Memory(void* b, MemoryType t) : base(b), type(t) {}

    void* base;       // Start of the usable memory block.
    MemoryType type;  // How |base| was obtained (see MemoryType).
  };
    613 
  // Constructs the allocator. Everything is the same as the public allocator
  // except |memory| which is a structure with additional information besides
  // the base address (namely how the memory was obtained).
  PersistentMemoryAllocator(Memory memory, size_t size, size_t page_size,
                            uint64_t id, base::StringPiece name,
                            bool readonly);

  // Implementation of Flush that accepts how much to flush. |sync| presumably
  // selects a blocking flush -- confirm against the .cc implementation.
  virtual void FlushPartial(size_t length, bool sync);

  volatile char* const mem_base_;  // Memory base. (char so sizeof guaranteed 1)
  const MemoryType mem_type_;      // Type of memory allocation.
  const uint32_t mem_size_;        // Size of entire memory segment.
  const uint32_t mem_page_;        // Page size allocations shouldn't cross.
    628 
 private:
  // Implementation structures; defined in the .cc file.
  struct SharedMetadata;
  struct BlockHeader;
  // Implementation constants; defined in the .cc file.
  static const uint32_t kAllocAlignment;
  static const Reference kReferenceQueue;

  // The shared metadata is always located at the top of the memory segment.
  // These convenience functions eliminate constant casting of the base
  // pointer within the code. (The const_cast strips volatility from
  // |mem_base_| before the reinterpret_cast.)
  const SharedMetadata* shared_meta() const {
    return reinterpret_cast<const SharedMetadata*>(
        const_cast<const char*>(mem_base_));
  }
  SharedMetadata* shared_meta() {
    return reinterpret_cast<SharedMetadata*>(const_cast<char*>(mem_base_));
  }
    645 
  // Actual method for doing the allocation.
  Reference AllocateImpl(size_t size, uint32_t type_id);

  // Get the block header associated with a specific reference.
  // NOTE(review): |queue_ok| and |free_ok| appear to permit returning the
  // special iteration-queue block and unallocated blocks, respectively --
  // confirm against the .cc implementation.
  const volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id,
                                       uint32_t size, bool queue_ok,
                                       bool free_ok) const;
    653   volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id, uint32_t size,
    654                                  bool queue_ok, bool free_ok) {
    655       return const_cast<volatile BlockHeader*>(
    656           const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
    657               ref, type_id, size, queue_ok, free_ok));
    658   }
    659 
  // Get the actual data within a block associated with a specific reference.
  // Returns null when the reference does not resolve to a valid block of at
  // least |size| bytes with the given |type_id|.
  const volatile void* GetBlockData(Reference ref, uint32_t type_id,
                                    uint32_t size) const;
    663   volatile void* GetBlockData(Reference ref, uint32_t type_id,
    664                               uint32_t size) {
    665       return const_cast<volatile void*>(
    666           const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
    667               ref, type_id, size));
    668   }
    669 
  // Record an error in the internal histogram.
  void RecordError(int error) const;

  const bool readonly_;                // Indicates access to read-only memory.
  mutable std::atomic<bool> corrupt_;  // Local version of "corrupted" flag.

  // Metric collectors; may be null until created (see corresponding setters
  // elsewhere in this class -- not visible in this chunk).
  HistogramBase* allocs_histogram_;  // Histogram recording allocs.
  HistogramBase* used_histogram_;    // Histogram recording used space.
  HistogramBase* errors_histogram_;  // Histogram recording errors.

  friend class PersistentMemoryAllocatorTest;
  FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
  DISALLOW_COPY_AND_ASSIGN(PersistentMemoryAllocator);
    683 };
    684 
    685 
// This allocator uses a local memory block it allocates from the general
// heap. It is generally used when some kind of "death rattle" handler will
// save the contents to persistent storage during process shutdown. It is
// also useful for testing.
class BASE_EXPORT LocalPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  LocalPersistentMemoryAllocator(size_t size, uint64_t id,
                                 base::StringPiece name);
  ~LocalPersistentMemoryAllocator() override;

 private:
  // Allocates a block of local memory of the specified |size|, ensuring that
  // the memory will not be physically allocated until accessed and will read
  // as zero when that happens.
  static Memory AllocateLocalMemory(size_t size);

  // Deallocates a block of local |memory| of the specified |size|. The |type|
  // records which mechanism produced the memory and thus how it is released.
  static void DeallocateLocalMemory(void* memory, size_t size, MemoryType type);

  DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
};
    708 
    709 
// This allocator takes a shared-memory object and performs allocation from
// it. The memory must be previously mapped via Map() or MapAt(). The allocator
// takes ownership of the memory object.
class BASE_EXPORT SharedPersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  // |read_only| selects a read-only view of |memory|; the other parameters
  // match the base-class constructor.
  SharedPersistentMemoryAllocator(std::unique_ptr<SharedMemory> memory,
                                  uint64_t id,
                                  base::StringPiece name,
                                  bool read_only);
  ~SharedPersistentMemoryAllocator() override;

  // Non-owning access to the underlying shared-memory object.
  SharedMemory* shared_memory() { return shared_memory_.get(); }

  // Ensure that the memory isn't so invalid that it would crash when passing it
  // to the allocator. This doesn't guarantee the data is valid, just that it
  // won't cause the program to abort. The existing IsCorrupt() call will handle
  // the rest.
  static bool IsSharedMemoryAcceptable(const SharedMemory& memory);

 private:
  std::unique_ptr<SharedMemory> shared_memory_;  // Owned mapping.

  DISALLOW_COPY_AND_ASSIGN(SharedPersistentMemoryAllocator);
};
    735 
    736 
#if !defined(OS_NACL)  // NACL doesn't support any kind of file access in build.
// This allocator takes a memory-mapped file object and performs allocation
// from it. The allocator takes ownership of the file object.
class BASE_EXPORT FilePersistentMemoryAllocator
    : public PersistentMemoryAllocator {
 public:
  // A |max_size| of zero will use the length of the file as the maximum
  // size. The |file| object must have been already created with sufficient
  // permissions (read, read/write, or read/write/extend).
  FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
                                size_t max_size,
                                uint64_t id,
                                base::StringPiece name,
                                bool read_only);
  ~FilePersistentMemoryAllocator() override;

  // Ensure that the file isn't so invalid that it would crash when passing it
  // to the allocator. This doesn't guarantee the file is valid, just that it
  // won't cause the program to abort. The existing IsCorrupt() call will handle
  // the rest.
  static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);

 protected:
  // PersistentMemoryAllocator:
  // Overridden to flush the mapped file to disk (see the .cc for details).
  void FlushPartial(size_t length, bool sync) override;

 private:
  std::unique_ptr<MemoryMappedFile> mapped_file_;  // Owned mapping.

  DISALLOW_COPY_AND_ASSIGN(FilePersistentMemoryAllocator);
};
#endif  // !defined(OS_NACL)
    769 
    770 }  // namespace base
    771 
    772 #endif  // BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
    773