/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
 * copyright notice, this list of conditions and the following disclaimer
 * in the documentation and/or other materials provided with the
 * distribution.
 *     * Neither the name of Google Inc. nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "heap/Heap.h"

#include "heap/ThreadState.h"

#if OS(POSIX)
#include <sys/mman.h>
#include <unistd.h>
#elif OS(WIN)
#include <windows.h>
#endif

namespace WebCore {

#if OS(WIN)
// Only used to sanity-check the OS page size reported by Windows.
static bool IsPowerOf2(size_t power)
{
    return !((power - 1) & power);
}
#endif

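// Alignment helpers: roundToBlinkPageBoundary() rounds an address up to
// the next Blink page boundary, and roundToOsPageSize() rounds a size up
// to a whole number of OS pages using the usual power-of-two mask trick,
// (size + pageSize - 1) & ~(pageSize - 1); e.g. with 4 KiB pages,
// roundToOsPageSize(5000) == 8192.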
static Address roundToBlinkPageBoundary(void* base)
{
    return reinterpret_cast<Address>((reinterpret_cast<uintptr_t>(base) + blinkPageOffsetMask) & blinkPageBaseMask);
}

static size_t roundToOsPageSize(size_t size)
{
    return (size + osPageSize() - 1) & ~(osPageSize() - 1);
}

size_t osPageSize()
{
#if OS(POSIX)
    static const size_t pageSize = getpagesize();
#else
    static size_t pageSize = 0;
    if (!pageSize) {
        SYSTEM_INFO info;
        GetSystemInfo(&info);
        pageSize = info.dwPageSize;
        ASSERT(IsPowerOf2(pageSize));
    }
#endif
    return pageSize;
}

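// A contiguous range of virtual memory, described by its base address and
// size. Wraps the platform primitives (mmap/mprotect/madvise on POSIX,
// VirtualAlloc/VirtualFree on Windows) used to release, commit, and
// decommit the range.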
class MemoryRegion {
public:
    MemoryRegion(Address base, size_t size) : m_base(base), m_size(size) { ASSERT(size > 0); }

    bool contains(Address addr) const
    {
        return m_base <= addr && addr < (m_base + m_size);
    }

    bool contains(const MemoryRegion& other) const
    {
        return contains(other.m_base) && contains(other.m_base + other.m_size - 1);
    }

    void release()
    {
#if OS(POSIX)
        int err = munmap(m_base, m_size);
        RELEASE_ASSERT(!err);
#else
        bool success = VirtualFree(m_base, 0, MEM_RELEASE);
        RELEASE_ASSERT(success);
#endif
    }

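    // Make the region accessible (readable and writable) so it can back a
    // heap page payload. Returns false if the OS rejects the request.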
    WARN_UNUSED_RETURN bool commit()
    {
#if OS(POSIX)
        int err = mprotect(m_base, m_size, PROT_READ | PROT_WRITE);
        if (!err) {
            madvise(m_base, m_size, MADV_NORMAL);
            return true;
        }
        return false;
#else
        void* result = VirtualAlloc(m_base, m_size, MEM_COMMIT, PAGE_READWRITE);
        return !!result;
#endif
    }

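    // Return the region's physical pages to the OS while keeping the
    // virtual address range reserved; the memory becomes inaccessible
    // until it is committed again.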
    void decommit()
    {
#if OS(POSIX)
        int err = mprotect(m_base, m_size, PROT_NONE);
        RELEASE_ASSERT(!err);
        // FIXME: Consider using MADV_FREE on MacOS.
        madvise(m_base, m_size, MADV_DONTNEED);
#else
        bool success = VirtualFree(m_base, m_size, MEM_DECOMMIT);
        RELEASE_ASSERT(success);
#endif
    }

    Address base() const { return m_base; }

private:
    Address m_base;
    size_t m_size;
};

// Representation of the memory used for a Blink heap page.
//
// The representation keeps track of two memory regions:
//
// 1. The virtual memory reserved from the system in order to be able
//    to free all the virtual memory reserved on destruction.
//
// 2. The writable memory (a sub-region of the reserved virtual
//    memory region) that is used for the actual heap page payload.
//
// Guard pages are created before and after the writable memory.
class PageMemory {
public:
    ~PageMemory() { m_reserved.release(); }

    bool commit() WARN_UNUSED_RETURN { return m_writable.commit(); }
    void decommit() { m_writable.decommit(); }

    Address writableStart() { return m_writable.base(); }

    // Allocate a virtual address space for the blink page with the
    // following layout:
    //
    //    [ guard os page | ... payload ... | guard os page ]
    //    ^---{ aligned to blink page size }
    //
    static PageMemory* allocate(size_t payloadSize)
    {
        ASSERT(payloadSize > 0);

        // Virtual memory allocation routines operate in OS page sizes.
        // Round up the requested size to the nearest OS page size.
        payloadSize = roundToOsPageSize(payloadSize);

        // Overallocate by blinkPageSize and 2 times OS page size to
        // ensure a chunk of memory which is blinkPageSize aligned and
        // has a system page before and after to use for guarding. We
        // unmap the excess memory before returning.
        size_t allocationSize = payloadSize + 2 * osPageSize() + blinkPageSize;

#if OS(POSIX)
        Address base = static_cast<Address>(mmap(0, allocationSize, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0));
        RELEASE_ASSERT(base != MAP_FAILED);

        Address end = base + allocationSize;
        Address alignedBase = roundToBlinkPageBoundary(base);
        Address payloadBase = alignedBase + osPageSize();
        Address payloadEnd = payloadBase + payloadSize;
        Address blinkPageEnd = payloadEnd + osPageSize();

        // If the allocated memory was not blink page aligned, release
        // the memory before the aligned address.
        if (alignedBase != base)
            MemoryRegion(base, alignedBase - base).release();

        // Create guard pages by decommitting an OS page before and
        // after the payload.
        MemoryRegion(alignedBase, osPageSize()).decommit();
        MemoryRegion(payloadEnd, osPageSize()).decommit();

        // Free the additional memory at the end of the page, if any.
        if (blinkPageEnd < end)
            MemoryRegion(blinkPageEnd, end - blinkPageEnd).release();

        return new PageMemory(MemoryRegion(alignedBase, blinkPageEnd - alignedBase), MemoryRegion(payloadBase, payloadSize));
#else
        Address base = 0;
        Address alignedBase = 0;

        // On Windows it is impossible to partially release a region of
        // memory allocated by VirtualAlloc. To avoid wasting virtual
        // address space we reserve a large region, release it as a
        // whole, and then try to re-reserve an aligned region inside
        // it. Another allocation may grab the address range between
        // the release and the re-reserve, so retry a few times.
        for (int attempt = 0; attempt < 3; attempt++) {
            base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
            RELEASE_ASSERT(base);
            VirtualFree(base, 0, MEM_RELEASE);

            alignedBase = roundToBlinkPageBoundary(base);
            base = static_cast<Address>(VirtualAlloc(alignedBase, payloadSize + 2 * osPageSize(), MEM_RESERVE, PAGE_NOACCESS));
            if (base) {
                RELEASE_ASSERT(base == alignedBase);
                allocationSize = payloadSize + 2 * osPageSize();
                break;
            }
        }

        if (!base) {
            // We failed to avoid wasting virtual address space after
            // several attempts.
            base = static_cast<Address>(VirtualAlloc(0, allocationSize, MEM_RESERVE, PAGE_NOACCESS));
            RELEASE_ASSERT(base);

            // FIXME: If base is by accident blink page size aligned
            // here then we can create two pages out of reserved
            // space. Do this.
            alignedBase = roundToBlinkPageBoundary(base);
        }

        Address payloadBase = alignedBase + osPageSize();
        PageMemory* storage = new PageMemory(MemoryRegion(base, allocationSize), MemoryRegion(payloadBase, payloadSize));
        bool res = storage->commit();
        RELEASE_ASSERT(res);
        return storage;
#endif
    }

private:
    PageMemory(const MemoryRegion& reserved, const MemoryRegion& writable)
        : m_reserved(reserved)
        , m_writable(writable)
    {
        ASSERT(reserved.contains(writable));
    }

    MemoryRegion m_reserved;
    MemoryRegion m_writable;
};

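// Initialization and shutdown of the heap delegate to ThreadState, which
// manages the per-thread state of the garbage collected heap.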
void Heap::init(intptr_t* startOfStack)
{
    ThreadState::init(startOfStack);
}

void Heap::shutdown()
{
    ThreadState::shutdown();
}

} // namespace WebCore