/*
 * Copyright (C) 2009 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#include "ExecutableAllocator.h"

#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)

#include <errno.h>

#include "TCSpinLock.h"
#include <sys/mman.h>
#include <unistd.h>
#include <wtf/AVLTree.h>
#include <wtf/PageReservation.h>
#include <wtf/VMTags.h>

#if OS(LINUX)
#include <stdio.h>
#endif

using namespace WTF;

namespace JSC {

#define TwoPow(n) (1ull << (n))

class AllocationTableSizeClass {
public:
    AllocationTableSizeClass(size_t size, size_t blockSize, unsigned log2BlockSize)
        : m_blockSize(blockSize)
    {
        ASSERT(blockSize == TwoPow(log2BlockSize));

        // Calculate the number of blocks needed to hold size.
        size_t blockMask = blockSize - 1;
        m_blockCount = (size + blockMask) >> log2BlockSize;

        // Align to the smallest power of two >= m_blockCount.
        m_blockAlignment = 1;
        while (m_blockAlignment < m_blockCount)
            m_blockAlignment += m_blockAlignment;
    }
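
    // Worked example (illustrative): for size == 10000 and blockSize == 4096
    // (log2BlockSize == 12), m_blockCount == (10000 + 4095) >> 12 == 3 and
    // m_blockAlignment == 4, the smallest power of two >= 3. Such an
    // allocation occupies three blocks placed at the top of a four-block
    // aligned window, matching the mask layout used by the tables below.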

    size_t blockSize() const { return m_blockSize; }
    size_t blockCount() const { return m_blockCount; }
    size_t blockAlignment() const { return m_blockAlignment; }

    size_t size()
    {
        return m_blockSize * m_blockCount;
    }

private:
    size_t m_blockSize;
    size_t m_blockCount;
    size_t m_blockAlignment;
};

template<unsigned log2Entries>
class AllocationTableLeaf {
    typedef uint64_t BitField;

public:
    static const unsigned log2SubregionSize = 12; // 2^12 == 4096, the assumed page size.
    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;

    static const size_t subregionSize = TwoPow(log2SubregionSize);
    static const size_t regionSize = TwoPow(log2RegionSize);
    static const unsigned entries = TwoPow(log2Entries);
    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableLeaf_entries_fit_in_BitField);

    AllocationTableLeaf()
        : m_allocated(0)
    {
    }

    ~AllocationTableLeaf()
    {
        ASSERT(isEmpty());
    }

    size_t allocate(AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() == subregionSize);
        ASSERT(!isFull());

        size_t alignment = sizeClass.blockAlignment();
        size_t count = sizeClass.blockCount();
        // Use this mask to check for spans of free blocks.
        BitField mask = ((1ull << count) - 1) << (alignment - count);

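        // Illustrative: with count == 2 and alignment == 4, the initial mask
        // is 0b1100 - the top two entries of the first four-entry window.
        // Each pass of the loop below shifts the window up by `alignment`.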
        // Step in units of alignment size.
        for (unsigned i = 0; i < entries; i += alignment) {
            if (!(m_allocated & mask)) {
                m_allocated |= mask;
                return (i + (alignment - count)) << log2SubregionSize;
            }
            mask <<= alignment;
        }
        return notFound;
    }

    void free(size_t location, AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() == subregionSize);

        size_t entry = location >> log2SubregionSize;
        size_t count = sizeClass.blockCount();
        BitField mask = ((1ull << count) - 1) << entry;

        ASSERT((m_allocated & mask) == mask);
        m_allocated &= ~mask;
    }

    bool isEmpty()
    {
        return !m_allocated;
    }

    bool isFull()
    {
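        // !~x is true only when every bit of x is set; this assumes the
        // table fills its 64-bit BitField, as the 64-entry leaf below does.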
        return !~m_allocated;
    }

    static size_t size()
    {
        return regionSize;
    }

    static AllocationTableSizeClass classForSize(size_t size)
    {
        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
    }

#ifndef NDEBUG
    void dump(size_t parentOffset = 0, unsigned indent = 0)
    {
        for (unsigned i = 0; i < indent; ++i)
            fprintf(stderr, "    ");
        fprintf(stderr, "%08x: [%016llx]\n", (int)parentOffset, static_cast<unsigned long long>(m_allocated));
    }
#endif

private:
    BitField m_allocated;
};


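// LazyAllocationTable defers building its NextLevel table until the first
// allocation lands in it, and tears the table down again once it empties.
// This keeps sparse directories cheap: in the 1GB configuration below, only
// the 16MB regions that actually hold JIT code pay for their bookkeeping.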
template<class NextLevel>
class LazyAllocationTable {
public:
    static const unsigned log2RegionSize = NextLevel::log2RegionSize;
    static const unsigned entries = NextLevel::entries;

    LazyAllocationTable()
        : m_ptr(0)
    {
    }

    ~LazyAllocationTable()
    {
        ASSERT(isEmpty());
    }

    size_t allocate(AllocationTableSizeClass& sizeClass)
    {
        if (!m_ptr)
            m_ptr = new NextLevel();
        return m_ptr->allocate(sizeClass);
    }

    void free(size_t location, AllocationTableSizeClass& sizeClass)
    {
        ASSERT(m_ptr);
        m_ptr->free(location, sizeClass);
        if (m_ptr->isEmpty()) {
            delete m_ptr;
            m_ptr = 0;
        }
    }

    bool isEmpty()
    {
        return !m_ptr;
    }

    bool isFull()
    {
        return m_ptr && m_ptr->isFull();
    }

    static size_t size()
    {
        return NextLevel::size();
    }

#ifndef NDEBUG
    void dump(size_t parentOffset = 0, unsigned indent = 0)
    {
        ASSERT(m_ptr);
        m_ptr->dump(parentOffset, indent);
    }
#endif

    static AllocationTableSizeClass classForSize(size_t size)
    {
        return NextLevel::classForSize(size);
    }

private:
    NextLevel* m_ptr;
};

template<class NextLevel, unsigned log2Entries>
class AllocationTableDirectory {
    typedef uint64_t BitField;

public:
    static const unsigned log2SubregionSize = NextLevel::log2RegionSize;
    static const unsigned log2RegionSize = log2SubregionSize + log2Entries;

    static const size_t subregionSize = TwoPow(log2SubregionSize);
    static const size_t regionSize = TwoPow(log2RegionSize);
    static const unsigned entries = TwoPow(log2Entries);
    COMPILE_ASSERT(entries <= (sizeof(BitField) * 8), AllocationTableDirectory_entries_fit_in_BitField);

    AllocationTableDirectory()
        : m_full(0)
        , m_hasSuballocation(0)
    {
    }

    ~AllocationTableDirectory()
    {
        ASSERT(isEmpty());
    }

    size_t allocate(AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() <= subregionSize);
        ASSERT(!isFull());

        if (sizeClass.blockSize() < subregionSize) {
            BitField bit = 1;
            for (unsigned i = 0; i < entries; ++i, bit += bit) {
                if (m_full & bit)
                    continue;
                size_t location = m_suballocations[i].allocate(sizeClass);
                if (location != notFound) {
                    // If this didn't already have a subregion, it does now!
                    m_hasSuballocation |= bit;
                    // Mirror the suballocation's full bit.
                    if (m_suballocations[i].isFull())
                        m_full |= bit;
                    return (i * subregionSize) | location;
                }
            }
            return notFound;
        }

        // A block is allocated if either it is fully allocated or contains suballocations.
        BitField allocated = m_full | m_hasSuballocation;

        size_t alignment = sizeClass.blockAlignment();
        size_t count = sizeClass.blockCount();
        // Use this mask to check for spans of free blocks.
        BitField mask = ((1ull << count) - 1) << (alignment - count);
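        // This is the same windowed scan as AllocationTableLeaf::allocate,
        // one level up: each bit now covers an entire subregion of pages.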

        // Step in units of alignment size.
        for (unsigned i = 0; i < entries; i += alignment) {
            if (!(allocated & mask)) {
                m_full |= mask;
                return (i + (alignment - count)) << log2SubregionSize;
            }
            mask <<= alignment;
        }
        return notFound;
    }

    void free(size_t location, AllocationTableSizeClass& sizeClass)
    {
        ASSERT(sizeClass.blockSize() <= subregionSize);

        size_t entry = location >> log2SubregionSize;

        if (sizeClass.blockSize() < subregionSize) {
            BitField bit = 1ull << entry;
            m_suballocations[entry].free(location & (subregionSize - 1), sizeClass);
            // Check if the suballocation is now empty.
            if (m_suballocations[entry].isEmpty())
                m_hasSuballocation &= ~bit;
            // No need to check, it clearly isn't full any more!
            m_full &= ~bit;
        } else {
            size_t count = sizeClass.blockCount();
            BitField mask = ((1ull << count) - 1) << entry;
            ASSERT((m_full & mask) == mask);
            ASSERT(!(m_hasSuballocation & mask));
            m_full &= ~mask;
        }
    }

    bool isEmpty()
    {
        return !(m_full | m_hasSuballocation);
    }

    bool isFull()
    {
        return !~m_full;
    }

    static size_t size()
    {
        return regionSize;
    }

    static AllocationTableSizeClass classForSize(size_t size)
    {
        if (size < subregionSize) {
            AllocationTableSizeClass sizeClass = NextLevel::classForSize(size);
            if (sizeClass.size() < NextLevel::size())
                return sizeClass;
        }
        return AllocationTableSizeClass(size, subregionSize, log2SubregionSize);
    }

#ifndef NDEBUG
    void dump(size_t parentOffset = 0, unsigned indent = 0)
    {
        for (unsigned i = 0; i < indent; ++i)
            fprintf(stderr, "    ");
        fprintf(stderr, "%08x: [", (int)parentOffset);
        for (unsigned i = 0; i < entries; ++i) {
            BitField bit = 1ull << i;
            char c = m_hasSuballocation & bit
                ? (m_full & bit ? 'N' : 'n')
                : (m_full & bit ? 'F' : '-');
            fprintf(stderr, "%c", c);
        }
        fprintf(stderr, "]\n");

        for (unsigned i = 0; i < entries; ++i) {
            BitField bit = 1ull << i;
            size_t offset = parentOffset | (subregionSize * i);
            if (m_hasSuballocation & bit)
                m_suballocations[i].dump(offset, indent + 1);
        }
    }
#endif

private:
    NextLevel m_suballocations[entries];
    // Subregions exist in one of four states:
    // (1) empty (both bits clear)
    // (2) fully allocated as a single allocation (m_full set)
    // (3) partially allocated through suballocations (m_hasSuballocation set)
    // (4) fully allocated through suballocations (both bits set)
    BitField m_full;
    BitField m_hasSuballocation;
};

typedef AllocationTableLeaf<6> PageTables256KB;
typedef AllocationTableDirectory<PageTables256KB, 6> PageTables16MB;
typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 1> PageTables32MB;
typedef AllocationTableDirectory<LazyAllocationTable<PageTables16MB>, 6> PageTables1GB;
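
// Size derivation (illustrative): a leaf tracks 2^6 pages of 2^12 bytes,
// i.e. 2^18 == 256KB. Each directory level multiplies by its entry count:
//   PageTables16MB: 256KB * 64 == 16MB
//   PageTables32MB: 16MB  * 2  == 32MB
//   PageTables1GB:  16MB  * 64 == 1GB
// These figures match the ASSERTs in the FixedVMPoolAllocator constructor.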

#if CPU(ARM)
typedef PageTables16MB FixedVMPoolPageTables;
#elif CPU(X86_64)
typedef PageTables1GB FixedVMPoolPageTables;
#else
typedef PageTables32MB FixedVMPoolPageTables;
#endif
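// A plausible rationale for these sizes: the pool trades virtual address
// space for headroom. x86-64 has address space to spare, so it reserves a
// full 1GB, while the 32-bit targets reserve far less. (Only address space
// is reserved here; physical pages are committed on demand.)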


class FixedVMPoolAllocator
{
public:
    FixedVMPoolAllocator()
    {
        ASSERT(PageTables256KB::size() == 256 * 1024);
        ASSERT(PageTables16MB::size() == 16 * 1024 * 1024);
        ASSERT(PageTables32MB::size() == 32 * 1024 * 1024);
        ASSERT(PageTables1GB::size() == 1024 * 1024 * 1024);

        m_reservation = PageReservation::reserve(FixedVMPoolPageTables::size(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
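        // The whole pool's address space is reserved up front; pages are
        // committed in alloc() and decommitted in free(), so physical memory
        // tracks live JIT code rather than the reservation size.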
#if !ENABLE(INTERPRETER)
        if (!isValid())
            CRASH();
#endif
    }

    ExecutablePool::Allocation alloc(size_t requestedSize)
    {
        ASSERT(requestedSize);
        AllocationTableSizeClass sizeClass = classForSize(requestedSize);
        size_t size = sizeClass.size();
        ASSERT(size);

        if (size >= FixedVMPoolPageTables::size())
            CRASH();
        if (m_pages.isFull())
            CRASH();

        size_t offset = m_pages.allocate(sizeClass);
        if (offset == notFound)
            CRASH();

        void* pointer = offsetToPointer(offset);
        m_reservation.commit(pointer, size);
        return ExecutablePool::Allocation(pointer, size);
    }

    void free(ExecutablePool::Allocation allocation)
    {
        void* pointer = allocation.base();
        size_t size = allocation.size();
        ASSERT(size);

        m_reservation.decommit(pointer, size);

        AllocationTableSizeClass sizeClass = classForSize(size);
        ASSERT(sizeClass.size() == size);
        m_pages.free(pointerToOffset(pointer), sizeClass);
    }

    size_t allocated()
    {
        return m_reservation.committed();
    }

    bool isValid() const
    {
        return !!m_reservation;
    }

private:
    AllocationTableSizeClass classForSize(size_t size)
    {
        return FixedVMPoolPageTables::classForSize(size);
    }

    void* offsetToPointer(size_t offset)
    {
        return reinterpret_cast<void*>(reinterpret_cast<intptr_t>(m_reservation.base()) + offset);
    }

    size_t pointerToOffset(void* pointer)
    {
        return reinterpret_cast<intptr_t>(pointer) - reinterpret_cast<intptr_t>(m_reservation.base());
    }

    PageReservation m_reservation;
    FixedVMPoolPageTables m_pages;
};
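
// Usage sketch (illustrative, not part of the build):
//
//   FixedVMPoolAllocator allocator;
//   ExecutablePool::Allocation block = allocator.alloc(8192); // commits two pages
//   // ... emit machine code into block.base() ...
//   allocator.free(block);                                    // decommits them
//
// In practice all access goes through the ExecutablePool / ExecutableAllocator
// entry points below, which serialize on `spinlock`.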


static SpinLock spinlock = SPINLOCK_INITIALIZER;
static FixedVMPoolAllocator* allocator = 0;


size_t ExecutableAllocator::committedByteCount()
{
    SpinLockHolder lockHolder(&spinlock);
    return allocator ? allocator->allocated() : 0;
}

void ExecutableAllocator::intializePageSize()
{
    ExecutableAllocator::pageSize = getpagesize();
}

bool ExecutableAllocator::isValid() const
{
    SpinLockHolder lockHolder(&spinlock);
    if (!allocator)
        allocator = new FixedVMPoolAllocator();
    return allocator->isValid();
}

bool ExecutableAllocator::underMemoryPressure()
{
    // Technically we should take the spin lock here, but we don't care if we get stale data.
    // This is only really a heuristic anyway.
    return allocator && (allocator->allocated() > (FixedVMPoolPageTables::size() / 2));
}

ExecutablePool::Allocation ExecutablePool::systemAlloc(size_t size)
{
    SpinLockHolder lockHolder(&spinlock);
    ASSERT(allocator);
    return allocator->alloc(size);
}

void ExecutablePool::systemRelease(ExecutablePool::Allocation& allocation)
{
    SpinLockHolder lockHolder(&spinlock);
    ASSERT(allocator);
    allocator->free(allocation);
}

}

#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED)