/*
 *  Copyright (C) 1999-2000 Harri Porten (porten@kde.org)
 *  Copyright (C) 2001 Peter Kelly (pmk@post.com)
 *  Copyright (C) 2003, 2004, 2005, 2006, 2007, 2008, 2009 Apple Inc. All rights reserved.
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, write to the Free Software
 *  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301  USA
 *
 */

#ifndef MarkedBlock_h
#define MarkedBlock_h

#include <wtf/Bitmap.h>
#include <wtf/PageAllocationAligned.h>
#include <wtf/StdLibExtras.h>

namespace JSC {

    class Heap;
    class JSCell;
    class JSGlobalData;

    typedef uintptr_t Bits;

    static const size_t KB = 1024;

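    // A MarkedBlock is a 16 KB, 16 KB-aligned chunk of memory carved into
    // fixed-size cells. The block's header occupies the first few atoms of
    // the chunk itself, so any cell pointer can be mapped back to its owning
    // block by masking off the low bits of the address (see blockFor()).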
    class MarkedBlock {
    public:
        static const size_t atomSize = sizeof(double); // Ensures natural alignment for all built-in types.

        static MarkedBlock* create(JSGlobalData*, size_t cellSize);
        static void destroy(MarkedBlock*);

        static bool isAtomAligned(const void*);
        static MarkedBlock* blockFor(const void*);
        static size_t firstAtom();

        Heap* heap() const;

        void setPrev(MarkedBlock*);
        void setNext(MarkedBlock*);
        MarkedBlock* prev() const;
        MarkedBlock* next() const;

        void* allocate();
        void reset();
        void sweep();

        bool isEmpty();

        void clearMarks();
        size_t markCount();

        size_t cellSize();

        size_t size();
        size_t capacity();

        bool contains(const void*);
        size_t atomNumber(const void*);
        bool isMarked(const void*);
        bool testAndSetMarked(const void*);
        void setMarked(const void*);

        template <typename Functor> void forEach(Functor&);

    private:
        static const size_t blockSize = 16 * KB;
        static const size_t blockMask = ~(blockSize - 1); // blockSize must be a power of two.

        static const size_t atomMask = ~(atomSize - 1); // atomSize must be a power of two.

        static const size_t atomsPerBlock = blockSize / atomSize;

        typedef char Atom[atomSize];

        MarkedBlock(const PageAllocationAligned&, JSGlobalData*, size_t cellSize);
        Atom* atoms();

        size_t m_nextAtom;
        size_t m_endAtom; // This is a fuzzy end. Always test for < m_endAtom.
        size_t m_atomsPerCell;
        WTF::Bitmap<atomsPerBlock> m_marks;
        PageAllocationAligned m_allocation;
        Heap* m_heap;
        MarkedBlock* m_prev;
        MarkedBlock* m_next;
    };

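    // The block header and its cells share the same chunk: atoms()
    // reinterprets the block itself as an array of atoms, and firstAtom()
    // is the index of the first atom past the header, i.e. the first atom
    // actually available for cell storage.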
    inline size_t MarkedBlock::firstAtom()
    {
        return WTF::roundUpToMultipleOf<atomSize>(sizeof(MarkedBlock)) / atomSize;
    }

    inline MarkedBlock::Atom* MarkedBlock::atoms()
    {
        return reinterpret_cast<Atom*>(this);
    }

    inline bool MarkedBlock::isAtomAligned(const void* p)
    {
        return !(reinterpret_cast<uintptr_t>(p) & ~atomMask);
    }

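    // Blocks are allocated on blockSize boundaries, so clearing the low
    // log2(blockSize) bits of a cell pointer recovers the address of its
    // block header. For example, with blockSize == 16 KB, a cell at
    // 0x1234c070 belongs to the MarkedBlock at 0x1234c000.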
    inline MarkedBlock* MarkedBlock::blockFor(const void* p)
    {
        return reinterpret_cast<MarkedBlock*>(reinterpret_cast<uintptr_t>(p) & blockMask);
    }

    inline Heap* MarkedBlock::heap() const
    {
        return m_heap;
    }

    inline void MarkedBlock::setPrev(MarkedBlock* prev)
    {
        m_prev = prev;
    }

    inline void MarkedBlock::setNext(MarkedBlock* next)
    {
        m_next = next;
    }

    inline MarkedBlock* MarkedBlock::prev() const
    {
        return m_prev;
    }

    inline MarkedBlock* MarkedBlock::next() const
    {
        return m_next;
    }

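    // reset() rewinds the bump-allocation cursor to the first atom past the
    // block header, making the whole block available for allocation again.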
    inline void MarkedBlock::reset()
    {
        m_nextAtom = firstAtom();
    }

    inline bool MarkedBlock::isEmpty()
    {
        return m_marks.isEmpty();
    }

    inline void MarkedBlock::clearMarks()
    {
        m_marks.clearAll();
    }

    inline size_t MarkedBlock::markCount()
    {
        return m_marks.count();
    }

    inline size_t MarkedBlock::cellSize()
    {
        return m_atomsPerCell * atomSize;
    }

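    // size() is the number of bytes occupied by live (marked) cells, while
    // capacity() is the full footprint of the underlying aligned allocation,
    // header included.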
    inline size_t MarkedBlock::size()
    {
        return markCount() * cellSize();
    }

    inline size_t MarkedBlock::capacity()
    {
        return m_allocation.size();
    }

    inline bool MarkedBlock::contains(const void* p)
    {
        // Since we mark the first atom of every cell when allocating and/or
        // marking, any pointer to a marked atom points to the head of a valid,
        // live cell. Checking the mark bit guards against reviving an object
        // in a zombie state.

        ASSERT(p && isAtomAligned(p));
        return isMarked(p);
    }

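    // Mark bits are tracked per atom, not per cell: atomNumber() converts a
    // cell pointer into an index into m_marks from its byte offset within
    // the block.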
    inline size_t MarkedBlock::atomNumber(const void* p)
    {
        return (reinterpret_cast<uintptr_t>(p) - reinterpret_cast<uintptr_t>(this)) / atomSize;
    }

    inline bool MarkedBlock::isMarked(const void* p)
    {
        return m_marks.get(atomNumber(p));
    }

    inline bool MarkedBlock::testAndSetMarked(const void* p)
    {
        return m_marks.testAndSet(atomNumber(p));
    }

    inline void MarkedBlock::setMarked(const void* p)
    {
        m_marks.set(atomNumber(p));
    }

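    // forEach() walks the block cell by cell (stepping m_atomsPerCell atoms
    // at a time) and invokes the functor on each marked cell. Only the head
    // atom of a cell carries a mark bit (see contains()), so each live cell
    // is visited exactly once.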
    template <typename Functor> inline void MarkedBlock::forEach(Functor& functor)
    {
        for (size_t i = firstAtom(); i < m_endAtom; i += m_atomsPerCell) {
            if (!m_marks.get(i))
                continue;
            functor(reinterpret_cast<JSCell*>(&atoms()[i]));
        }
    }
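
    // A minimal sketch of a forEach() functor, for illustration only; the
    // CountLiveCells name is hypothetical and not part of JSC:
    //
    //     struct CountLiveCells {
    //         size_t count;
    //         CountLiveCells() : count(0) { }
    //         void operator()(JSCell*) { ++count; }
    //     };
    //
    //     CountLiveCells counter;
    //     block->forEach(counter);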

} // namespace JSC

#endif // MarkedBlock_h