      1 /*
      2  * Copyright (C) 2013 Google Inc. All rights reserved.
      3  *
      4  * Redistribution and use in source and binary forms, with or without
      5  * modification, are permitted provided that the following conditions are
      6  * met:
      7  *
      8  *     * Redistributions of source code must retain the above copyright
      9  * notice, this list of conditions and the following disclaimer.
     10  *     * Redistributions in binary form must reproduce the above
     11  * copyright notice, this list of conditions and the following disclaimer
     12  * in the documentation and/or other materials provided with the
     13  * distribution.
     14  *     * Neither the name of Google Inc. nor the names of its
     15  * contributors may be used to endorse or promote products derived from
     16  * this software without specific prior written permission.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     29  */
     30 
     31 #include "config.h"
     32 #include "platform/heap/ThreadState.h"
     33 
     34 #include "platform/ScriptForbiddenScope.h"
     35 #include "platform/TraceEvent.h"
     36 #include "platform/heap/AddressSanitizer.h"
     37 #include "platform/heap/CallbackStack.h"
     38 #include "platform/heap/Handle.h"
     39 #include "platform/heap/Heap.h"
     40 #include "public/platform/Platform.h"
     41 #include "public/platform/WebThread.h"
     42 #include "wtf/ThreadingPrimitives.h"
     43 #if ENABLE(GC_PROFILE_HEAP)
     44 #include "platform/TracedValue.h"
     45 #endif
     46 
     47 #if OS(WIN)
     48 #include <stddef.h>
     49 #include <windows.h>
     50 #include <winnt.h>
     51 #elif defined(__GLIBC__)
     52 extern "C" void* __libc_stack_end;  // NOLINT
     53 #endif
     54 
     55 #if defined(MEMORY_SANITIZER)
     56 #include <sanitizer/msan_interface.h>
     57 #endif
     58 
     59 namespace blink {
     60 
     61 static void* getStackStart()
     62 {
     63 #if defined(__GLIBC__) || OS(ANDROID)
     64     pthread_attr_t attr;
     65     if (!pthread_getattr_np(pthread_self(), &attr)) {
     66         void* base;
     67         size_t size;
     68         int error = pthread_attr_getstack(&attr, &base, &size);
     69         RELEASE_ASSERT(!error);
     70         pthread_attr_destroy(&attr);
     71         return reinterpret_cast<Address>(base) + size;
     72     }
     73 #if defined(__GLIBC__)
     74     // pthread_getattr_np can fail for the main thread. In this case,
     75     // just like NaCl, we rely on __libc_stack_end to give us
     76     // the start of the stack.
     77     // See https://code.google.com/p/nativeclient/issues/detail?id=3431.
     78     return __libc_stack_end;
     79 #else
     80     ASSERT_NOT_REACHED();
     81     return 0;
     82 #endif
     83 #elif OS(MACOSX)
     84     return pthread_get_stackaddr_np(pthread_self());
     85 #elif OS(WIN) && COMPILER(MSVC)
     86     // On Windows, stack limits for the current thread are available in
     87     // the thread information block (TIB). Its fields can be accessed through
     88     // the FS segment register on x86 and the GS segment register on x86_64.
     89 #ifdef _WIN64
     90     return reinterpret_cast<void*>(__readgsqword(offsetof(NT_TIB64, StackBase)));
     91 #else
     92     return reinterpret_cast<void*>(__readfsdword(offsetof(NT_TIB, StackBase)));
     93 #endif
     94 #else
     95 #error Unsupported getStackStart on this platform.
     96 #endif
     97 }
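
// Note: on every platform handled above the stack grows towards lower
// addresses, so the value returned here is the highest address of the stack.
// Stack-end values recorded at safe points are therefore numerically smaller
// than m_startOfStack (see copyStackUntilSafePointScope below).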
     98 
     99 // The maximum number of WrapperPersistentRegions to keep around in the
    100 // m_pooledWrapperPersistents pool.
    101 static const size_t MaxPooledWrapperPersistentRegionCount = 2;
    102 
    103 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = 0;
    104 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
    105 SafePointBarrier* ThreadState::s_safePointBarrier = 0;
    106 bool ThreadState::s_inGC = false;
    107 
    108 static Mutex& threadAttachMutex()
    109 {
    110     AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    111     return mutex;
    112 }
    113 
    114 static double lockingTimeout()
    115 {
    116     // Wait time for parking all threads is at most 100 ms.
    117     return 0.100;
    118 }
    119 
    120 
    121 typedef void (*PushAllRegistersCallback)(SafePointBarrier*, ThreadState*, intptr_t*);
    122 extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegistersCallback);
    123 
    124 class SafePointBarrier {
    125 public:
    126     SafePointBarrier() : m_canResume(1), m_unparkedThreadCount(0) { }
    127     ~SafePointBarrier() { }
    128 
    129     // Request that other attached threads not already at a safe point park themselves at safepoints.
    130     bool parkOthers()
    131     {
    132         ASSERT(ThreadState::current()->isAtSafePoint());
    133 
    134         // Lock threadAttachMutex() to prevent threads from attaching.
    135         threadAttachMutex().lock();
    136 
    137         ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    138 
    139         MutexLocker locker(m_mutex);
    140         atomicAdd(&m_unparkedThreadCount, threads.size());
    141         releaseStore(&m_canResume, 0);
    142 
    143         ThreadState* current = ThreadState::current();
    144         for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
    145             if (*it == current)
    146                 continue;
    147 
    148             const Vector<ThreadState::Interruptor*>& interruptors = (*it)->interruptors();
    149             for (size_t i = 0; i < interruptors.size(); i++)
    150                 interruptors[i]->requestInterrupt();
    151         }
    152 
    153         while (acquireLoad(&m_unparkedThreadCount) > 0) {
    154             double expirationTime = currentTime() + lockingTimeout();
    155             if (!m_parked.timedWait(m_mutex, expirationTime)) {
    156                 // One of the other threads did not return to a safepoint within the maximum
    157                 // time we allow for threads to be parked. Abandon the GC and resume the
    158                 // currently parked threads.
    159                 resumeOthers(true);
    160                 return false;
    161             }
    162         }
    163         return true;
    164     }
    165 
    166     void resumeOthers(bool barrierLocked = false)
    167     {
    168         ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    169         atomicSubtract(&m_unparkedThreadCount, threads.size());
    170         releaseStore(&m_canResume, 1);
    171 
    172         // FIXME: Resumed threads will all contend for m_mutex just to unlock it
    173         // later which is a waste of resources.
    174         if (UNLIKELY(barrierLocked)) {
    175             m_resume.broadcast();
    176         } else {
    180             MutexLocker locker(m_mutex);
    181             m_resume.broadcast();
    182         }
    183 
    184         ThreadState* current = ThreadState::current();
    185         for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
    186             if (*it == current)
    187                 continue;
    188 
    189             const Vector<ThreadState::Interruptor*>& interruptors = (*it)->interruptors();
    190             for (size_t i = 0; i < interruptors.size(); i++)
    191                 interruptors[i]->clearInterrupt();
    192         }
    193 
    194         threadAttachMutex().unlock();
    195         ASSERT(ThreadState::current()->isAtSafePoint());
    196     }
    197 
    198     void checkAndPark(ThreadState* state, SafePointAwareMutexLocker* locker = 0)
    199     {
    200         ASSERT(!state->isSweepInProgress());
    201         if (!acquireLoad(&m_canResume)) {
    202             // If we are leaving the safepoint from a SafePointAwareMutexLocker,
    203             // call out to release the lock before going to sleep. This enables the
    204             // lock to be acquired in the sweep phase, e.g. during weak processing
    205             // or finalization. The SafePointAwareMutexLocker will reenter the
    206             // safepoint and reacquire the lock after leaving this safepoint.
    207             if (locker)
    208                 locker->reset();
    209             pushAllRegisters(this, state, parkAfterPushRegisters);
    210         }
    211     }
    212 
    213     void enterSafePoint(ThreadState* state)
    214     {
    215         ASSERT(!state->isSweepInProgress());
    216         pushAllRegisters(this, state, enterSafePointAfterPushRegisters);
    217     }
    218 
    219     void leaveSafePoint(ThreadState* state, SafePointAwareMutexLocker* locker = 0)
    220     {
    221         if (atomicIncrement(&m_unparkedThreadCount) > 0)
    222             checkAndPark(state, locker);
    223     }
    224 
    225 private:
    226     void doPark(ThreadState* state, intptr_t* stackEnd)
    227     {
    228         state->recordStackEnd(stackEnd);
    229         MutexLocker locker(m_mutex);
    230         if (!atomicDecrement(&m_unparkedThreadCount))
    231             m_parked.signal();
    232         while (!acquireLoad(&m_canResume))
    233             m_resume.wait(m_mutex);
    234         atomicIncrement(&m_unparkedThreadCount);
    235     }
    236 
    237     static void parkAfterPushRegisters(SafePointBarrier* barrier, ThreadState* state, intptr_t* stackEnd)
    238     {
    239         barrier->doPark(state, stackEnd);
    240     }
    241 
    242     void doEnterSafePoint(ThreadState* state, intptr_t* stackEnd)
    243     {
    244         state->recordStackEnd(stackEnd);
    245         state->copyStackUntilSafePointScope();
    246         // m_unparkedThreadCount tracks the number of unparked threads. It is
    247         // positive if and only if we have requested other threads to park
    248         // at safe-points in preparation for GC. The last thread to park
    249         // itself will make the counter hit zero and should notify the GC thread
    250         // that it is safe to proceed.
    251         // If no other thread is waiting for other threads to park then
    252         // this counter can be negative: if N threads are at safe-points
    253         // the counter will be -N.
    254         if (!atomicDecrement(&m_unparkedThreadCount)) {
    255             MutexLocker locker(m_mutex);
    256             m_parked.signal(); // Safe point reached.
    257         }
    258     }
    259 
    260     static void enterSafePointAfterPushRegisters(SafePointBarrier* barrier, ThreadState* state, intptr_t* stackEnd)
    261     {
    262         barrier->doEnterSafePoint(state, stackEnd);
    263     }
    264 
    265     volatile int m_canResume;
    266     volatile int m_unparkedThreadCount;
    267     Mutex m_mutex;
    268     ThreadCondition m_parked;
    269     ThreadCondition m_resume;
    270 };
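
// Illustrative sketch (not code from this file) of how a GC entry point is
// expected to drive the barrier via ThreadState::stopThreads() and
// ThreadState::resumeThreads() below; the scope object and the marking step
// shown here are assumptions for the sketch, not this file's API:
//
//   SafePointScope scope(NoHeapPointersOnStack); // park the current thread first
//   if (ThreadState::stopThreads()) {            // parkOthers(); may time out
//       // ... mark live objects and prepare the heaps for sweeping ...
//       ThreadState::resumeThreads();            // resumeOthers()
//   }                                            // on timeout the GC is abandoned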
    271 
    272 BaseHeapPage::BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state)
    273     : m_storage(storage)
    274     , m_gcInfo(gcInfo)
    275     , m_threadState(state)
    276     , m_terminating(false)
    277     , m_tracedAfterOrphaned(false)
    278 {
    279     ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
    280 }
    281 
    282 // Statically unroll the heap initialization loop so that the compiler knows
    283 // the heap index at compile time when using HeapIndexTrait.
    284 template<int num> struct InitializeHeaps {
    285     static const int index = num - 1;
    286     static void init(BaseHeap** heaps, ThreadState* state)
    287     {
    288         InitializeHeaps<index>::init(heaps, state);
    289         heaps[index] = new typename HeapIndexTrait<index>::HeapType(state, index);
    290     }
    291 };
    292 template<> struct InitializeHeaps<0> {
    293     static void init(BaseHeap** heaps, ThreadState* state) { }
    294 };
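
// For example, with three heaps InitializeHeaps<3>::init(heaps, state)
// effectively expands to:
//
//   heaps[0] = new HeapIndexTrait<0>::HeapType(state, 0);
//   heaps[1] = new HeapIndexTrait<1>::HeapType(state, 1);
//   heaps[2] = new HeapIndexTrait<2>::HeapType(state, 2);
//
// so each slot is constructed with the heap type selected for its index at
// compile time.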
    295 
    296 ThreadState::ThreadState()
    297     : m_thread(currentThread())
    298     , m_liveWrapperPersistents(new WrapperPersistentRegion())
    299     , m_pooledWrapperPersistents(0)
    300     , m_pooledWrapperPersistentRegionCount(0)
    301     , m_persistents(adoptPtr(new PersistentAnchor()))
    302     , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
    303     , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
    304     , m_safePointScopeMarker(0)
    305     , m_atSafePoint(false)
    306     , m_interruptors()
    307     , m_gcRequested(false)
    308     , m_forcePreciseGCForTesting(false)
    309     , m_sweepRequested(0)
    310     , m_sweepInProgress(false)
    311     , m_noAllocationCount(0)
    312     , m_inGC(false)
    313     , m_heapContainsCache(adoptPtr(new HeapContainsCache()))
    314     , m_isTerminating(false)
    315     , m_lowCollectionRate(false)
    316     , m_numberOfSweeperTasks(0)
    317 #if defined(ADDRESS_SANITIZER)
    318     , m_asanFakeStack(__asan_get_current_fake_stack())
    319 #endif
    320 {
    321     ASSERT(!**s_threadSpecific);
    322     **s_threadSpecific = this;
    323 
    324     InitializeHeaps<NumberOfHeaps>::init(m_heaps, this);
    325 
    326     m_weakCallbackStack = new CallbackStack();
    327 
    328     if (blink::Platform::current())
    329         m_sweeperThread = adoptPtr(blink::Platform::current()->createThread("Blink GC Sweeper"));
    330 }
    331 
    332 ThreadState::~ThreadState()
    333 {
    334     checkThread();
    335     delete m_weakCallbackStack;
    336     m_weakCallbackStack = 0;
    337     for (int i = 0; i < NumberOfHeaps; i++)
    338         delete m_heaps[i];
    339     deleteAllValues(m_interruptors);
    340     while (m_liveWrapperPersistents) {
    341         WrapperPersistentRegion* region = WrapperPersistentRegion::removeHead(&m_liveWrapperPersistents);
    342         delete region;
    343     }
    344     while (m_pooledWrapperPersistents) {
    345         WrapperPersistentRegion* region = WrapperPersistentRegion::removeHead(&m_pooledWrapperPersistents);
    346         delete region;
    347     }
    348     **s_threadSpecific = 0;
    349 }
    350 
    351 void ThreadState::init()
    352 {
    353     s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>();
    354     s_safePointBarrier = new SafePointBarrier;
    355 }
    356 
    357 void ThreadState::shutdown()
    358 {
    359     delete s_safePointBarrier;
    360     s_safePointBarrier = 0;
    361 
    362     // Thread-local storage shouldn't be disposed, so we don't call ~ThreadSpecific().
    363 }
    364 
    365 void ThreadState::attachMainThread()
    366 {
    367     RELEASE_ASSERT(!Heap::s_shutdownCalled);
    368     MutexLocker locker(threadAttachMutex());
    369     ThreadState* state = new(s_mainThreadStateStorage) ThreadState();
    370     attachedThreads().add(state);
    371 }
    372 
    373 void ThreadState::detachMainThread()
    374 {
    375     // Enter a safe point before trying to acquire threadAttachMutex
    376     // to avoid a deadlock if another thread is preparing for GC, has acquired
    377     // threadAttachMutex and is waiting for other threads to pause or reach a
    378     // safepoint.
    379     ThreadState* state = mainThreadState();
    380 
    381     {
    382         SafePointAwareMutexLocker locker(threadAttachMutex(), NoHeapPointersOnStack);
    383 
    384         // First add the main thread's heap pages to the orphaned pool.
    385         state->cleanupPages();
    386 
    387         // Second, detach the thread.
    388         ASSERT(attachedThreads().contains(state));
    389         attachedThreads().remove(state);
    390         state->~ThreadState();
    391     }
    392     shutdownHeapIfNecessary();
    393 }
    394 
    395 void ThreadState::shutdownHeapIfNecessary()
    396 {
    397     // We don't need to enter a safe point before acquiring threadAttachMutex
    398     // because this thread is already detached.
    399 
    400     MutexLocker locker(threadAttachMutex());
    401     // We start shutting down the heap if there is no running thread
    402     // and Heap::shutdown() has already been called.
    403     if (!attachedThreads().size() && Heap::s_shutdownCalled)
    404         Heap::doShutdown();
    405 }
    406 
    407 void ThreadState::attach()
    408 {
    409     RELEASE_ASSERT(!Heap::s_shutdownCalled);
    410     MutexLocker locker(threadAttachMutex());
    411     ThreadState* state = new ThreadState();
    412     attachedThreads().add(state);
    413 }
    414 
    415 void ThreadState::cleanupPages()
    416 {
    417     for (int i = 0; i < NumberOfHeaps; ++i)
    418         m_heaps[i]->cleanupPages();
    419 }
    420 
    421 void ThreadState::cleanup()
    422 {
    423     for (size_t i = 0; i < m_cleanupTasks.size(); i++)
    424         m_cleanupTasks[i]->preCleanup();
    425 
    426     {
    427         // Grab the threadAttachMutex to ensure only one thread can shut down at
    428         // a time and that no other thread can do a global GC. It also allows
    429         // safe iteration of the attachedThreads set which happens as part of
    430         // thread local GC asserts. We enter a safepoint while waiting for the
    431         // lock to avoid a deadlock where another thread has already requested
    432         // GC.
    433         SafePointAwareMutexLocker locker(threadAttachMutex(), NoHeapPointersOnStack);
    434 
    435         // From here on ignore all conservatively discovered
    436         // pointers into the heap owned by this thread.
    437         m_isTerminating = true;
    438 
    439         // Set the terminate flag on all heap pages of this thread. This is used to
    440         // ensure we don't trace pages on other threads that are not part of the
    441         // thread local GC.
    442         setupHeapsForTermination();
    443 
    444         // Do thread local GCs as long as the count of thread local Persistents
    445         // changes and is above zero.
    446         PersistentAnchor* anchor = static_cast<PersistentAnchor*>(m_persistents.get());
    447         int oldCount = -1;
    448         int currentCount = anchor->numberOfPersistents();
    449         ASSERT(currentCount >= 0);
    450         while (currentCount != oldCount) {
    451             Heap::collectGarbageForTerminatingThread(this);
    452             oldCount = currentCount;
    453             currentCount = anchor->numberOfPersistents();
    454         }
    455         // We should not have any persistents left when getting to this point;
    456         // if we do, it is probably a bug, so we add a debug ASSERT to catch it.
    457         ASSERT(!currentCount);
    458 
    459         // Add pages to the orphaned page pool to ensure any global GCs from this point
    460         // on will not trace objects on this thread's heaps.
    461         cleanupPages();
    462 
    463         ASSERT(attachedThreads().contains(this));
    464         attachedThreads().remove(this);
    465     }
    466 
    467     for (size_t i = 0; i < m_cleanupTasks.size(); i++)
    468         m_cleanupTasks[i]->postCleanup();
    469     m_cleanupTasks.clear();
    470 }
    471 
    472 
    473 void ThreadState::detach()
    474 {
    475     ThreadState* state = current();
    476     state->cleanup();
    477     delete state;
    478     shutdownHeapIfNecessary();
    479 }
    480 
    481 void ThreadState::visitPersistentRoots(Visitor* visitor)
    482 {
    483     TRACE_EVENT0("blink_gc", "ThreadState::visitPersistentRoots");
    484     {
    485         // All threads are at safepoints so this is not strictly necessary.
    486         // However we acquire the mutex to make mutation and traversal of this
    487         // list symmetrical.
    488         MutexLocker locker(globalRootsMutex());
    489         globalRoots()->trace(visitor);
    490     }
    491 
    492     AttachedThreadStateSet& threads = attachedThreads();
    493     for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
    494         (*it)->visitPersistents(visitor);
    495 }
    496 
    497 void ThreadState::visitStackRoots(Visitor* visitor)
    498 {
    499     TRACE_EVENT0("blink_gc", "ThreadState::visitStackRoots");
    500     AttachedThreadStateSet& threads = attachedThreads();
    501     for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
    502         (*it)->visitStack(visitor);
    503 }
    504 
    505 NO_SANITIZE_ADDRESS
    506 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr)
    507 {
    508 #if defined(ADDRESS_SANITIZER)
    509     Address* start = reinterpret_cast<Address*>(m_startOfStack);
    510     Address* end = reinterpret_cast<Address*>(m_endOfStack);
    511     Address* fakeFrameStart = 0;
    512     Address* fakeFrameEnd = 0;
    513     Address* maybeFakeFrame = reinterpret_cast<Address*>(ptr);
    514     Address* realFrameForFakeFrame =
    515         reinterpret_cast<Address*>(
    516             __asan_addr_is_in_fake_stack(
    517                 m_asanFakeStack, maybeFakeFrame,
    518                 reinterpret_cast<void**>(&fakeFrameStart),
    519                 reinterpret_cast<void**>(&fakeFrameEnd)));
    520     if (realFrameForFakeFrame) {
    521         // This is a fake frame from the asan fake stack.
    522         if (realFrameForFakeFrame > end && start > realFrameForFakeFrame) {
    523             // The real stack address for the asan fake frame is
    524             // within the stack range that we need to scan so we need
    525             // to visit the values in the fake frame.
    526             for (Address* p = fakeFrameStart; p < fakeFrameEnd; p++)
    527                 Heap::checkAndMarkPointer(visitor, *p);
    528         }
    529     }
    530 #endif
    531 }
    532 
    533 NO_SANITIZE_ADDRESS
    534 void ThreadState::visitStack(Visitor* visitor)
    535 {
    536     if (m_stackState == NoHeapPointersOnStack)
    537         return;
    538 
    539     Address* start = reinterpret_cast<Address*>(m_startOfStack);
    540     // If there is a safepoint scope marker we should stop the stack
    541     // scanning there to not touch active parts of the stack. Anything
    542     // interesting beyond that point is in the safepoint stack copy.
    543     // If there is no scope marker the thread is blocked and we should
    544     // scan all the way to the recorded end stack pointer.
    545     Address* end = reinterpret_cast<Address*>(m_endOfStack);
    546     Address* safePointScopeMarker = reinterpret_cast<Address*>(m_safePointScopeMarker);
    547     Address* current = safePointScopeMarker ? safePointScopeMarker : end;
    548 
    549     // Ensure that current is aligned to the address size, otherwise the loop
    550     // below will read past the start address.
    551     current = reinterpret_cast<Address*>(reinterpret_cast<intptr_t>(current) & ~(sizeof(Address) - 1));
    552 
    553     for (; current < start; ++current) {
    554         Address ptr = *current;
    555 #if defined(MEMORY_SANITIZER)
    556         // |ptr| may be uninitialized by design. Mark it as initialized to keep
    557         // MSan from complaining.
    558         // Note: it may be tempting to get rid of |ptr| and simply use |current|
    559         // here, but that would be incorrect. We intentionally use a local
    560         // variable because we don't want to unpoison the original stack.
    561         __msan_unpoison(&ptr, sizeof(ptr));
    562 #endif
    563         Heap::checkAndMarkPointer(visitor, ptr);
    564         visitAsanFakeStackForPointer(visitor, ptr);
    565     }
    566 
    567     for (Vector<Address>::iterator it = m_safePointStackCopy.begin(); it != m_safePointStackCopy.end(); ++it) {
    568         Address ptr = *it;
    569 #if defined(MEMORY_SANITIZER)
    570         // See the comment above.
    571         __msan_unpoison(&ptr, sizeof(ptr));
    572 #endif
    573         Heap::checkAndMarkPointer(visitor, ptr);
    574         visitAsanFakeStackForPointer(visitor, ptr);
    575     }
    576 }
    577 
    578 void ThreadState::visitPersistents(Visitor* visitor)
    579 {
    580     m_persistents->trace(visitor);
    581     WrapperPersistentRegion::trace(m_liveWrapperPersistents, visitor);
    582 }
    583 
    584 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address)
    585 {
    586     // If the thread is terminating, ignore conservative pointers.
    587     if (m_isTerminating)
    588         return false;
    589 
    590     // This checks for normal pages and for large objects which span the extent
    591     // of several normal pages.
    592     BaseHeapPage* page = heapPageFromAddress(address);
    593     if (page) {
    594         page->checkAndMarkPointer(visitor, address);
    595         // Whether or not the pointer was within an object it was certainly
    596         // within a page that is part of the heap, so we don't want to ask the
    597         // other heaps or put this address in the
    598         // HeapDoesNotContainCache.
    599         return true;
    600     }
    601 
    602     return false;
    603 }
    604 
    605 #if ENABLE(GC_PROFILE_MARKING)
    606 const GCInfo* ThreadState::findGCInfo(Address address)
    607 {
    608     BaseHeapPage* page = heapPageFromAddress(address);
    609     if (page) {
    610         return page->findGCInfo(address);
    611     }
    612     return 0;
    613 }
    614 #endif
    615 
    616 #if ENABLE(GC_PROFILE_HEAP)
    617 size_t ThreadState::SnapshotInfo::getClassTag(const GCInfo* gcinfo)
    618 {
    619     HashMap<const GCInfo*, size_t>::AddResult result = classTags.add(gcinfo, classTags.size());
    620     if (result.isNewEntry) {
    621         liveCount.append(0);
    622         deadCount.append(0);
    623         liveSize.append(0);
    624         deadSize.append(0);
    625         generations.append(Vector<int, 8>());
    626         generations.last().fill(0, 8);
    627     }
    628     return result.storedValue->value;
    629 }
    630 
    631 void ThreadState::snapshot()
    632 {
    633     SnapshotInfo info(this);
    634     RefPtr<TracedValue> json = TracedValue::create();
    635 
    636 #define SNAPSHOT_HEAP(HeapType)                                           \
    637     {                                                                     \
    638         json->beginDictionary();                                          \
    639         json->setString("name", #HeapType);                               \
    640         m_heaps[HeapType##Heap]->snapshot(json.get(), &info);             \
    641         json->endDictionary();                                            \
    642         json->beginDictionary();                                          \
    643         json->setString("name", #HeapType"NonFinalized");                 \
    644         m_heaps[HeapType##HeapNonFinalized]->snapshot(json.get(), &info); \
    645         json->endDictionary();                                            \
    646     }
    647     json->beginArray("heaps");
    648     SNAPSHOT_HEAP(General);
    649     SNAPSHOT_HEAP(CollectionBacking);
    650     FOR_EACH_TYPED_HEAP(SNAPSHOT_HEAP);
    651     json->endArray();
    652 #undef SNAPSHOT_HEAP
    653 
    654     json->setInteger("allocatedSpace", m_stats.totalAllocatedSpace());
    655     json->setInteger("objectSpace", m_stats.totalObjectSpace());
    656     json->setInteger("pageCount", info.pageCount);
    657     json->setInteger("freeSize", info.freeSize);
    658 
    659     Vector<String> classNameVector(info.classTags.size());
    660     for (HashMap<const GCInfo*, size_t>::iterator it = info.classTags.begin(); it != info.classTags.end(); ++it)
    661         classNameVector[it->value] = it->key->m_className;
    662 
    663     size_t liveSize = 0;
    664     size_t deadSize = 0;
    665     json->beginArray("classes");
    666     for (size_t i = 0; i < classNameVector.size(); ++i) {
    667         json->beginDictionary();
    668         json->setString("name", classNameVector[i]);
    669         json->setInteger("liveCount", info.liveCount[i]);
    670         json->setInteger("deadCount", info.deadCount[i]);
    671         json->setInteger("liveSize", info.liveSize[i]);
    672         json->setInteger("deadSize", info.deadSize[i]);
    673         liveSize += info.liveSize[i];
    674         deadSize += info.deadSize[i];
    675 
    676         json->beginArray("generations");
    677         for (size_t j = 0; j < heapObjectGenerations; ++j)
    678             json->pushInteger(info.generations[i][j]);
    679         json->endArray();
    680         json->endDictionary();
    681     }
    682     json->endArray();
    683     json->setInteger("liveSize", liveSize);
    684     json->setInteger("deadSize", deadSize);
    685 
    686     TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("blink_gc", "ThreadState", this, json.release());
    687 }
    688 #endif
    689 
    690 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback)
    691 {
    692     CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry();
    693     *slot = CallbackStack::Item(object, callback);
    694 }
    695 
    696 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor)
    697 {
    698     // For weak processing we should never reach orphaned pages since orphaned
    699     // pages are not traced and thus objects on those pages are never
    700     // registered as objects on orphaned pages. We cannot assert this here since
    701     // we might have an off-heap collection. We assert it in
    702     // Heap::pushWeakObjectPointerCallback.
    703     if (CallbackStack::Item* item = m_weakCallbackStack->pop()) {
    704         item->call(visitor);
    705         return true;
    706     }
    707     return false;
    708 }
    709 
    710 WrapperPersistentRegion* ThreadState::takeWrapperPersistentRegion()
    711 {
    712     WrapperPersistentRegion* region;
    713     if (m_pooledWrapperPersistentRegionCount) {
    714         region = WrapperPersistentRegion::removeHead(&m_pooledWrapperPersistents);
    715         m_pooledWrapperPersistentRegionCount--;
    716     } else {
    717         region = new WrapperPersistentRegion();
    718     }
    719     ASSERT(region);
    720     WrapperPersistentRegion::insertHead(&m_liveWrapperPersistents, region);
    721     return region;
    722 }
    723 
    724 void ThreadState::freeWrapperPersistentRegion(WrapperPersistentRegion* region)
    725 {
    726     if (!region->removeIfNotLast(&m_liveWrapperPersistents))
    727         return;
    728 
    729     // The region was removed, i.e. it was not the last region in the list.
    730     if (m_pooledWrapperPersistentRegionCount < MaxPooledWrapperPersistentRegionCount) {
    731         WrapperPersistentRegion::insertHead(&m_pooledWrapperPersistents, region);
    732         m_pooledWrapperPersistentRegionCount++;
    733     } else {
    734         delete region;
    735     }
    736 }
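
// Illustrative use of the region pool (the calling code lives outside this
// file; the variable names are placeholders):
//
//   WrapperPersistentRegion* region = state->takeWrapperPersistentRegion();
//   // ... allocate WrapperPersistent slots from |region| ...
//   state->freeWrapperPersistentRegion(region);
//
// freeWrapperPersistentRegion() never discards the last live region, and it
// caches up to MaxPooledWrapperPersistentRegionCount (2) returned regions for
// reuse before falling back to deleting them.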
    737 
    738 PersistentNode* ThreadState::globalRoots()
    739 {
    740     AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor);
    741     return anchor;
    742 }
    743 
    744 Mutex& ThreadState::globalRootsMutex()
    745 {
    746     AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    747     return mutex;
    748 }
    749 
    750 // Trigger garbage collection on a 50% increase in size, but not for
    751 // less than 512 kbytes.
    752 bool ThreadState::increasedEnoughToGC(size_t newSize, size_t oldSize)
    753 {
    754     if (newSize < 1 << 19)
    755         return false;
    756     size_t limit = oldSize + (oldSize >> 1);
    757     return newSize > limit;
    758 }
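
// For example, with oldSize = 2MB the limit is 3MB, so a newSize of 3.5MB
// triggers a GC, while a newSize of 400KB never does because it is below the
// 512 kbyte floor (1 << 19 bytes) checked above.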
    759 
    760 // FIXME: The heuristics are local for a thread at this
    761 // point. Consider using heuristics that take memory for all threads
    762 // into account.
    763 bool ThreadState::shouldGC()
    764 {
    765     // Do not GC during sweeping. We allow allocation during
    766     // finalization, but those allocations are not allowed
    767     // to lead to nested garbage collections.
    768     return !m_sweepInProgress && increasedEnoughToGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace());
    769 }
    770 
    771 // Trigger conservative garbage collection on a 100% increase in size,
    772 // but not for less than 4 Mbytes. If the system currently has a low
    773 // collection rate, then require a 300% increase in size.
    774 bool ThreadState::increasedEnoughToForceConservativeGC(size_t newSize, size_t oldSize)
    775 {
    776     if (newSize < 1 << 22)
    777         return false;
    778     size_t limit = (m_lowCollectionRate ? 4 : 2) * oldSize;
    779     return newSize > limit;
    780 }
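
// For example, with oldSize = 3MB a conservative GC is forced once newSize
// exceeds 6MB (or 12MB when m_lowCollectionRate is set); any newSize below
// the 4 Mbyte floor (1 << 22 bytes) never forces one.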
    781 
    782 // FIXME: The heuristics are local for a thread at this
    783 // point. Consider using heuristics that take memory for all threads
    784 // into account.
    785 bool ThreadState::shouldForceConservativeGC()
    786 {
    787     // Do not GC during sweeping. We allow allocation during
    788     // finalization, but those allocations are not allowed
    789     // to lead to nested garbage collections.
    790     return !m_sweepInProgress && increasedEnoughToForceConservativeGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace());
    791 }
    792 
    793 bool ThreadState::sweepRequested()
    794 {
    795     ASSERT(isAnyThreadInGC() || checkThread());
    796     return m_sweepRequested;
    797 }
    798 
    799 void ThreadState::setSweepRequested()
    800 {
    801     // The sweep request is set from the thread that initiates garbage
    802     // collection, which could be different from the thread that owns this
    803     // thread state. Therefore the setting of m_sweepRequested needs a
    804     // barrier.
    805     atomicTestAndSetToOne(&m_sweepRequested);
    806 }
    807 
    808 void ThreadState::clearSweepRequested()
    809 {
    810     checkThread();
    811     m_sweepRequested = 0;
    812 }
    813 
    814 bool ThreadState::gcRequested()
    815 {
    816     checkThread();
    817     return m_gcRequested;
    818 }
    819 
    820 void ThreadState::setGCRequested()
    821 {
    822     checkThread();
    823     m_gcRequested = true;
    824 }
    825 
    826 void ThreadState::clearGCRequested()
    827 {
    828     checkThread();
    829     m_gcRequested = false;
    830 }
    831 
    832 void ThreadState::performPendingGC(StackState stackState)
    833 {
    834     if (stackState == NoHeapPointersOnStack) {
    835         if (forcePreciseGCForTesting()) {
    836             setForcePreciseGCForTesting(false);
    837             Heap::collectAllGarbage();
    838         } else if (gcRequested()) {
    839             Heap::collectGarbage(NoHeapPointersOnStack);
    840         }
    841     }
    842 }
    843 
    844 void ThreadState::setForcePreciseGCForTesting(bool value)
    845 {
    846     checkThread();
    847     m_forcePreciseGCForTesting = value;
    848 }
    849 
    850 bool ThreadState::forcePreciseGCForTesting()
    851 {
    852     checkThread();
    853     return m_forcePreciseGCForTesting;
    854 }
    855 
    856 void ThreadState::makeConsistentForSweeping()
    857 {
    858     for (int i = 0; i < NumberOfHeaps; i++)
    859         m_heaps[i]->makeConsistentForSweeping();
    860 }
    861 
    862 #if ENABLE(ASSERT)
    863 bool ThreadState::isConsistentForSweeping()
    864 {
    865     for (int i = 0; i < NumberOfHeaps; i++) {
    866         if (!m_heaps[i]->isConsistentForSweeping())
    867             return false;
    868     }
    869     return true;
    870 }
    871 #endif
    872 
    873 void ThreadState::prepareForGC()
    874 {
    875     for (int i = 0; i < NumberOfHeaps; i++) {
    876         BaseHeap* heap = m_heaps[i];
    877         heap->makeConsistentForSweeping();
    878         // If a new GC is requested before this thread got around to sweeping, e.g. due to the
    879         // thread doing a long-running operation, we clear the mark bits and mark any of
    880         // the dead objects as dead. The latter is used to ensure the next GC marking does
    881         // not trace already dead objects. If we trace a dead object we could end up tracing
    882         // into garbage or the middle of another object via the newly conservatively found
    883         // object.
    884         if (sweepRequested())
    885             heap->clearLiveAndMarkDead();
    886     }
    887     setSweepRequested();
    888 }
    889 
    890 void ThreadState::setupHeapsForTermination()
    891 {
    892     for (int i = 0; i < NumberOfHeaps; i++)
    893         m_heaps[i]->prepareHeapForTermination();
    894 }
    895 
    896 BaseHeapPage* ThreadState::heapPageFromAddress(Address address)
    897 {
    898     BaseHeapPage* cachedPage = heapContainsCache()->lookup(address);
    899 #if !ENABLE(ASSERT)
    900     if (cachedPage)
    901         return cachedPage;
    902 #endif
    903 
    904     for (int i = 0; i < NumberOfHeaps; i++) {
    905         BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address);
    906         if (page) {
    907             // Asserts that make sure heapPageFromAddress takes addresses from
    908             // the whole aligned blinkPageSize memory area. This is necessary
    909             // for the negative cache to work.
    910             ASSERT(page->isLargeObject() || page == m_heaps[i]->heapPageFromAddress(roundToBlinkPageStart(address)));
    911             if (roundToBlinkPageStart(address) != roundToBlinkPageEnd(address))
    912                 ASSERT(page->isLargeObject() || page == m_heaps[i]->heapPageFromAddress(roundToBlinkPageEnd(address) - 1));
    913             ASSERT(!cachedPage || page == cachedPage);
    914             if (!cachedPage)
    915                 heapContainsCache()->addEntry(address, page);
    916             return page;
    917         }
    918     }
    919     ASSERT(!cachedPage);
    920     return 0;
    921 }
    922 
    923 void ThreadState::getStats(HeapStats& stats)
    924 {
    925     stats = m_stats;
    926 #if ENABLE(ASSERT)
    927     if (isConsistentForSweeping()) {
    928         HeapStats scannedStats;
    929         for (int i = 0; i < NumberOfHeaps; i++)
    930             m_heaps[i]->getScannedStats(scannedStats);
    931         ASSERT(scannedStats == stats);
    932     }
    933 #endif
    934 }
    935 
    936 bool ThreadState::stopThreads()
    937 {
    938     return s_safePointBarrier->parkOthers();
    939 }
    940 
    941 void ThreadState::resumeThreads()
    942 {
    943     s_safePointBarrier->resumeOthers();
    944 }
    945 
    946 void ThreadState::safePoint(StackState stackState)
    947 {
    948     checkThread();
    949     performPendingGC(stackState);
    950     ASSERT(!m_atSafePoint);
    951     m_stackState = stackState;
    952     m_atSafePoint = true;
    953     s_safePointBarrier->checkAndPark(this);
    954     m_atSafePoint = false;
    955     m_stackState = HeapPointersOnStack;
    956     performPendingSweep();
    957 }
    958 
    959 #ifdef ADDRESS_SANITIZER
    960 // When we are running under AddressSanitizer with detect_stack_use_after_return=1
    961 // then the stack marker obtained from SafePointScope will point into a fake stack.
    962 // Detect this case by checking whether it falls between the current stack frame
    963 // and the stack start, and use an arbitrary, high enough value for it.
    964 // Don't adjust the stack marker in any other case, to match the behavior of code
    965 // running without AddressSanitizer.
    966 NO_SANITIZE_ADDRESS static void* adjustScopeMarkerForAddressSanitizer(void* scopeMarker)
    967 {
    968     Address start = reinterpret_cast<Address>(getStackStart());
    969     Address end = reinterpret_cast<Address>(&start);
    970     RELEASE_ASSERT(end < start);
    971 
    972     if (end <= scopeMarker && scopeMarker < start)
    973         return scopeMarker;
    974 
    975     // 256 is as good an approximation as any other.
    976     const size_t bytesToCopy = sizeof(Address) * 256;
    977     if (static_cast<size_t>(start - end) < bytesToCopy)
    978         return start;
    979 
    980     return end + bytesToCopy;
    981 }
    982 #endif
    983 
    984 void ThreadState::enterSafePoint(StackState stackState, void* scopeMarker)
    985 {
    986 #ifdef ADDRESS_SANITIZER
    987     if (stackState == HeapPointersOnStack)
    988         scopeMarker = adjustScopeMarkerForAddressSanitizer(scopeMarker);
    989 #endif
    990     ASSERT(stackState == NoHeapPointersOnStack || scopeMarker);
    991     performPendingGC(stackState);
    992     checkThread();
    993     ASSERT(!m_atSafePoint);
    994     m_atSafePoint = true;
    995     m_stackState = stackState;
    996     m_safePointScopeMarker = scopeMarker;
    997     s_safePointBarrier->enterSafePoint(this);
    998 }
    999 
   1000 void ThreadState::leaveSafePoint(SafePointAwareMutexLocker* locker)
   1001 {
   1002     checkThread();
   1003     ASSERT(m_atSafePoint);
   1004     s_safePointBarrier->leaveSafePoint(this, locker);
   1005     m_atSafePoint = false;
   1006     m_stackState = HeapPointersOnStack;
   1007     clearSafePointScopeMarker();
   1008     performPendingSweep();
   1009 }
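
// Illustrative pairing of enterSafePoint()/leaveSafePoint() (SafePointScope is
// declared elsewhere; the mutex and condition shown are placeholders): a thread
// about to block wraps the blocking call in a safe point scope so that a GC on
// another thread can proceed while it waits.
//
//   {
//       SafePointScope scope(HeapPointersOnStack); // enterSafePoint(...)
//       condition.wait(mutex);                     // a GC may run while blocked
//   }                                              // leaveSafePoint(...)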
   1010 
   1011 void ThreadState::copyStackUntilSafePointScope()
   1012 {
   1013     if (!m_safePointScopeMarker || m_stackState == NoHeapPointersOnStack)
   1014         return;
   1015 
   1016     Address* to = reinterpret_cast<Address*>(m_safePointScopeMarker);
   1017     Address* from = reinterpret_cast<Address*>(m_endOfStack);
   1018     RELEASE_ASSERT(from < to);
   1019     RELEASE_ASSERT(to <= reinterpret_cast<Address*>(m_startOfStack));
   1020     size_t slotCount = static_cast<size_t>(to - from);
   1021     // Catch potential performance issues.
   1022 #if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
   1023     // ASan/LSan use more space on the stack and we therefore
   1024     // increase the allowed stack copying for those builds.
   1025     ASSERT(slotCount < 2048);
   1026 #else
   1027     ASSERT(slotCount < 1024);
   1028 #endif
   1029 
   1030     ASSERT(!m_safePointStackCopy.size());
   1031     m_safePointStackCopy.resize(slotCount);
   1032     for (size_t i = 0; i < slotCount; ++i) {
   1033         m_safePointStackCopy[i] = from[i];
   1034     }
   1035 }
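
// For reference, the 1024-slot limit above corresponds to copying at most 8KB
// of stack on a 64-bit build (sizeof(Address) == 8), and the 2048-slot
// ASan/LSan limit corresponds to 16KB.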
   1036 
   1037 void ThreadState::registerSweepingTask()
   1038 {
   1039     MutexLocker locker(m_sweepMutex);
   1040     ++m_numberOfSweeperTasks;
   1041 }
   1042 
   1043 void ThreadState::unregisterSweepingTask()
   1044 {
   1045     MutexLocker locker(m_sweepMutex);
   1046     ASSERT(m_numberOfSweeperTasks > 0);
   1047     if (!--m_numberOfSweeperTasks)
   1048         m_sweepThreadCondition.signal();
   1049 }
   1050 
   1051 void ThreadState::waitUntilSweepersDone()
   1052 {
   1053     MutexLocker locker(m_sweepMutex);
   1054     while (m_numberOfSweeperTasks > 0)
   1055         m_sweepThreadCondition.wait(m_sweepMutex);
   1056 }
   1057 
   1058 
   1059 class SweepNonFinalizedHeapTask FINAL : public WebThread::Task {
   1060 public:
   1061     SweepNonFinalizedHeapTask(ThreadState* state, BaseHeap* heap, HeapStats* stats)
   1062         : m_threadState(state)
   1063         , m_heap(heap)
   1064         , m_stats(stats)
   1065     {
   1066         m_threadState->registerSweepingTask();
   1067     }
   1068 
   1069     virtual ~SweepNonFinalizedHeapTask()
   1070     {
   1071         m_threadState->unregisterSweepingTask();
   1072     }
   1073 
   1074     virtual void run() { m_heap->sweep(m_stats); }
   1075 
   1076 private:
   1077     ThreadState* m_threadState;
   1078     BaseHeap* m_heap;
   1079     HeapStats* m_stats;
   1080 };
   1081 
   1082 void ThreadState::performPendingSweep()
   1083 {
   1084     if (!sweepRequested())
   1085         return;
   1086 
   1087 #if ENABLE(GC_PROFILE_HEAP)
   1088     // We snapshot the heap prior to sweeping to get numbers for both resources
   1089     // that have been allocated since the last GC and for resources that are
   1090     // going to be freed.
   1091     bool gcTracingEnabled;
   1092     TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled);
   1093     if (gcTracingEnabled && m_stats.totalObjectSpace() > 0)
   1094         snapshot();
   1095 #endif
   1096 
   1097     TRACE_EVENT0("blink_gc", "ThreadState::performPendingSweep");
   1098 
   1099     double timeStamp = WTF::currentTimeMS();
   1100     const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
   1101     if (isMainThread()) {
   1102         ScriptForbiddenScope::enter();
   1103         TRACE_EVENT_SET_SAMPLING_STATE("blink", "BlinkGCSweeping");
   1104     }
   1105 
   1106     size_t objectSpaceBeforeSweep = m_stats.totalObjectSpace();
   1107     {
   1108         NoSweepScope scope(this);
   1109 
   1110         // Disallow allocation during weak processing.
   1111         enterNoAllocationScope();
   1112         {
   1113             TRACE_EVENT0("blink_gc", "ThreadState::threadLocalWeakProcessing");
   1114             // Perform thread-specific weak processing.
   1115             while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { }
   1116         }
   1117         leaveNoAllocationScope();
   1118 
   1119         // Perform sweeping and finalization.
   1120 
   1121         // Sweeping will recalculate the stats
   1122         m_stats.clear();
   1123 
   1124         // Sweep the non-finalized heap pages on multiple threads.
   1125         // Attempt to load-balance by having the sweeper thread sweep as
   1126         // close to half of the pages as possible.
   1127         int nonFinalizedPages = 0;
   1128         for (int i = 0; i < NumberOfNonFinalizedHeaps; i++)
   1129             nonFinalizedPages += m_heaps[FirstNonFinalizedHeap + i]->normalPageCount();
   1130 
   1131         int finalizedPages = 0;
   1132         for (int i = 0; i < NumberOfFinalizedHeaps; i++)
   1133             finalizedPages += m_heaps[FirstFinalizedHeap + i]->normalPageCount();
   1134 
   1135         int pagesToSweepInParallel = nonFinalizedPages < finalizedPages ? nonFinalizedPages : ((nonFinalizedPages + finalizedPages) / 2);
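        // For example, with 30 non-finalized and 50 finalized pages this is 30;
        // with 50 non-finalized and 30 finalized pages it is (50 + 30) / 2 = 40.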
   1136 
   1137         // Start the sweeper thread for the non-finalized heaps. No
   1138         // finalizers need to run and therefore the pages can be
   1139         // swept on other threads.
   1140         static const int minNumberOfPagesForParallelSweep = 10;
   1141         HeapStats heapStatsVector[NumberOfNonFinalizedHeaps];
   1142         BaseHeap* splitOffHeaps[NumberOfNonFinalizedHeaps] = { 0 };
   1143         for (int i = 0; i < NumberOfNonFinalizedHeaps && pagesToSweepInParallel > 0; i++) {
   1144             BaseHeap* heap = m_heaps[FirstNonFinalizedHeap + i];
   1145             int pageCount = heap->normalPageCount();
   1146             // Only use the sweeper thread if it exists and there are
   1147             // enough pages to sweep.
   1148             if (m_sweeperThread && pageCount > minNumberOfPagesForParallelSweep) {
   1149                 // Create a new thread heap instance to make sure that the
   1150                 // state modified while sweeping is separate for the
   1151                 // sweeper thread and the owner thread.
   1152                 int pagesToSplitOff = std::min(pageCount, pagesToSweepInParallel);
   1153                 pagesToSweepInParallel -= pagesToSplitOff;
   1154                 BaseHeap* splitOff = heap->split(pagesToSplitOff);
   1155                 splitOffHeaps[i] = splitOff;
   1156                 HeapStats* stats = &heapStatsVector[i];
   1157                 m_sweeperThread->postTask(new SweepNonFinalizedHeapTask(this, splitOff, stats));
   1158             }
   1159         }
   1160 
   1161         {
   1162             // Sweep the remainder of the non-finalized pages (or all of them
   1163             // if there is no sweeper thread).
   1164             TRACE_EVENT0("blink_gc", "ThreadState::sweepNonFinalizedHeaps");
   1165             for (int i = 0; i < NumberOfNonFinalizedHeaps; i++) {
   1166                 HeapStats stats;
   1167                 m_heaps[FirstNonFinalizedHeap + i]->sweep(&stats);
   1168                 m_stats.add(&stats);
   1169             }
   1170         }
   1171 
   1172         {
   1173             // Sweep the finalized pages.
   1174             TRACE_EVENT0("blink_gc", "ThreadState::sweepFinalizedHeaps");
   1175             for (int i = 0; i < NumberOfFinalizedHeaps; i++) {
   1176                 HeapStats stats;
   1177                 m_heaps[FirstFinalizedHeap + i]->sweep(&stats);
   1178                 m_stats.add(&stats);
   1179             }
   1180         }
   1181 
   1182         // Wait for the sweeper threads and update the heap stats with the
   1183         // stats for the heap portions swept by those threads.
   1184         waitUntilSweepersDone();
   1185         for (int i = 0; i < NumberOfNonFinalizedHeaps; i++) {
   1186             m_stats.add(&heapStatsVector[i]);
   1187             if (BaseHeap* splitOff = splitOffHeaps[i])
   1188                 m_heaps[FirstNonFinalizedHeap + i]->merge(splitOff);
   1189         }
   1190 
   1191         for (int i = 0; i < NumberOfHeaps; i++)
   1192             m_heaps[i]->postSweepProcessing();
   1193 
   1194         getStats(m_statsAfterLastGC);
   1195 
   1196     } // End NoSweepScope
   1197     clearGCRequested();
   1198     clearSweepRequested();
   1199     // If we collected less than 50% of objects, record that the
   1200     // collection rate is low, which we use to determine when to
   1201     // perform the next GC.
   1202     setLowCollectionRate(m_stats.totalObjectSpace() > (objectSpaceBeforeSweep >> 1));
   1203 
   1204     if (blink::Platform::current()) {
   1205         blink::Platform::current()->histogramCustomCounts("BlinkGC.PerformPendingSweep", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
   1206     }
   1207 
   1208     if (isMainThread()) {
   1209         TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
   1210         ScriptForbiddenScope::exit();
   1211     }
   1212 }
   1213 
   1214 void ThreadState::addInterruptor(Interruptor* interruptor)
   1215 {
   1216     SafePointScope scope(HeapPointersOnStack, SafePointScope::AllowNesting);
   1217 
   1218     {
   1219         MutexLocker locker(threadAttachMutex());
   1220         m_interruptors.append(interruptor);
   1221     }
   1222 }
   1223 
   1224 void ThreadState::removeInterruptor(Interruptor* interruptor)
   1225 {
   1226     SafePointScope scope(HeapPointersOnStack, SafePointScope::AllowNesting);
   1227 
   1228     {
   1229         MutexLocker locker(threadAttachMutex());
   1230         size_t index = m_interruptors.find(interruptor);
   1231         RELEASE_ASSERT(index != kNotFound);
   1232         m_interruptors.remove(index);
   1233     }
   1234 }
   1235 
   1236 void ThreadState::Interruptor::onInterrupted()
   1237 {
   1238     ThreadState* state = ThreadState::current();
   1239     ASSERT(state);
   1240     ASSERT(!state->isAtSafePoint());
   1241     state->safePoint(HeapPointersOnStack);
   1242 }
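
// Illustrative Interruptor subclass (the class name and the delivery mechanism
// are assumptions; concrete interruptors live outside this file). A
// requestInterrupt() implementation must eventually cause onInterrupted() to
// run on the interrupted thread, which then parks at a safe point with
// HeapPointersOnStack:
//
//   class SketchInterruptor : public ThreadState::Interruptor {
//   public:
//       virtual void requestInterrupt() OVERRIDE
//       {
//           // Ask the target thread to call onInterrupted() soon, e.g. by
//           // posting a task to it or setting a flag it checks regularly.
//       }
//       virtual void clearInterrupt() OVERRIDE
//       {
//           // Cancel any pending interrupt request.
//       }
//   };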
   1243 
   1244 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads()
   1245 {
   1246     DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ());
   1247     return threads;
   1248 }
   1249 
   1250 #if ENABLE(GC_PROFILE_MARKING)
   1251 const GCInfo* ThreadState::findGCInfoFromAllThreads(Address address)
   1252 {
   1253     bool needLockForIteration = !isAnyThreadInGC();
   1254     if (needLockForIteration)
   1255         threadAttachMutex().lock();
   1256 
   1257     ThreadState::AttachedThreadStateSet& threads = attachedThreads();
   1258     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
   1259         if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) {
   1260             if (needLockForIteration)
   1261                 threadAttachMutex().unlock();
   1262             return gcInfo;
   1263         }
   1264     }
   1265     if (needLockForIteration)
   1266         threadAttachMutex().unlock();
   1267     return 0;
   1268 }
   1269 #endif
   1270 
   1271 } // namespace blink
   1272