      1 /*
      2  * Copyright (C) 2013 Google Inc. All rights reserved.
      3  *
      4  * Redistribution and use in source and binary forms, with or without
      5  * modification, are permitted provided that the following conditions are
      6  * met:
      7  *
      8  *     * Redistributions of source code must retain the above copyright
      9  * notice, this list of conditions and the following disclaimer.
     10  *     * Redistributions in binary form must reproduce the above
     11  * copyright notice, this list of conditions and the following disclaimer
     12  * in the documentation and/or other materials provided with the
     13  * distribution.
     14  *     * Neither the name of Google Inc. nor the names of its
     15  * contributors may be used to endorse or promote products derived from
     16  * this software without specific prior written permission.
     17  *
     18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
     19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
     20  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
     21  * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
     22  * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
     23  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
     24  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
     25  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
     26  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
     27  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
     28  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
     29  */
     30 
     31 #include "config.h"
     32 #include "platform/heap/ThreadState.h"
     33 
     34 #include "platform/TraceEvent.h"
     35 #include "platform/heap/AddressSanitizer.h"
     36 #include "platform/heap/Handle.h"
     37 #include "platform/heap/Heap.h"
     38 #include "public/platform/Platform.h"
     39 #include "wtf/ThreadingPrimitives.h"
     40 
     41 #if OS(WIN)
     42 #include <stddef.h>
     43 #include <windows.h>
     44 #include <winnt.h>
     45 #elif defined(__GLIBC__)
     46 extern "C" void* __libc_stack_end;  // NOLINT
     47 #endif
     48 
     49 #if defined(MEMORY_SANITIZER)
     50 #include <sanitizer/msan_interface.h>
     51 #endif
     52 
     53 namespace WebCore {
     54 
     55 static void* getStackStart()
     56 {
     57 #if defined(__GLIBC__) || OS(ANDROID)
     58     pthread_attr_t attr;
     59     if (!pthread_getattr_np(pthread_self(), &attr)) {
     60         void* base;
     61         size_t size;
     62         int error = pthread_attr_getstack(&attr, &base, &size);
     63         RELEASE_ASSERT(!error);
     64         pthread_attr_destroy(&attr);
     65         return reinterpret_cast<Address>(base) + size;
     66     }
     67 #if defined(__GLIBC__)
      68     // pthread_getattr_np can fail for the main thread. In that case,
      69     // just like NaCl, we rely on __libc_stack_end to give us the start
      70     // of the stack.
      71     // See https://code.google.com/p/nativeclient/issues/detail?id=3431.
     72     return __libc_stack_end;
     73 #else
     74     ASSERT_NOT_REACHED();
     75     return 0;
     76 #endif
     77 #elif OS(MACOSX)
     78     return pthread_get_stackaddr_np(pthread_self());
     79 #elif OS(WIN) && COMPILER(MSVC)
      80     // On Windows, stack limits for the current thread are available in
      81     // the thread information block (TIB). Its fields can be accessed through
      82     // the FS segment register on x86 and the GS segment register on x86_64.
     83 #ifdef _WIN64
     84     return reinterpret_cast<void*>(__readgsqword(offsetof(NT_TIB64, StackBase)));
     85 #else
     86     return reinterpret_cast<void*>(__readfsdword(offsetof(NT_TIB, StackBase)));
     87 #endif
     88 #else
     89 #error Unsupported getStackStart on this platform.
     90 #endif
     91 }
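         // The address returned above is the highest address of this thread's
         // stack; the stack grows down from it on all supported platforms.
         // A minimal illustrative helper (hypothetical, not used elsewhere in
         // this file) showing how the currently used stack extent could be
         // derived from it:
         //
         //   static size_t approximateStackUsage()
         //   {
         //       int probe;
         //       // The address of a local variable approximates the current stack top.
         //       Address stackTop = reinterpret_cast<Address>(&probe);
         //       Address stackStart = reinterpret_cast<Address>(getStackStart());
         //       ASSERT(stackTop < stackStart);
         //       return static_cast<size_t>(stackStart - stackTop);
         //   }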
     92 
     93 
     94 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = 0;
     95 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
     96 SafePointBarrier* ThreadState::s_safePointBarrier = 0;
     97 bool ThreadState::s_inGC = false;
     98 
     99 static Mutex& threadAttachMutex()
    100 {
    101     AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    102     return mutex;
    103 }
    104 
    105 static double lockingTimeout()
    106 {
     107     // Wait time for parking all threads is at most 100 ms.
    108     return 0.100;
    109 }
    110 
    111 
    112 typedef void (*PushAllRegistersCallback)(SafePointBarrier*, ThreadState*, intptr_t*);
    113 extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegistersCallback);
    114 
    115 class SafePointBarrier {
    116 public:
    117     SafePointBarrier() : m_canResume(1), m_unparkedThreadCount(0) { }
    118     ~SafePointBarrier() { }
    119 
     120     // Request all other attached threads that are not already at a safepoint to park themselves at safepoints.
    121     bool parkOthers()
    122     {
    123         ASSERT(ThreadState::current()->isAtSafePoint());
    124 
    125         // Lock threadAttachMutex() to prevent threads from attaching.
    126         threadAttachMutex().lock();
    127 
    128         ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    129 
    130         MutexLocker locker(m_mutex);
    131         atomicAdd(&m_unparkedThreadCount, threads.size());
    132         releaseStore(&m_canResume, 0);
    133 
    134         ThreadState* current = ThreadState::current();
    135         for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
    136             if (*it == current)
    137                 continue;
    138 
    139             const Vector<ThreadState::Interruptor*>& interruptors = (*it)->interruptors();
    140             for (size_t i = 0; i < interruptors.size(); i++)
    141                 interruptors[i]->requestInterrupt();
    142         }
    143 
    144         while (acquireLoad(&m_unparkedThreadCount) > 0) {
    145             double expirationTime = currentTime() + lockingTimeout();
    146             if (!m_parked.timedWait(m_mutex, expirationTime)) {
    147                 // One of the other threads did not return to a safepoint within the maximum
    148                 // time we allow for threads to be parked. Abandon the GC and resume the
    149                 // currently parked threads.
    150                 resumeOthers(true);
    151                 return false;
    152             }
    153         }
    154         return true;
    155     }
    156 
    157     void resumeOthers(bool barrierLocked = false)
    158     {
    159         ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
    160         atomicSubtract(&m_unparkedThreadCount, threads.size());
    161         releaseStore(&m_canResume, 1);
    162 
    163         // FIXME: Resumed threads will all contend for m_mutex just to unlock it
    164         // later which is a waste of resources.
    165         if (UNLIKELY(barrierLocked)) {
    166             m_resume.broadcast();
    167         } else {
    171             MutexLocker locker(m_mutex);
    172             m_resume.broadcast();
    173         }
    174 
    175         ThreadState* current = ThreadState::current();
    176         for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
    177             if (*it == current)
    178                 continue;
    179 
    180             const Vector<ThreadState::Interruptor*>& interruptors = (*it)->interruptors();
    181             for (size_t i = 0; i < interruptors.size(); i++)
    182                 interruptors[i]->clearInterrupt();
    183         }
    184 
    185         threadAttachMutex().unlock();
    186         ASSERT(ThreadState::current()->isAtSafePoint());
    187     }
    188 
    189     void checkAndPark(ThreadState* state, SafePointAwareMutexLocker* locker = 0)
    190     {
    191         ASSERT(!state->isSweepInProgress());
    192         if (!acquireLoad(&m_canResume)) {
     193             // If we are leaving the safepoint from a SafePointAwareMutexLocker,
     194             // call out to release the lock before going to sleep. This enables
     195             // the lock to be acquired in the sweep phase, e.g. during weak
     196             // processing or finalization. The SafePointAwareMutexLocker will
     197             // reenter the safepoint and reacquire the lock after leaving this safepoint.
    198             if (locker)
    199                 locker->reset();
    200             pushAllRegisters(this, state, parkAfterPushRegisters);
    201             state->performPendingSweep();
    202         }
    203     }
    204 
    205     void enterSafePoint(ThreadState* state)
    206     {
    207         ASSERT(!state->isSweepInProgress());
    208         pushAllRegisters(this, state, enterSafePointAfterPushRegisters);
    209     }
    210 
    211     void leaveSafePoint(ThreadState* state, SafePointAwareMutexLocker* locker = 0)
    212     {
    213         if (atomicIncrement(&m_unparkedThreadCount) > 0)
    214             checkAndPark(state, locker);
    215     }
    216 
    217 private:
    218     void doPark(ThreadState* state, intptr_t* stackEnd)
    219     {
    220         state->recordStackEnd(stackEnd);
    221         MutexLocker locker(m_mutex);
    222         if (!atomicDecrement(&m_unparkedThreadCount))
    223             m_parked.signal();
    224         while (!acquireLoad(&m_canResume))
    225             m_resume.wait(m_mutex);
    226         atomicIncrement(&m_unparkedThreadCount);
    227     }
    228 
    229     static void parkAfterPushRegisters(SafePointBarrier* barrier, ThreadState* state, intptr_t* stackEnd)
    230     {
    231         barrier->doPark(state, stackEnd);
    232     }
    233 
    234     void doEnterSafePoint(ThreadState* state, intptr_t* stackEnd)
    235     {
    236         state->recordStackEnd(stackEnd);
    237         state->copyStackUntilSafePointScope();
     238         // m_unparkedThreadCount tracks the number of unparked threads. It
     239         // is positive if and only if we have requested other threads to
     240         // park at safe-points in preparation for a GC: parkOthers() adds
     241         // the total number of attached threads, so the counter becomes the
     242         // number of threads that still have to park. The last thread to
     243         // park makes the counter hit zero and notifies the GC thread that
     244         // it is safe to proceed. With no GC pending the counter can be
     245         // negative: if N threads are at safe-points the counter is -N.
    246         if (!atomicDecrement(&m_unparkedThreadCount)) {
    247             MutexLocker locker(m_mutex);
    248             m_parked.signal(); // Safe point reached.
    249         }
    250     }
    251 
    252     static void enterSafePointAfterPushRegisters(SafePointBarrier* barrier, ThreadState* state, intptr_t* stackEnd)
    253     {
    254         barrier->doEnterSafePoint(state, stackEnd);
    255     }
    256 
    257     volatile int m_canResume;
    258     volatile int m_unparkedThreadCount;
    259     Mutex m_mutex;
    260     ThreadCondition m_parked;
    261     ThreadCondition m_resume;
    262 };
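         // Worked example of the barrier state (illustrative; assume three
         // attached threads A, B and C, with A initiating a GC from inside a
         // safepoint):
         //
         //   - No GC pending: m_canResume == 1, m_unparkedThreadCount == 0.
         //   - A enters its safepoint: the counter drops to -1.
         //   - A calls parkOthers(): the counter becomes -1 + 3 == 2 (B and C
         //     are still unparked) and m_canResume is cleared.
         //   - B and C reach checkAndPark() and park: the counter drops to 1,
         //     then 0; the last one signals m_parked and parkOthers() returns true.
         //   - After the GC, resumeOthers() subtracts 3 (counter == -3), sets
         //     m_canResume and broadcasts m_resume; B, C and finally A increment
         //     the counter as they leave their safepoints, returning it to 0.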
    263 
    264 ThreadState::ThreadState()
    265     : m_thread(currentThread())
    266     , m_persistents(adoptPtr(new PersistentAnchor()))
    267     , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
    268     , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
    269     , m_safePointScopeMarker(0)
    270     , m_atSafePoint(false)
    271     , m_interruptors()
    272     , m_gcRequested(false)
    273     , m_forcePreciseGCForTesting(false)
    274     , m_sweepRequested(0)
    275     , m_sweepInProgress(false)
    276     , m_noAllocationCount(0)
    277     , m_inGC(false)
    278     , m_heapContainsCache(adoptPtr(new HeapContainsCache()))
    279     , m_isCleaningUp(false)
    280 #if defined(ADDRESS_SANITIZER)
    281     , m_asanFakeStack(__asan_get_current_fake_stack())
    282 #endif
    283 {
    284     ASSERT(!**s_threadSpecific);
    285     **s_threadSpecific = this;
    286 
    287     m_stats.clear();
    288     m_statsAfterLastGC.clear();
     289     // First allocate the general heap, then iterate through and
     290     // allocate the type-specific heaps.
    291     m_heaps[GeneralHeap] = new ThreadHeap<FinalizedHeapObjectHeader>(this);
    292     for (int i = GeneralHeap + 1; i < NumberOfHeaps; i++)
    293         m_heaps[i] = new ThreadHeap<HeapObjectHeader>(this);
    294 
    295     CallbackStack::init(&m_weakCallbackStack);
    296 }
    297 
    298 ThreadState::~ThreadState()
    299 {
    300     checkThread();
    301     CallbackStack::shutdown(&m_weakCallbackStack);
    302     for (int i = GeneralHeap; i < NumberOfHeaps; i++)
    303         delete m_heaps[i];
    304     deleteAllValues(m_interruptors);
    305     **s_threadSpecific = 0;
    306 }
    307 
    308 void ThreadState::init()
    309 {
    310     s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>();
    311     s_safePointBarrier = new SafePointBarrier;
    312 }
    313 
    314 void ThreadState::shutdown()
    315 {
    316     delete s_safePointBarrier;
    317     s_safePointBarrier = 0;
    318 
    319     // Thread-local storage shouldn't be disposed, so we don't call ~ThreadSpecific().
    320 }
    321 
    322 void ThreadState::attachMainThread()
    323 {
    324     RELEASE_ASSERT(!Heap::s_shutdownCalled);
    325     MutexLocker locker(threadAttachMutex());
    326     ThreadState* state = new(s_mainThreadStateStorage) ThreadState();
    327     attachedThreads().add(state);
    328 }
    329 
    330 void ThreadState::detachMainThread()
    331 {
     332     // Enter a safe point before trying to acquire threadAttachMutex
     333     // to avoid a deadlock if another thread is preparing for GC, has
     334     // acquired threadAttachMutex, and is waiting for other threads to
     335     // pause or reach a safepoint.
    336     ThreadState* state = mainThreadState();
    337     if (!state->isAtSafePoint())
    338         state->enterSafePointWithoutPointers();
    339 
    340     {
    341         MutexLocker locker(threadAttachMutex());
    342         state->leaveSafePoint();
    343         ASSERT(attachedThreads().contains(state));
    344         attachedThreads().remove(state);
    345         state->~ThreadState();
    346     }
    347     shutdownHeapIfNecessary();
    348 }
    349 
    350 void ThreadState::shutdownHeapIfNecessary()
    351 {
    352     // We don't need to enter a safe point before acquiring threadAttachMutex
    353     // because this thread is already detached.
    354 
    355     MutexLocker locker(threadAttachMutex());
     356     // We start shutting down the heap if there are no attached threads
     357     // left and Heap::shutdown() has already been called.
    358     if (!attachedThreads().size() && Heap::s_shutdownCalled)
    359         Heap::doShutdown();
    360 }
    361 
    362 void ThreadState::attach()
    363 {
    364     RELEASE_ASSERT(!Heap::s_shutdownCalled);
    365     MutexLocker locker(threadAttachMutex());
    366     ThreadState* state = new ThreadState();
    367     attachedThreads().add(state);
    368 }
    369 
    370 void ThreadState::cleanup()
    371 {
    372     // From here on ignore all conservatively discovered
    373     // pointers into the heap owned by this thread.
    374     m_isCleaningUp = true;
    375 
     376     // After this GC we expect the heap to be empty because the
     377     // preCleanup tasks should have cleared all persistent handles
     378     // that were externally owned.
    379     Heap::collectAllGarbage();
    380 
    381     // Verify that all heaps are empty now.
    382     for (int i = 0; i < NumberOfHeaps; i++)
    383         m_heaps[i]->assertEmpty();
    384 }
    385 
    386 void ThreadState::preCleanup()
    387 {
    388     for (size_t i = 0; i < m_cleanupTasks.size(); i++)
    389         m_cleanupTasks[i]->preCleanup();
    390 }
    391 
    392 void ThreadState::postCleanup()
    393 {
    394     for (size_t i = 0; i < m_cleanupTasks.size(); i++)
    395         m_cleanupTasks[i]->postCleanup();
    396     m_cleanupTasks.clear();
    397 }
    398 
    399 void ThreadState::detach()
    400 {
    401     ThreadState* state = current();
    402     state->preCleanup();
    403     state->cleanup();
    404 
     405     // Enter a safe point before trying to acquire threadAttachMutex
     406     // to avoid a deadlock if another thread is preparing for GC, has
     407     // acquired threadAttachMutex, and is waiting for other threads to
     408     // pause or reach a safepoint.
    409     if (!state->isAtSafePoint())
    410         state->enterSafePointWithoutPointers();
    411 
    412     {
    413         MutexLocker locker(threadAttachMutex());
    414         state->leaveSafePoint();
    415         state->postCleanup();
    416         ASSERT(attachedThreads().contains(state));
    417         attachedThreads().remove(state);
    418         delete state;
    419     }
    420     shutdownHeapIfNecessary();
    421 }
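         // Usage sketch for a non-main thread (the thread entry point below is
         // hypothetical): a thread that allocates garbage collected objects must
         // attach its ThreadState before the first allocation and detach it
         // before exiting.
         //
         //   void workerThreadMain()
         //   {
         //       ThreadState::attach();
         //       // ... run code that allocates and uses garbage collected objects ...
         //       ThreadState::detach();
         //   }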
    422 
    423 void ThreadState::visitRoots(Visitor* visitor)
    424 {
    425     {
     426         // All threads are at safepoints, so this is not strictly necessary.
     427         // However, we acquire the mutex to make mutation and traversal of
     428         // this list symmetrical.
    429         MutexLocker locker(globalRootsMutex());
    430         globalRoots()->trace(visitor);
    431     }
    432 
    433     AttachedThreadStateSet& threads = attachedThreads();
    434     for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
    435         (*it)->trace(visitor);
    436 }
    437 
    438 NO_SANITIZE_ADDRESS
    439 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr)
    440 {
    441 #if defined(ADDRESS_SANITIZER)
    442     Address* start = reinterpret_cast<Address*>(m_startOfStack);
    443     Address* end = reinterpret_cast<Address*>(m_endOfStack);
    444     Address* fakeFrameStart = 0;
    445     Address* fakeFrameEnd = 0;
    446     Address* maybeFakeFrame = reinterpret_cast<Address*>(ptr);
    447     Address* realFrameForFakeFrame =
    448         reinterpret_cast<Address*>(
    449             __asan_addr_is_in_fake_stack(
    450                 m_asanFakeStack, maybeFakeFrame,
    451                 reinterpret_cast<void**>(&fakeFrameStart),
    452                 reinterpret_cast<void**>(&fakeFrameEnd)));
    453     if (realFrameForFakeFrame) {
    454         // This is a fake frame from the asan fake stack.
    455         if (realFrameForFakeFrame > end && start > realFrameForFakeFrame) {
    456             // The real stack address for the asan fake frame is
    457             // within the stack range that we need to scan so we need
    458             // to visit the values in the fake frame.
    459             for (Address* p = fakeFrameStart; p < fakeFrameEnd; p++)
    460                 Heap::checkAndMarkPointer(visitor, *p);
    461         }
    462     }
    463 #endif
    464 }
    465 
    466 NO_SANITIZE_ADDRESS
    467 void ThreadState::visitStack(Visitor* visitor)
    468 {
    469     Address* start = reinterpret_cast<Address*>(m_startOfStack);
    470     // If there is a safepoint scope marker we should stop the stack
    471     // scanning there to not touch active parts of the stack. Anything
    472     // interesting beyond that point is in the safepoint stack copy.
    473     // If there is no scope marker the thread is blocked and we should
    474     // scan all the way to the recorded end stack pointer.
    475     Address* end = reinterpret_cast<Address*>(m_endOfStack);
    476     Address* safePointScopeMarker = reinterpret_cast<Address*>(m_safePointScopeMarker);
    477     Address* current = safePointScopeMarker ? safePointScopeMarker : end;
    478 
     479     // Ensure that current is aligned to the address size; otherwise the
     480     // loop below will read past the start address.
    481     current = reinterpret_cast<Address*>(reinterpret_cast<intptr_t>(current) & ~(sizeof(Address) - 1));
    482 
    483     for (; current < start; ++current) {
    484         Address ptr = *current;
    485 #if defined(MEMORY_SANITIZER)
    486         // |ptr| may be uninitialized by design. Mark it as initialized to keep
    487         // MSan from complaining.
    488         // Note: it may be tempting to get rid of |ptr| and simply use |current|
    489         // here, but that would be incorrect. We intentionally use a local
    490         // variable because we don't want to unpoison the original stack.
    491         __msan_unpoison(&ptr, sizeof(ptr));
    492 #endif
    493         Heap::checkAndMarkPointer(visitor, ptr);
    494         visitAsanFakeStackForPointer(visitor, ptr);
    495     }
    496 
    497     for (Vector<Address>::iterator it = m_safePointStackCopy.begin(); it != m_safePointStackCopy.end(); ++it) {
    498         Address ptr = *it;
    499 #if defined(MEMORY_SANITIZER)
    500         // See the comment above.
    501         __msan_unpoison(&ptr, sizeof(ptr));
    502 #endif
    503         Heap::checkAndMarkPointer(visitor, ptr);
    504         visitAsanFakeStackForPointer(visitor, ptr);
    505     }
    506 }
    507 
    508 void ThreadState::visitPersistents(Visitor* visitor)
    509 {
    510     m_persistents->trace(visitor);
    511 }
    512 
    513 void ThreadState::trace(Visitor* visitor)
    514 {
    515     if (m_stackState == HeapPointersOnStack)
    516         visitStack(visitor);
    517     visitPersistents(visitor);
    518 }
    519 
    520 bool ThreadState::checkAndMarkPointer(Visitor* visitor, Address address)
    521 {
     522     // If the thread is cleaning up, ignore conservative pointers.
    523     if (m_isCleaningUp)
    524         return false;
    525 
    526     // This checks for normal pages and for large objects which span the extent
    527     // of several normal pages.
    528     BaseHeapPage* page = heapPageFromAddress(address);
    529     if (page) {
    530         page->checkAndMarkPointer(visitor, address);
     531         // Whether or not the pointer was within an object, it was certainly
     532         // within a page that is part of the heap, so we don't want to ask
     533         // the other heaps or put this address in the
     534         // HeapDoesNotContainCache.
    535         return true;
    536     }
    537 
    538     return false;
    539 }
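         // Note that this check is conservative by design: any stack value whose
         // bit pattern happens to equal an address inside a heap page, even a
         // plain integer, will keep the corresponding object (and everything
         // reachable from it) alive for this collection. That is safe; it merely
         // retains more memory than strictly necessary.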
    540 
    541 #if ENABLE(GC_TRACING)
    542 const GCInfo* ThreadState::findGCInfo(Address address)
    543 {
    544     BaseHeapPage* page = heapPageFromAddress(address);
    545     if (page) {
    546         return page->findGCInfo(address);
    547     }
    548     return 0;
    549 }
    550 #endif
    551 
    552 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback)
    553 {
    554     CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack);
    555     *slot = CallbackStack::Item(object, callback);
    556 }
    557 
    558 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor)
    559 {
    560     return m_weakCallbackStack->popAndInvokeCallback(&m_weakCallbackStack, visitor);
    561 }
    562 
    563 PersistentNode* ThreadState::globalRoots()
    564 {
    565     AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor);
    566     return anchor;
    567 }
    568 
    569 Mutex& ThreadState::globalRootsMutex()
    570 {
    571     AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    572     return mutex;
    573 }
    574 
     575 // Trigger garbage collection on a 50% increase in size, but not before
     576 // the heap has reached at least 512 kbytes.
    577 static bool increasedEnoughToGC(size_t newSize, size_t oldSize)
    578 {
    579     if (newSize < 1 << 19)
    580         return false;
    581     return newSize > oldSize + (oldSize >> 1);
    582 }
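         // Worked example with illustrative sizes:
         //
         //   increasedEnoughToGC(1600 * 1024, 1024 * 1024) // true: above the
         //       // 512 kbyte floor and more than 1.5 * oldSize.
         //   increasedEnoughToGC(400 * 1024, 16 * 1024)    // false: below the
         //       // 512 kbyte (1 << 19 bytes) floor.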
    583 
    584 // FIXME: The heuristics are local for a thread at this
    585 // point. Consider using heuristics that take memory for all threads
    586 // into account.
    587 bool ThreadState::shouldGC()
    588 {
    589     // Do not GC during sweeping. We allow allocation during
    590     // finalization, but those allocations are not allowed
    591     // to lead to nested garbage collections.
    592     return !m_sweepInProgress && increasedEnoughToGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace());
    593 }
    594 
     595 // Trigger conservative garbage collection on a 100% increase in size,
     596 // but not before the heap has reached at least 4 Mbytes.
    597 static bool increasedEnoughToForceConservativeGC(size_t newSize, size_t oldSize)
    598 {
    599     if (newSize < 1 << 22)
    600         return false;
    601     return newSize > 2 * oldSize;
    602 }
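         // Worked example with illustrative sizes:
         //
         //   increasedEnoughToForceConservativeGC(9 * 1024 * 1024, 4 * 1024 * 1024)
         //       // true: above the 4 Mbyte floor and more than double oldSize.
         //   increasedEnoughToForceConservativeGC(3 * 1024 * 1024, 1 * 1024 * 1024)
         //       // false: below the 4 Mbyte (1 << 22 bytes) floor.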
    603 
    604 // FIXME: The heuristics are local for a thread at this
    605 // point. Consider using heuristics that take memory for all threads
    606 // into account.
    607 bool ThreadState::shouldForceConservativeGC()
    608 {
    609     // Do not GC during sweeping. We allow allocation during
    610     // finalization, but those allocations are not allowed
    611     // to lead to nested garbage collections.
    612     return !m_sweepInProgress && increasedEnoughToForceConservativeGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace());
    613 }
    614 
    615 bool ThreadState::sweepRequested()
    616 {
    617     ASSERT(isAnyThreadInGC() || checkThread());
    618     return m_sweepRequested;
    619 }
    620 
    621 void ThreadState::setSweepRequested()
    622 {
     623     // The sweep request is set from the thread that initiates garbage
     624     // collection, which could be different from the thread that owns this
     625     // thread state. Therefore the setting of m_sweepRequested needs a
     626     // barrier.
    627     atomicTestAndSetToOne(&m_sweepRequested);
    628 }
    629 
    630 void ThreadState::clearSweepRequested()
    631 {
    632     checkThread();
    633     m_sweepRequested = 0;
    634 }
    635 
    636 bool ThreadState::gcRequested()
    637 {
    638     checkThread();
    639     return m_gcRequested;
    640 }
    641 
    642 void ThreadState::setGCRequested()
    643 {
    644     checkThread();
    645     m_gcRequested = true;
    646 }
    647 
    648 void ThreadState::clearGCRequested()
    649 {
    650     checkThread();
    651     m_gcRequested = false;
    652 }
    653 
    654 void ThreadState::performPendingGC(StackState stackState)
    655 {
    656     if (stackState == NoHeapPointersOnStack) {
    657         if (forcePreciseGCForTesting()) {
    658             setForcePreciseGCForTesting(false);
    659             Heap::collectAllGarbage();
    660         } else if (gcRequested()) {
    661             Heap::collectGarbage(NoHeapPointersOnStack);
    662         }
    663     }
    664 }
    665 
    666 void ThreadState::setForcePreciseGCForTesting(bool value)
    667 {
    668     checkThread();
    669     m_forcePreciseGCForTesting = value;
    670 }
    671 
    672 bool ThreadState::forcePreciseGCForTesting()
    673 {
    674     checkThread();
    675     return m_forcePreciseGCForTesting;
    676 }
    677 
    678 bool ThreadState::isConsistentForGC()
    679 {
    680     for (int i = 0; i < NumberOfHeaps; i++) {
    681         if (!m_heaps[i]->isConsistentForGC())
    682             return false;
    683     }
    684     return true;
    685 }
    686 
    687 void ThreadState::makeConsistentForGC()
    688 {
    689     for (int i = 0; i < NumberOfHeaps; i++)
    690         m_heaps[i]->makeConsistentForGC();
    691 }
    692 
    693 void ThreadState::prepareForGC()
    694 {
    695     for (int i = 0; i < NumberOfHeaps; i++) {
    696         BaseHeap* heap = m_heaps[i];
    697         heap->makeConsistentForGC();
     698         // If there are parked threads with outstanding sweep requests, clear
     699         // their mark bits. This happens if a thread did not have time to wake
     700         // up and sweep before the next GC arrived.
    701         if (sweepRequested())
    702             heap->clearMarks();
    703     }
    704     setSweepRequested();
    705 }
    706 
    707 BaseHeapPage* ThreadState::heapPageFromAddress(Address address)
    708 {
    709     BaseHeapPage* cachedPage = heapContainsCache()->lookup(address);
    710 #ifdef NDEBUG
    711     if (cachedPage)
    712         return cachedPage;
    713 #endif
    714 
    715     for (int i = 0; i < NumberOfHeaps; i++) {
    716         BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address);
    717         if (page) {
    718             // Asserts that make sure heapPageFromAddress takes addresses from
    719             // the whole aligned blinkPageSize memory area. This is necessary
    720             // for the negative cache to work.
    721             ASSERT(page->isLargeObject() || page == m_heaps[i]->heapPageFromAddress(roundToBlinkPageStart(address)));
    722             if (roundToBlinkPageStart(address) != roundToBlinkPageEnd(address))
    723                 ASSERT(page->isLargeObject() || page == m_heaps[i]->heapPageFromAddress(roundToBlinkPageEnd(address) - 1));
    724             ASSERT(!cachedPage || page == cachedPage);
    725             if (!cachedPage)
    726                 heapContainsCache()->addEntry(address, page);
    727             return page;
    728         }
    729     }
    730     ASSERT(!cachedPage);
    731     return 0;
    732 }
    733 
    734 void ThreadState::getStats(HeapStats& stats)
    735 {
    736     stats = m_stats;
    737 #ifndef NDEBUG
    738     if (isConsistentForGC()) {
    739         HeapStats scannedStats;
    740         scannedStats.clear();
    741         for (int i = 0; i < NumberOfHeaps; i++)
    742             m_heaps[i]->getScannedStats(scannedStats);
    743         ASSERT(scannedStats == stats);
    744     }
    745 #endif
    746 }
    747 
    748 bool ThreadState::stopThreads()
    749 {
    750     return s_safePointBarrier->parkOthers();
    751 }
    752 
    753 void ThreadState::resumeThreads()
    754 {
    755     s_safePointBarrier->resumeOthers();
    756 }
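         // Sketch of how a collection initiator is expected to drive the two
         // calls above (a hypothetical, simplified driver; the real entry point
         // is Heap::collectGarbage, and marking is elided):
         //
         //   void collectGarbageSketch(Visitor* visitor)
         //   {
         //       // The initiating thread must itself be inside a safepoint scope.
         //       if (!ThreadState::stopThreads())
         //           return; // A thread failed to park within lockingTimeout().
         //       ThreadState::visitRoots(visitor); // mark phase
         //       ThreadState::resumeThreads();
         //   }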
    757 
    758 void ThreadState::safePoint(StackState stackState)
    759 {
    760     checkThread();
    761     performPendingGC(stackState);
    762     ASSERT(!m_atSafePoint);
    763     m_stackState = stackState;
    764     m_atSafePoint = true;
    765     s_safePointBarrier->checkAndPark(this);
    766     m_atSafePoint = false;
    767     m_stackState = HeapPointersOnStack;
    768 }
    769 
    770 #ifdef ADDRESS_SANITIZER
     771 // When we are running under AddressSanitizer with detect_stack_use_after_return=1,
     772 // the stack marker obtained from SafePointScope will point into a fake stack.
     773 // Detect this case by checking whether the marker falls between the current stack
     774 // frame and the stack start; if it does not, use an arbitrary, high enough value
     775 // instead. Don't adjust the stack marker in any other case, to match the behavior
     776 // of code running without AddressSanitizer.
     777 NO_SANITIZE_ADDRESS static void* adjustScopeMarkerForAddressSanitizer(void* scopeMarker)
    778 {
    779     Address start = reinterpret_cast<Address>(getStackStart());
    780     Address end = reinterpret_cast<Address>(&start);
    781     RELEASE_ASSERT(end < start);
    782 
    783     if (end <= scopeMarker && scopeMarker < start)
    784         return scopeMarker;
    785 
     786     // 256 is as good an approximation as any other.
    787     const size_t bytesToCopy = sizeof(Address) * 256;
    788     if (static_cast<size_t>(start - end) < bytesToCopy)
    789         return start;
    790 
    791     return end + bytesToCopy;
    792 }
    793 #endif
    794 
    795 void ThreadState::enterSafePoint(StackState stackState, void* scopeMarker)
    796 {
    797 #ifdef ADDRESS_SANITIZER
    798     if (stackState == HeapPointersOnStack)
     799         scopeMarker = adjustScopeMarkerForAddressSanitizer(scopeMarker);
    800 #endif
    801     ASSERT(stackState == NoHeapPointersOnStack || scopeMarker);
    802     performPendingGC(stackState);
    803     checkThread();
    804     ASSERT(!m_atSafePoint);
    805     m_atSafePoint = true;
    806     m_stackState = stackState;
    807     m_safePointScopeMarker = scopeMarker;
    808     s_safePointBarrier->enterSafePoint(this);
    809 }
    810 
    811 void ThreadState::leaveSafePoint(SafePointAwareMutexLocker* locker)
    812 {
    813     checkThread();
    814     ASSERT(m_atSafePoint);
    815     s_safePointBarrier->leaveSafePoint(this, locker);
    816     m_atSafePoint = false;
    817     m_stackState = HeapPointersOnStack;
    818     clearSafePointScopeMarker();
    819     performPendingSweep();
    820 }
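         // Usage sketch: code that is about to block for a potentially long time
         // (for example waiting on a condition variable) should do so inside a
         // SafePointScope so that a concurrent GC does not have to wait for this
         // thread. The function and variable names below are illustrative only:
         //
         //   void waitForWork(Mutex& mutex, ThreadCondition& condition)
         //   {
         //       SafePointScope scope(ThreadState::HeapPointersOnStack);
         //       MutexLocker locker(mutex);
         //       condition.wait(mutex);
         //   }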
    821 
    822 void ThreadState::copyStackUntilSafePointScope()
    823 {
    824     if (!m_safePointScopeMarker || m_stackState == NoHeapPointersOnStack)
    825         return;
    826 
    827     Address* to = reinterpret_cast<Address*>(m_safePointScopeMarker);
    828     Address* from = reinterpret_cast<Address*>(m_endOfStack);
    829     RELEASE_ASSERT(from < to);
    830     RELEASE_ASSERT(to <= reinterpret_cast<Address*>(m_startOfStack));
    831     size_t slotCount = static_cast<size_t>(to - from);
    832     ASSERT(slotCount < 1024); // Catch potential performance issues.
    833 
    834     ASSERT(!m_safePointStackCopy.size());
    835     m_safePointStackCopy.resize(slotCount);
    836     for (size_t i = 0; i < slotCount; ++i) {
    837         m_safePointStackCopy[i] = from[i];
    838     }
    839 }
    840 
    841 void ThreadState::performPendingSweep()
    842 {
    843     if (!sweepRequested())
    844         return;
    845 
    846     TRACE_EVENT0("Blink", "ThreadState::performPendingSweep");
    847     double timeStamp = WTF::currentTimeMS();
    848     const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
    849     if (isMainThread())
    850         TRACE_EVENT_SET_SAMPLING_STATE("Blink", "BlinkGCSweeping");
    851 
    852     m_sweepInProgress = true;
    853     // Disallow allocation during weak processing.
    854     enterNoAllocationScope();
    855     // Perform thread-specific weak processing.
    856     while (popAndInvokeWeakPointerCallback(Heap::s_markingVisitor)) { }
    857     leaveNoAllocationScope();
    858     // Perform sweeping and finalization.
    859     m_stats.clear(); // Sweeping will recalculate the stats
    860     for (int i = 0; i < NumberOfHeaps; i++)
    861         m_heaps[i]->sweep();
    862     getStats(m_statsAfterLastGC);
    863     m_sweepInProgress = false;
    864     clearGCRequested();
    865     clearSweepRequested();
    866 
    867     if (blink::Platform::current()) {
    868         blink::Platform::current()->histogramCustomCounts("BlinkGC.PerformPendingSweep", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
    869     }
    870 
    871     if (isMainThread())
    872         TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
    873 }
    874 
    875 void ThreadState::addInterruptor(Interruptor* interruptor)
    876 {
    877     SafePointScope scope(HeapPointersOnStack, SafePointScope::AllowNesting);
    878 
    879     {
    880         MutexLocker locker(threadAttachMutex());
    881         m_interruptors.append(interruptor);
    882     }
    883 }
    884 
    885 void ThreadState::removeInterruptor(Interruptor* interruptor)
    886 {
    887     SafePointScope scope(HeapPointersOnStack, SafePointScope::AllowNesting);
    888 
    889     {
    890         MutexLocker locker(threadAttachMutex());
    891         size_t index = m_interruptors.find(interruptor);
     892         RELEASE_ASSERT(index != kNotFound);
    893         m_interruptors.remove(index);
    894     }
    895 }
    896 
    897 void ThreadState::Interruptor::onInterrupted()
    898 {
    899     ThreadState* state = ThreadState::current();
    900     ASSERT(state);
    901     ASSERT(!state->isAtSafePoint());
    902     state->safePoint(HeapPointersOnStack);
    903 }
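         // Sketch of a hypothetical Interruptor implementation (not one of the
         // real ones; it assumes requestInterrupt() and clearInterrupt() are the
         // virtual hooks declared on ThreadState::Interruptor): requestInterrupt()
         // arranges for the target thread to call onInterrupted() reasonably soon,
         // and clearInterrupt() withdraws that request.
         //
         //   class PollingInterruptor : public ThreadState::Interruptor {
         //   public:
         //       PollingInterruptor() : m_requested(0) { }
         //       virtual void requestInterrupt() { releaseStore(&m_requested, 1); }
         //       virtual void clearInterrupt() { releaseStore(&m_requested, 0); }
         //       // Polled by the target thread, e.g. from its event loop, which
         //       // calls onInterrupted() when this returns true.
         //       bool interruptRequested() { return acquireLoad(&m_requested); }
         //   private:
         //       volatile int m_requested;
         //   };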
    904 
    905 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads()
    906 {
    907     DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ());
    908     return threads;
    909 }
    910 
    911 #if ENABLE(GC_TRACING)
    912 const GCInfo* ThreadState::findGCInfoFromAllThreads(Address address)
    913 {
    914     bool needLockForIteration = !isAnyThreadInGC();
    915     if (needLockForIteration)
    916         threadAttachMutex().lock();
    917 
    918     ThreadState::AttachedThreadStateSet& threads = attachedThreads();
    919     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
    920         if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) {
    921             if (needLockForIteration)
    922                 threadAttachMutex().unlock();
    923             return gcInfo;
    924         }
    925     }
    926     if (needLockForIteration)
    927         threadAttachMutex().unlock();
    928     return 0;
    929 }
    930 #endif
    931 }
    932