diff --git a/interpreter/JSStack.cpp b/interpreter/JSStack.cpp
index ec2962a92c7f401d8e5f5490a5fffcf84af0522f..c6a703cbc603074432c0b281ec20919ee3bec812 100644
--- a/interpreter/JSStack.cpp
+++ b/interpreter/JSStack.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013, 2014 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
  * 2.  Redistributions in binary form must reproduce the above copyright
  *     notice, this list of conditions and the following disclaimer in the
  *     documentation and/or other materials provided with the distribution.
- * 3.  Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3.  Neither the name of Apple Inc. ("Apple") nor the names of
  *     its contributors may be used to endorse or promote products derived
  *     from this software without specific prior written permission.
  *
  */
 
 #include "config.h"
-#include "JSStack.h"
 #include "JSStackInlines.h"
 
 #include "ConservativeRoots.h"
 #include "Interpreter.h"
+#include "JSCInlines.h"
+#include "Options.h"
 
 namespace JSC {
 
+#if !ENABLE(JIT)
 static size_t committedBytesCount = 0;
 
 static Mutex& stackStatisticsMutex()
 {
-    DEFINE_STATIC_LOCAL(Mutex, staticMutex, ());
+    DEPRECATED_DEFINE_STATIC_LOCAL(Mutex, staticMutex, ());
     return staticMutex;
 }    
+#endif // !ENABLE(JIT)
 
-JSStack::JSStack(VM& vm, size_t capacity)
-    : m_end(0)
+JSStack::JSStack(VM& vm)
+    : m_vm(vm)
     , m_topCallFrame(vm.topCallFrame)
+#if !ENABLE(JIT)
+    , m_end(0)
+    , m_reservedZoneSizeInRegisters(0)
+#endif
 {
+#if !ENABLE(JIT)
+    size_t capacity = Options::maxPerThreadStackUsage();
     ASSERT(capacity && isPageAligned(capacity));
 
-    m_reservation = PageReservation::reserve(roundUpAllocationSize(capacity * sizeof(Register), commitSize), OSAllocator::JSVMStackPages);
-    m_end = static_cast<Register*>(m_reservation.base());
-    m_commitEnd = static_cast<Register*>(m_reservation.base());
-
-    disableErrorStackReserve();
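+    // Reserve the full address range up front; pages are committed on demand as the stack grows down.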
+    m_reservation = PageReservation::reserve(WTF::roundUpToMultipleOf(commitSize, capacity), OSAllocator::JSVMStackPages);
+    setStackLimit(highAddress());
+    m_commitTop = highAddress();
+    
+    m_lastStackTop = baseOfStack();
+#endif // !ENABLE(JIT)
 
     m_topCallFrame = 0;
 }
 
+#if !ENABLE(JIT)
 JSStack::~JSStack()
 {
-    void* base = m_reservation.base();
-    m_reservation.decommit(base, reinterpret_cast<intptr_t>(m_commitEnd) - reinterpret_cast<intptr_t>(base));
-    addToCommittedByteCount(-(reinterpret_cast<intptr_t>(m_commitEnd) - reinterpret_cast<intptr_t>(base)));
+    ptrdiff_t sizeToDecommit = reinterpret_cast<char*>(highAddress()) - reinterpret_cast<char*>(m_commitTop);
+    m_reservation.decommit(reinterpret_cast<void*>(m_commitTop), sizeToDecommit);
+    addToCommittedByteCount(-sizeToDecommit);
     m_reservation.deallocate();
 }
 
-bool JSStack::growSlowCase(Register* newEnd)
+bool JSStack::growSlowCase(Register* newTopOfStack)
 {
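+    // The stack grows down, so the reserved zone sits just below the requested new top.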
+    Register* newTopOfStackWithReservedZone = newTopOfStack - m_reservedZoneSizeInRegisters;
+
     // If we have already committed enough memory to satisfy this request,
     // just update the stack limit and return.
-    if (newEnd <= m_commitEnd) {
-        m_end = newEnd;
+    if (newTopOfStackWithReservedZone >= m_commitTop) {
+        setStackLimit(newTopOfStack);
         return true;
     }
 
     // Compute the chunk size of additional memory to commit, and check
     // that it is still within our budget. If not, we'll fail to grow and
     // return false.
-    long delta = roundUpAllocationSize(reinterpret_cast<char*>(newEnd) - reinterpret_cast<char*>(m_commitEnd), commitSize);
-    if (reinterpret_cast<char*>(m_commitEnd) + delta > reinterpret_cast<char*>(m_useableEnd))
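+    // Commit whole chunks downward from the current commit top.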
+    ptrdiff_t delta = reinterpret_cast<char*>(m_commitTop) - reinterpret_cast<char*>(newTopOfStackWithReservedZone);
+    delta = WTF::roundUpToMultipleOf(commitSize, delta);
+    Register* newCommitTop = m_commitTop - (delta / sizeof(Register));
+    if (newCommitTop < reservationTop())
         return false;
 
     // Otherwise, the growth is still within our budget. Go ahead and commit
     // it and return true.
-    m_reservation.commit(m_commitEnd, delta);
+    m_reservation.commit(newCommitTop, delta);
     addToCommittedByteCount(delta);
-    m_commitEnd = reinterpret_cast_ptr<Register*>(reinterpret_cast<char*>(m_commitEnd) + delta);
-    m_end = newEnd;
+    m_commitTop = newCommitTop;
+    setStackLimit(newTopOfStack);
     return true;
 }
 
 void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots)
 {
-    conservativeRoots.add(begin(), getTopOfStack());
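+    // Only the live region needs scanning: from just above the current top to the high end of the stack.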
+    conservativeRoots.add(topOfStack() + 1, highAddress());
+}
+
+void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks)
+{
+    conservativeRoots.add(topOfStack() + 1, highAddress(), jitStubRoutines, codeBlocks);
 }
 
-void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, DFGCodeBlocks& dfgCodeBlocks)
+void JSStack::sanitizeStack()
 {
-    conservativeRoots.add(begin(), getTopOfStack(), jitStubRoutines, dfgCodeBlocks);
+#if !defined(ADDRESS_SANITIZER)
+    ASSERT(topOfStack() <= baseOfStack());
+    
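+    // Zero any slots vacated since the last sanitize so stale values cannot be conservatively marked later.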
+    if (m_lastStackTop < topOfStack()) {
+        char* begin = reinterpret_cast<char*>(m_lastStackTop + 1);
+        char* end = reinterpret_cast<char*>(topOfStack() + 1);
+        memset(begin, 0, end - begin);
+    }
+    
+    m_lastStackTop = topOfStack();
+#endif
 }
 
 void JSStack::releaseExcessCapacity()
 {
-    ptrdiff_t delta = reinterpret_cast<uintptr_t>(m_commitEnd) - reinterpret_cast<uintptr_t>(m_reservation.base());
-    m_reservation.decommit(m_reservation.base(), delta);
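+    // Decommit everything except the reserved zone at the top of the stack, which stays committed.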
+    Register* highAddressWithReservedZone = highAddress() - m_reservedZoneSizeInRegisters;
+    ptrdiff_t delta = reinterpret_cast<char*>(highAddressWithReservedZone) - reinterpret_cast<char*>(m_commitTop);
+    m_reservation.decommit(m_commitTop, delta);
     addToCommittedByteCount(-delta);
-    m_commitEnd = static_cast<Register*>(m_reservation.base());
+    m_commitTop = highAddressWithReservedZone;
 }
 
 void JSStack::initializeThreading()
@@ -114,12 +145,6 @@ void JSStack::initializeThreading()
     stackStatisticsMutex();
 }
 
-size_t JSStack::committedByteCount()
-{
-    MutexLocker locker(stackStatisticsMutex());
-    return committedBytesCount;
-}
-
 void JSStack::addToCommittedByteCount(long byteCount)
 {
     MutexLocker locker(stackStatisticsMutex());
@@ -127,25 +152,41 @@ void JSStack::addToCommittedByteCount(long byteCount)
     committedBytesCount += byteCount;
 }
 
-void JSStack::enableErrorStackReserve()
+void JSStack::setReservedZoneSize(size_t reservedZoneSize)
 {
-    m_useableEnd = reservationEnd();
+    m_reservedZoneSizeInRegisters = reservedZoneSize / sizeof(Register);
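+    // If the committed region no longer covers the new reserved zone, grow to cover it now.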
+    if (m_commitTop >= (m_end + 1) - m_reservedZoneSizeInRegisters)
+        growSlowCase(m_end + 1);
 }
+#endif // !ENABLE(JIT)
 
-void JSStack::disableErrorStackReserve()
+#if ENABLE(JIT)
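+// With the JIT enabled, JS frames live on the native C stack, so the bounds come from the thread's stack info.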
+Register* JSStack::lowAddress() const
 {
-    char* useableEnd = reinterpret_cast<char*>(reservationEnd()) - commitSize;
-    m_useableEnd = reinterpret_cast_ptr<Register*>(useableEnd);
-
-    // By the time we get here, we are guaranteed to be destructing the last
-    // Interpreter::ErrorHandlingMode that enabled this reserve in the first
-    // place. That means the stack space beyond m_useableEnd before we
-    // enabled the reserve was not previously in use. Hence, it is safe to
-    // shrink back to that m_useableEnd.
-    if (m_end > m_useableEnd) {
-        ASSERT(m_topCallFrame->frameExtent() <= m_useableEnd);
-        shrink(m_useableEnd);
-    }
+    ASSERT(wtfThreadData().stack().isGrowingDownward());
+    return reinterpret_cast<Register*>(m_vm.stackLimit());
+}
+
+Register* JSStack::highAddress() const
+{
+    ASSERT(wtfThreadData().stack().isGrowingDownward());
+    return reinterpret_cast<Register*>(wtfThreadData().stack().origin());
+}
+#endif // ENABLE(JIT)
+
+size_t JSStack::committedByteCount()
+{
+#if !ENABLE(JIT)
+    MutexLocker locker(stackStatisticsMutex());
+    return committedBytesCount;
+#else
+    // When using the C stack, we don't know how many stack pages are actually
+    // committed. So, we use the current stack usage as an estimate.
+    ASSERT(wtfThreadData().stack().isGrowingDownward());
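+    // The address of a local variable approximates the current stack pointer.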
+    int8_t* current = reinterpret_cast<int8_t*>(&current);
+    int8_t* high = reinterpret_cast<int8_t*>(wtfThreadData().stack().origin());
+    return high - current;
+#endif
 }
 
 } // namespace JSC