X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/93a3786624b2768d89bfa27e46598dc64e2fb70a..81345200c95645a1b0d2635520f96ad55dfde63f:/interpreter/JSStack.cpp

diff --git a/interpreter/JSStack.cpp b/interpreter/JSStack.cpp
index ec2962a..c6a703c 100644
--- a/interpreter/JSStack.cpp
+++ b/interpreter/JSStack.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
@@ -10,7 +10,7 @@
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
@@ -27,86 +27,117 @@
 */
 
 #include "config.h"
-#include "JSStack.h"
 #include "JSStackInlines.h"
 
 #include "ConservativeRoots.h"
 #include "Interpreter.h"
+#include "JSCInlines.h"
+#include "Options.h"
 
 namespace JSC {
 
+#if !ENABLE(JIT)
 static size_t committedBytesCount = 0;
 
 static Mutex& stackStatisticsMutex()
 {
-    DEFINE_STATIC_LOCAL(Mutex, staticMutex, ());
+    DEPRECATED_DEFINE_STATIC_LOCAL(Mutex, staticMutex, ());
     return staticMutex;
 }
+#endif // !ENABLE(JIT)
 
-JSStack::JSStack(VM& vm, size_t capacity)
-    : m_end(0)
+JSStack::JSStack(VM& vm)
+    : m_vm(vm)
     , m_topCallFrame(vm.topCallFrame)
+#if !ENABLE(JIT)
+    , m_end(0)
+    , m_reservedZoneSizeInRegisters(0)
+#endif
 {
+#if !ENABLE(JIT)
+    size_t capacity = Options::maxPerThreadStackUsage();
     ASSERT(capacity && isPageAligned(capacity));
 
-    m_reservation = PageReservation::reserve(roundUpAllocationSize(capacity * sizeof(Register), commitSize), OSAllocator::JSVMStackPages);
-    m_end = static_cast<Register*>(m_reservation.base());
-    m_commitEnd = static_cast<Register*>(m_reservation.base());
-
-    disableErrorStackReserve();
+    m_reservation = PageReservation::reserve(WTF::roundUpToMultipleOf(commitSize, capacity), OSAllocator::JSVMStackPages);
+    setStackLimit(highAddress());
+    m_commitTop = highAddress();
+
+    m_lastStackTop = baseOfStack();
+#endif // !ENABLE(JIT)
 
     m_topCallFrame = 0;
 }
 
+#if !ENABLE(JIT)
JSStack::~JSStack()
 {
-    void* base = m_reservation.base();
-    m_reservation.decommit(base, reinterpret_cast<intptr_t>(m_commitEnd) - reinterpret_cast<intptr_t>(base));
-    addToCommittedByteCount(-(reinterpret_cast<intptr_t>(m_commitEnd) - reinterpret_cast<intptr_t>(base)));
+    ptrdiff_t sizeToDecommit = reinterpret_cast<char*>(highAddress()) - reinterpret_cast<char*>(m_commitTop);
+    m_reservation.decommit(reinterpret_cast<void*>(m_commitTop), sizeToDecommit);
+    addToCommittedByteCount(-sizeToDecommit);
     m_reservation.deallocate();
 }
 
-bool JSStack::growSlowCase(Register* newEnd)
+bool JSStack::growSlowCase(Register* newTopOfStack)
 {
+    Register* newTopOfStackWithReservedZone = newTopOfStack - m_reservedZoneSizeInRegisters;
+
     // If we have already committed enough memory to satisfy this request,
     // just update the end pointer and return.
-    if (newEnd <= m_commitEnd) {
-        m_end = newEnd;
+    if (newTopOfStackWithReservedZone >= m_commitTop) {
+        setStackLimit(newTopOfStack);
         return true;
     }
 
     // Compute the chunk size of additional memory to commit, and see if we
     // have it is still within our budget. If not, we'll fail to grow and
     // return false.
-    long delta = roundUpAllocationSize(reinterpret_cast<char*>(newEnd) - reinterpret_cast<char*>(m_commitEnd), commitSize);
-    if (reinterpret_cast<char*>(m_commitEnd) + delta > reinterpret_cast<char*>(m_useableEnd))
+    ptrdiff_t delta = reinterpret_cast<char*>(m_commitTop) - reinterpret_cast<char*>(newTopOfStackWithReservedZone);
+    delta = WTF::roundUpToMultipleOf(commitSize, delta);
+    Register* newCommitTop = m_commitTop - (delta / sizeof(Register));
+    if (newCommitTop < reservationTop())
         return false;
 
     // Otherwise, the growth is still within our budget. Go ahead and commit
     // it and return true.
-    m_reservation.commit(m_commitEnd, delta);
+    m_reservation.commit(newCommitTop, delta);
     addToCommittedByteCount(delta);
-    m_commitEnd = reinterpret_cast_ptr<Register*>(reinterpret_cast<char*>(m_commitEnd) + delta);
-    m_end = newEnd;
+    m_commitTop = newCommitTop;
+    setStackLimit(newTopOfStack);
     return true;
 }
 
 void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots)
 {
-    conservativeRoots.add(begin(), getTopOfStack());
+    conservativeRoots.add(topOfStack() + 1, highAddress());
+}
+
+void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks)
+{
+    conservativeRoots.add(topOfStack() + 1, highAddress(), jitStubRoutines, codeBlocks);
 }
 
-void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, DFGCodeBlocks& dfgCodeBlocks)
+void JSStack::sanitizeStack()
 {
-    conservativeRoots.add(begin(), getTopOfStack(), jitStubRoutines, dfgCodeBlocks);
+#if !defined(ADDRESS_SANITIZER)
+    ASSERT(topOfStack() <= baseOfStack());
+
+    if (m_lastStackTop < topOfStack()) {
+        char* begin = reinterpret_cast<char*>(m_lastStackTop + 1);
+        char* end = reinterpret_cast<char*>(topOfStack() + 1);
+        memset(begin, 0, end - begin);
+    }
+
+    m_lastStackTop = topOfStack();
+#endif
 }
 
 void JSStack::releaseExcessCapacity()
 {
-    ptrdiff_t delta = reinterpret_cast<intptr_t>(m_commitEnd) - reinterpret_cast<intptr_t>(m_reservation.base());
-    m_reservation.decommit(m_reservation.base(), delta);
+    Register* highAddressWithReservedZone = highAddress() - m_reservedZoneSizeInRegisters;
+    ptrdiff_t delta = reinterpret_cast<char*>(highAddressWithReservedZone) - reinterpret_cast<char*>(m_commitTop);
+    m_reservation.decommit(m_commitTop, delta);
     addToCommittedByteCount(-delta);
-    m_commitEnd = static_cast<Register*>(m_reservation.base());
+    m_commitTop = highAddressWithReservedZone;
 }
 
 void JSStack::initializeThreading()
@@ -114,12 +145,6 @@ void JSStack::initializeThreading()
     stackStatisticsMutex();
 }
 
-size_t JSStack::committedByteCount()
-{
-    MutexLocker locker(stackStatisticsMutex());
-    return committedBytesCount;
-}
-
 void JSStack::addToCommittedByteCount(long byteCount)
 {
     MutexLocker locker(stackStatisticsMutex());
@@ -127,25 +152,41 @@ void JSStack::addToCommittedByteCount(long byteCount)
     committedBytesCount += byteCount;
 }
 
-void JSStack::enableErrorStackReserve()
+void JSStack::setReservedZoneSize(size_t reservedZoneSize)
 {
-    m_useableEnd = reservationEnd();
+    m_reservedZoneSizeInRegisters = reservedZoneSize / sizeof(Register);
+    if (m_commitTop >= (m_end + 1) - m_reservedZoneSizeInRegisters)
+        growSlowCase(m_end + 1);
 }
+#endif // !ENABLE(JIT)
 
-void JSStack::disableErrorStackReserve()
+#if ENABLE(JIT)
+Register* JSStack::lowAddress() const
 {
-    char* useableEnd = reinterpret_cast<char*>(reservationEnd()) - commitSize;
-    m_useableEnd = reinterpret_cast_ptr<Register*>(useableEnd);
-
-    // By the time we get here, we are guaranteed to be destructing the last
-    // Interpreter::ErrorHandlingMode that enabled this reserve in the first
-    // place. That means the stack space beyond m_useableEnd before we
-    // enabled the reserve was not previously in use. Hence, it is safe to
-    // shrink back to that m_useableEnd.
-    if (m_end > m_useableEnd) {
-        ASSERT(m_topCallFrame->frameExtent() <= m_useableEnd);
-        shrink(m_useableEnd);
-    }
+    ASSERT(wtfThreadData().stack().isGrowingDownward());
+    return reinterpret_cast<Register*>(m_vm.stackLimit());
+}
+
+Register* JSStack::highAddress() const
+{
+    ASSERT(wtfThreadData().stack().isGrowingDownward());
+    return reinterpret_cast<Register*>(wtfThreadData().stack().origin());
+}
+#endif // ENABLE(JIT)
+
+size_t JSStack::committedByteCount()
+{
+#if !ENABLE(JIT)
+    MutexLocker locker(stackStatisticsMutex());
+    return committedBytesCount;
+#else
+    // When using the C stack, we don't know how many stack pages are actually
+    // committed. So, we use the current stack usage as an estimate.
+    ASSERT(wtfThreadData().stack().isGrowingDownward());
+    int8_t* current = reinterpret_cast<int8_t*>(&current);
+    int8_t* high = reinterpret_cast<int8_t*>(wtfThreadData().stack().origin());
+    return high - current;
+#endif
 }
 
 } // namespace JSC
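For readers tracing the new !ENABLE(JIT) growth logic, the sketch below models the downward-growth arithmetic of growSlowCase() in isolation. It is a minimal standalone approximation, not WebKit code: ToyStack, its fields, and the plain array standing in for PageReservation are invented for illustration; only the pointer math mirrors the diff.

// Minimal stand-alone model of the growSlowCase() arithmetic above.
// ToyStack and its backing array are hypothetical stand-ins for the real
// PageReservation-backed stack; only the pointer arithmetic mirrors the diff.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <cstdio>

using Register = uint64_t; // stand-in for JSC::Register (one stack slot)

struct ToyStack {
    static constexpr size_t commitSize = 16 * 1024; // bytes per commit chunk

    Register* reservationTop;           // lowest reserved address (the budget)
    Register* highAddress;              // one past the highest stack slot
    Register* commitTop;                // lowest committed address; moves down
    size_t reservedZoneSizeInRegisters; // safety margin kept below the limit

    static size_t roundUpToMultipleOf(size_t divisor, size_t x)
    {
        return (x + divisor - 1) / divisor * divisor; // assumes no overflow
    }

    // The JS stack grows toward lower addresses, so "growing" means
    // committing more memory below commitTop.
    bool grow(Register* newTopOfStack)
    {
        Register* withZone = newTopOfStack - reservedZoneSizeInRegisters;

        // Already committed enough? Nothing to do but move the limit.
        if (withZone >= commitTop)
            return true;

        // Size the shortfall in bytes and round it up to a whole chunk.
        size_t delta = reinterpret_cast<char*>(commitTop)
            - reinterpret_cast<char*>(withZone);
        delta = roundUpToMultipleOf(commitSize, delta);

        Register* newCommitTop = commitTop - delta / sizeof(Register);
        if (newCommitTop < reservationTop)
            return false; // over budget: the caller reports stack overflow

        // The real code commits the pages here: m_reservation.commit(...)
        commitTop = newCommitTop;
        return true;
    }
};

int main()
{
    static Register backing[64 * 1024]; // stands in for the page reservation

    ToyStack stack;
    stack.reservationTop = backing;
    stack.highAddress = backing + 64 * 1024;
    stack.commitTop = stack.highAddress; // nothing committed yet
    stack.reservedZoneSizeInRegisters = 128;

    // Pushing 1000 slots needs 1128 slots (9024 bytes) below commitTop,
    // which rounds up to one 16 KB chunk (2048 slots).
    assert(stack.grow(stack.highAddress - 1000));
    printf("committed: %zu bytes\n",
        static_cast<size_t>(reinterpret_cast<char*>(stack.highAddress)
            - reinterpret_cast<char*>(stack.commitTop)));
    return 0;
}

Rounding the shortfall up to commitSize amortizes the cost of committing memory: repeated small pushes past the limit trigger one commit per chunk rather than one per page, which is the same design choice the diff makes with WTF::roundUpToMultipleOf(commitSize, delta).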