-    char* useableEnd = reinterpret_cast<char*>(reservationEnd()) - commitSize;
-    m_useableEnd = reinterpret_cast_ptr<Register*>(useableEnd);
-
-    // By the time we get here, we are guaranteed to be destructing the last
-    // Interpreter::ErrorHandlingMode that enabled this reserve in the first
-    // place. That means the stack space beyond m_useableEnd before we
-    // enabled the reserve was not previously in use. Hence, it is safe to
-    // shrink back to that m_useableEnd.
-    if (m_end > m_useableEnd) {
-        ASSERT(m_topCallFrame->frameExtent() <= m_useableEnd);
-        shrink(m_useableEnd);
-    }
+    ASSERT(wtfThreadData().stack().isGrowingDownward());
+    return reinterpret_cast<Register*>(m_vm.stackLimit());
+}
+
+Register* JSStack::highAddress() const
+{
+    ASSERT(wtfThreadData().stack().isGrowingDownward());
+    return reinterpret_cast<Register*>(wtfThreadData().stack().origin());
+}
+#endif // ENABLE(JIT)
+
+size_t JSStack::committedByteCount()
+{
+#if !ENABLE(JIT)
+    MutexLocker locker(stackStatisticsMutex());
+    return committedBytesCount;
+#else
+    // When using the C stack, we don't know how many stack pages are actually
+    // committed. So, we use the current stack usage as an estimate.
+    ASSERT(wtfThreadData().stack().isGrowingDownward());
+    int8_t* current = reinterpret_cast<int8_t*>(&current);
+    int8_t* high = reinterpret_cast<int8_t*>(wtfThreadData().stack().origin());
+    return high - current;
+#endif