/*
 * Copyright (C) 2008, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "JSStackInlines.h"

#include "ConservativeRoots.h"
#include "Interpreter.h"
#include "JSCInlines.h"
#include "Options.h"

namespace JSC {

#if !ENABLE(JIT)
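// Running total of stack bytes committed across all JSStacks in the process;
// read and updated only while holding stackStatisticsMutex().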
static size_t committedBytesCount = 0;

static Mutex& stackStatisticsMutex()
{
    DEPRECATED_DEFINE_STATIC_LOCAL(Mutex, staticMutex, ());
    return staticMutex;
}
#endif // !ENABLE(JIT)

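// When the JIT is disabled, the JS stack is a separate downward-growing
// region: the full address range is reserved up front and pages are
// committed lazily as the stack grows.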
JSStack::JSStack(VM& vm)
    : m_vm(vm)
    , m_topCallFrame(vm.topCallFrame)
#if !ENABLE(JIT)
    , m_end(0)
    , m_reservedZoneSizeInRegisters(0)
#endif
{
#if !ENABLE(JIT)
    size_t capacity = Options::maxPerThreadStackUsage();
    ASSERT(capacity && isPageAligned(capacity));

    m_reservation = PageReservation::reserve(WTF::roundUpToMultipleOf(commitSize, capacity), OSAllocator::JSVMStackPages);
    setStackLimit(highAddress());
    m_commitTop = highAddress();

    m_lastStackTop = baseOfStack();
#endif // !ENABLE(JIT)

    m_topCallFrame = 0;
}

#if !ENABLE(JIT)
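// Decommit every page committed so far (from m_commitTop up to the high
// address) and release the underlying reservation.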
JSStack::~JSStack()
{
    ptrdiff_t sizeToDecommit = reinterpret_cast<char*>(highAddress()) - reinterpret_cast<char*>(m_commitTop);
    m_reservation.decommit(reinterpret_cast<void*>(m_commitTop), sizeToDecommit);
    addToCommittedByteCount(-sizeToDecommit);
    m_reservation.deallocate();
}

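// Slow path taken when the requested top of stack may fall below the
// committed region: commit more pages in commitSize chunks, keeping the
// reserved zone below the new top, or fail if the reservation is exhausted.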
bool JSStack::growSlowCase(Register* newTopOfStack)
{
    Register* newTopOfStackWithReservedZone = newTopOfStack - m_reservedZoneSizeInRegisters;

    // If we have already committed enough memory to satisfy this request,
    // just update the end pointer and return.
    if (newTopOfStackWithReservedZone >= m_commitTop) {
        setStackLimit(newTopOfStack);
        return true;
    }

    // Compute the size of the additional chunk of memory to commit, and check
    // that it is still within our budget. If not, we'll fail to grow and
    // return false.
    ptrdiff_t delta = reinterpret_cast<char*>(m_commitTop) - reinterpret_cast<char*>(newTopOfStackWithReservedZone);
    delta = WTF::roundUpToMultipleOf(commitSize, delta);
    Register* newCommitTop = m_commitTop - (delta / sizeof(Register));
    if (newCommitTop < reservationTop())
        return false;

    // Otherwise, the growth is still within our budget. Go ahead and commit
    // it and return true.
    m_reservation.commit(newCommitTop, delta);
    addToCommittedByteCount(delta);
    m_commitTop = newCommitTop;
    setStackLimit(newTopOfStack);
    return true;
}

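// Report the live portion of the JS stack (from just above the current top
// up to the high address) to the garbage collector as conservative roots.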
void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots)
{
    conservativeRoots.add(topOfStack() + 1, highAddress());
}

void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks)
{
    conservativeRoots.add(topOfStack() + 1, highAddress(), jitStubRoutines, codeBlocks);
}

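// Zero the region the stack has vacated since the last sanitization (between
// the previous deepest top of stack and the current top) so stale values do
// not linger below the live area. Skipped under AddressSanitizer.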
void JSStack::sanitizeStack()
{
#if !defined(ADDRESS_SANITIZER)
    ASSERT(topOfStack() <= baseOfStack());

    if (m_lastStackTop < topOfStack()) {
        char* begin = reinterpret_cast<char*>(m_lastStackTop + 1);
        char* end = reinterpret_cast<char*>(topOfStack() + 1);
        memset(begin, 0, end - begin);
    }

    m_lastStackTop = topOfStack();
#endif
}

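// Return committed pages to the OS, keeping only the reserved zone at the
// high end of the stack committed.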
void JSStack::releaseExcessCapacity()
{
    Register* highAddressWithReservedZone = highAddress() - m_reservedZoneSizeInRegisters;
    ptrdiff_t delta = reinterpret_cast<char*>(highAddressWithReservedZone) - reinterpret_cast<char*>(m_commitTop);
    m_reservation.decommit(m_commitTop, delta);
    addToCommittedByteCount(-delta);
    m_commitTop = highAddressWithReservedZone;
}

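// Touch the statistics mutex so it is created during initialization rather
// than lazily on first use from an arbitrary thread.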
void JSStack::initializeThreading()
{
    stackStatisticsMutex();
}

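// Adjust the global committed-byte accounting under the statistics mutex;
// byteCount is negative when pages are decommitted.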
void JSStack::addToCommittedByteCount(long byteCount)
{
    MutexLocker locker(stackStatisticsMutex());
    ASSERT(static_cast<long>(committedBytesCount) + byteCount > -1);
    committedBytesCount += byteCount;
}

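// Record the reserved zone size in registers and, if the committed region no
// longer leaves room for it below the current stack limit, commit more
// memory immediately.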
void JSStack::setReservedZoneSize(size_t reservedZoneSize)
{
    m_reservedZoneSizeInRegisters = reservedZoneSize / sizeof(Register);
    if (m_commitTop >= (m_end + 1) - m_reservedZoneSizeInRegisters)
        growSlowCase(m_end + 1);
}
#endif // !ENABLE(JIT)

#if ENABLE(JIT)
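// With the JIT enabled, JS frames live on the native C stack, so the stack
// bounds are derived from the VM's stack limit and the thread's stack origin.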
Register* JSStack::lowAddress() const
{
    ASSERT(wtfThreadData().stack().isGrowingDownward());
    return reinterpret_cast<Register*>(m_vm.stackLimit());
}

Register* JSStack::highAddress() const
{
    ASSERT(wtfThreadData().stack().isGrowingDownward());
    return reinterpret_cast<Register*>(wtfThreadData().stack().origin());
}
#endif // ENABLE(JIT)

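// Report how many stack bytes are currently committed: the exact tally when
// using the separate JS stack, or an estimate based on current C stack usage
// when the JIT (and hence the native stack) is in use.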
size_t JSStack::committedByteCount()
{
#if !ENABLE(JIT)
    MutexLocker locker(stackStatisticsMutex());
    return committedBytesCount;
#else
    // When using the C stack, we don't know how many stack pages are actually
    // committed. So, we use the current stack usage as an estimate.
    ASSERT(wtfThreadData().stack().isGrowingDownward());
    int8_t* current = reinterpret_cast<int8_t*>(&current);
    int8_t* high = reinterpret_cast<int8_t*>(wtfThreadData().stack().origin());
    return high - current;
#endif
}

} // namespace JSC