/*
 * Copyright (C) 2008, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "JSStackInlines.h"

#include "ConservativeRoots.h"
#include "Interpreter.h"
#include "JSCInlines.h"
#include "Options.h"

namespace JSC {

#if !ENABLE(JIT)
static size_t committedBytesCount = 0;

static Mutex& stackStatisticsMutex()
{
    DEPRECATED_DEFINE_STATIC_LOCAL(Mutex, staticMutex, ());
    return staticMutex;
}
#endif // !ENABLE(JIT)

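// When the JIT is disabled, the JS stack lives in its own page reservation:
// the constructor reserves the address range up front, and pages are committed
// on demand by growSlowCase().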
JSStack::JSStack(VM& vm)
    : m_vm(vm)
    , m_topCallFrame(vm.topCallFrame)
#if !ENABLE(JIT)
    , m_end(0)
    , m_reservedZoneSizeInRegisters(0)
#endif
{
#if !ENABLE(JIT)
    size_t capacity = Options::maxPerThreadStackUsage();
    ASSERT(capacity && isPageAligned(capacity));

    m_reservation = PageReservation::reserve(WTF::roundUpToMultipleOf(commitSize, capacity), OSAllocator::JSVMStackPages);
    setStackLimit(highAddress());
    m_commitTop = highAddress();

    m_lastStackTop = baseOfStack();
#endif // !ENABLE(JIT)

    m_topCallFrame = 0;
}

#if !ENABLE(JIT)
JSStack::~JSStack()
{
    ptrdiff_t sizeToDecommit = reinterpret_cast<char*>(highAddress()) - reinterpret_cast<char*>(m_commitTop);
    m_reservation.decommit(reinterpret_cast<void*>(m_commitTop), sizeToDecommit);
    addToCommittedByteCount(-sizeToDecommit);
    m_reservation.deallocate();
}

bool JSStack::growSlowCase(Register* newTopOfStack)
{
    Register* newTopOfStackWithReservedZone = newTopOfStack - m_reservedZoneSizeInRegisters;

    // If we have already committed enough memory to satisfy this request,
    // just update the end pointer and return.
    if (newTopOfStackWithReservedZone >= m_commitTop) {
        setStackLimit(newTopOfStack);
        return true;
    }

    // Compute the chunk size of additional memory to commit, and check that
    // it is still within our budget. If not, we'll fail to grow and
    // return false.
    ptrdiff_t delta = reinterpret_cast<char*>(m_commitTop) - reinterpret_cast<char*>(newTopOfStackWithReservedZone);
    delta = WTF::roundUpToMultipleOf(commitSize, delta);
    Register* newCommitTop = m_commitTop - (delta / sizeof(Register));
    if (newCommitTop < reservationTop())
        return false;

    // Otherwise, the growth is still within our budget. Commit it and return true.
    m_reservation.commit(newCommitTop, delta);
    addToCommittedByteCount(delta);
    m_commitTop = newCommitTop;
    setStackLimit(newTopOfStack);
    return true;
}

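// Everything between the current top of stack and the high end of the stack
// may hold live JSValues, so report that range to the conservative root scan.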
void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots)
{
    conservativeRoots.add(topOfStack() + 1, highAddress());
}

void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, CodeBlockSet& codeBlocks)
{
    conservativeRoots.add(topOfStack() + 1, highAddress(), jitStubRoutines, codeBlocks);
}

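// Zero the slots that were in use at the last sanitize but have since been
// popped, so stale values do not linger on the unused part of the stack.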
void JSStack::sanitizeStack()
{
#if !ASAN_ENABLED
    ASSERT(topOfStack() <= baseOfStack());

    if (m_lastStackTop < topOfStack()) {
        char* begin = reinterpret_cast<char*>(m_lastStackTop + 1);
        char* end = reinterpret_cast<char*>(topOfStack() + 1);
        memset(begin, 0, end - begin);
    }

    m_lastStackTop = topOfStack();
#endif
}

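// Decommit all committed stack pages except those backing the reserved zone
// at the high end of the stack.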
void JSStack::releaseExcessCapacity()
{
    Register* highAddressWithReservedZone = highAddress() - m_reservedZoneSizeInRegisters;
    ptrdiff_t delta = reinterpret_cast<char*>(highAddressWithReservedZone) - reinterpret_cast<char*>(m_commitTop);
    m_reservation.decommit(m_commitTop, delta);
    addToCommittedByteCount(-delta);
    m_commitTop = highAddressWithReservedZone;
}

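// Construct the statistics mutex eagerly so its static local is initialized
// before it can be used from multiple threads.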
void JSStack::initializeThreading()
{
    stackStatisticsMutex();
}

void JSStack::addToCommittedByteCount(long byteCount)
{
    MutexLocker locker(stackStatisticsMutex());
    ASSERT(static_cast<long>(committedBytesCount) + byteCount > -1);
    committedBytesCount += byteCount;
}

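// Record the new reserved zone size and, if the committed region no longer
// covers the zone below the current stack limit, commit more memory.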
void JSStack::setReservedZoneSize(size_t reservedZoneSize)
{
    m_reservedZoneSizeInRegisters = reservedZoneSize / sizeof(Register);
    if (m_commitTop >= (m_end + 1) - m_reservedZoneSizeInRegisters)
        growSlowCase(m_end + 1);
}
#endif // !ENABLE(JIT)

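// With the JIT enabled, JS call frames live on the native C stack, so the
// stack bounds come from the current thread's stack rather than a separate
// reservation.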
#if ENABLE(JIT)
Register* JSStack::lowAddress() const
{
    ASSERT(wtfThreadData().stack().isGrowingDownward());
    return reinterpret_cast<Register*>(m_vm.stackLimit());
}

Register* JSStack::highAddress() const
{
    ASSERT(wtfThreadData().stack().isGrowingDownward());
    return reinterpret_cast<Register*>(wtfThreadData().stack().origin());
}
#endif // ENABLE(JIT)

size_t JSStack::committedByteCount()
{
#if !ENABLE(JIT)
    MutexLocker locker(stackStatisticsMutex());
    return committedBytesCount;
#else
    // When using the C stack, we don't know how many stack pages are actually
    // committed. So, we use the current stack usage as an estimate.
    ASSERT(wtfThreadData().stack().isGrowingDownward());
    int8_t* current = reinterpret_cast<int8_t*>(&current);
    int8_t* high = reinterpret_cast<int8_t*>(wtfThreadData().stack().origin());
    return high - current;
#endif
}

} // namespace JSC