/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "JSStack.h"
#include "JSStackInlines.h"

#include "ConservativeRoots.h"
#include "Interpreter.h"

namespace JSC {

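// Total number of stack bytes currently committed across all JSStack
// instances. Reads and writes are guarded by stackStatisticsMutex().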
static size_t committedBytesCount = 0;

static Mutex& stackStatisticsMutex()
{
    DEFINE_STATIC_LOCAL(Mutex, staticMutex, ());
    return staticMutex;
}

JSStack::JSStack(VM& vm, size_t capacity)
    : m_end(0)
    , m_topCallFrame(vm.topCallFrame)
{
    ASSERT(capacity && isPageAligned(capacity));

    m_reservation = PageReservation::reserve(roundUpAllocationSize(capacity * sizeof(Register), commitSize), OSAllocator::JSVMStackPages);
    m_end = static_cast<Register*>(m_reservation.base());
    m_commitEnd = static_cast<Register*>(m_reservation.base());

    disableErrorStackReserve();

    m_topCallFrame = 0;
}

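// Return our committed pages to the OS (updating the global byte count)
// before releasing the address reservation itself.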
JSStack::~JSStack()
{
    void* base = m_reservation.base();
    m_reservation.decommit(base, reinterpret_cast<intptr_t>(m_commitEnd) - reinterpret_cast<intptr_t>(base));
    addToCommittedByteCount(-(reinterpret_cast<intptr_t>(m_commitEnd) - reinterpret_cast<intptr_t>(base)));
    m_reservation.deallocate();
}

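// Slow path of JSStack::grow(). A sketch of the presumed fast path in
// JSStackInlines.h (an assumption; that header is not shown here): grow(newEnd)
// succeeds immediately while newEnd <= m_end, and only falls through to this
// slow case when the stack must expand past the currently usable region.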
bool JSStack::growSlowCase(Register* newEnd)
{
    // If we have already committed enough memory to satisfy this request,
    // just update the end pointer and return.
    if (newEnd <= m_commitEnd) {
        m_end = newEnd;
        return true;
    }

    // Compute the chunk size of additional memory to commit, and check
    // that it is still within our budget. If not, we'll fail to grow and
    // return false.
    long delta = roundUpAllocationSize(reinterpret_cast<char*>(newEnd) - reinterpret_cast<char*>(m_commitEnd), commitSize);
    if (reinterpret_cast<char*>(m_commitEnd) + delta > reinterpret_cast<char*>(m_useableEnd))
        return false;

    // Otherwise, the growth is still within our budget. Go ahead and commit
    // it and return true.
    m_reservation.commit(m_commitEnd, delta);
    addToCommittedByteCount(delta);
    m_commitEnd = reinterpret_cast_ptr<Register*>(reinterpret_cast<char*>(m_commitEnd) + delta);
    m_end = newEnd;
    return true;
}

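// The garbage collector scans the active portion of the stack
// conservatively: any word between begin() and getTopOfStack() that looks
// like a heap pointer keeps its referent alive.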
void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots)
{
    conservativeRoots.add(begin(), getTopOfStack());
}

void JSStack::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, DFGCodeBlocks& dfgCodeBlocks)
{
    conservativeRoots.add(begin(), getTopOfStack(), jitStubRoutines, dfgCodeBlocks);
}

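// Decommit all committed stack pages back to the OS. The address
// reservation stays intact; pages are recommitted on demand by the next
// growSlowCase().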
void JSStack::releaseExcessCapacity()
{
    ptrdiff_t delta = reinterpret_cast<uintptr_t>(m_commitEnd) - reinterpret_cast<uintptr_t>(m_reservation.base());
    m_reservation.decommit(m_reservation.base(), delta);
    addToCommittedByteCount(-delta);
    m_commitEnd = static_cast<Register*>(m_reservation.base());
}

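// Called during startup to force creation of the statistics mutex while
// we are still single-threaded.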
void JSStack::initializeThreading()
{
    stackStatisticsMutex();
}

size_t JSStack::committedByteCount()
{
    MutexLocker locker(stackStatisticsMutex());
    return committedBytesCount;
}

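// byteCount is negative when pages are decommitted. The ASSERT catches any
// attempt to drive the global count below zero.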
void JSStack::addToCommittedByteCount(long byteCount)
{
    MutexLocker locker(stackStatisticsMutex());
    ASSERT(static_cast<long>(committedBytesCount) + byteCount > -1);
    committedBytesCount += byteCount;
}

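// The last commitSize chunk of the reservation is normally held in reserve
// so that error handling (see Interpreter::ErrorHandlingMode) has stack
// headroom to run in. Enabling the reserve makes that final chunk usable;
// disabling it puts the chunk back off limits.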
void JSStack::enableErrorStackReserve()
{
    m_useableEnd = reservationEnd();
}

void JSStack::disableErrorStackReserve()
{
    char* useableEnd = reinterpret_cast<char*>(reservationEnd()) - commitSize;
    m_useableEnd = reinterpret_cast_ptr<Register*>(useableEnd);

    // By the time we get here, we are guaranteed to be destructing the last
    // Interpreter::ErrorHandlingMode that enabled this reserve in the first
    // place. That means the stack space beyond m_useableEnd was not in use
    // before the reserve was enabled. Hence, it is safe to shrink back to
    // that m_useableEnd.
    if (m_end > m_useableEnd) {
        ASSERT(m_topCallFrame->frameExtent() <= m_useableEnd);
        shrink(m_useableEnd);
    }
}

} // namespace JSC