/*
 * Copyright (C) 2008, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "JSStackInlines.h"

#include "ConservativeRoots.h"
#include "Interpreter.h"
#include "JSCInlines.h"

#include <cstring>
40 static size_t committedBytesCount
= 0;
42 static Mutex
& stackStatisticsMutex()
44 DEPRECATED_DEFINE_STATIC_LOCAL(Mutex
, staticMutex
, ());
47 #endif // !ENABLE(JIT)
49 JSStack::JSStack(VM
& vm
)
51 , m_topCallFrame(vm
.topCallFrame
)
54 , m_reservedZoneSizeInRegisters(0)
58 size_t capacity
= Options::maxPerThreadStackUsage();
59 ASSERT(capacity
&& isPageAligned(capacity
));
61 m_reservation
= PageReservation::reserve(WTF::roundUpToMultipleOf(commitSize
, capacity
), OSAllocator::JSVMStackPages
);
62 setStackLimit(highAddress());
63 m_commitTop
= highAddress();
65 m_lastStackTop
= baseOfStack();
66 #endif // !ENABLE(JIT)
74 ptrdiff_t sizeToDecommit
= reinterpret_cast<char*>(highAddress()) - reinterpret_cast<char*>(m_commitTop
);
75 m_reservation
.decommit(reinterpret_cast<void*>(m_commitTop
), sizeToDecommit
);
76 addToCommittedByteCount(-sizeToDecommit
);
77 m_reservation
.deallocate();
80 bool JSStack::growSlowCase(Register
* newTopOfStack
)
82 Register
* newTopOfStackWithReservedZone
= newTopOfStack
- m_reservedZoneSizeInRegisters
;
84 // If we have already committed enough memory to satisfy this request,
85 // just update the end pointer and return.
86 if (newTopOfStackWithReservedZone
>= m_commitTop
) {
87 setStackLimit(newTopOfStack
);
91 // Compute the chunk size of additional memory to commit, and see if we
92 // have it is still within our budget. If not, we'll fail to grow and
94 ptrdiff_t delta
= reinterpret_cast<char*>(m_commitTop
) - reinterpret_cast<char*>(newTopOfStackWithReservedZone
);
95 delta
= WTF::roundUpToMultipleOf(commitSize
, delta
);
96 Register
* newCommitTop
= m_commitTop
- (delta
/ sizeof(Register
));
97 if (newCommitTop
< reservationTop())
100 // Otherwise, the growth is still within our budget. Commit it and return true.
101 m_reservation
.commit(newCommitTop
, delta
);
102 addToCommittedByteCount(delta
);
103 m_commitTop
= newCommitTop
;
104 setStackLimit(newTopOfStack
);
108 void JSStack::gatherConservativeRoots(ConservativeRoots
& conservativeRoots
)
110 conservativeRoots
.add(topOfStack() + 1, highAddress());
113 void JSStack::gatherConservativeRoots(ConservativeRoots
& conservativeRoots
, JITStubRoutineSet
& jitStubRoutines
, CodeBlockSet
& codeBlocks
)
115 conservativeRoots
.add(topOfStack() + 1, highAddress(), jitStubRoutines
, codeBlocks
);
118 void JSStack::sanitizeStack()
121 ASSERT(topOfStack() <= baseOfStack());
123 if (m_lastStackTop
< topOfStack()) {
124 char* begin
= reinterpret_cast<char*>(m_lastStackTop
+ 1);
125 char* end
= reinterpret_cast<char*>(topOfStack() + 1);
126 memset(begin
, 0, end
- begin
);
129 m_lastStackTop
= topOfStack();
133 void JSStack::releaseExcessCapacity()
135 Register
* highAddressWithReservedZone
= highAddress() - m_reservedZoneSizeInRegisters
;
136 ptrdiff_t delta
= reinterpret_cast<char*>(highAddressWithReservedZone
) - reinterpret_cast<char*>(m_commitTop
);
137 m_reservation
.decommit(m_commitTop
, delta
);
138 addToCommittedByteCount(-delta
);
139 m_commitTop
= highAddressWithReservedZone
;
142 void JSStack::initializeThreading()
144 stackStatisticsMutex();
147 void JSStack::addToCommittedByteCount(long byteCount
)
149 MutexLocker
locker(stackStatisticsMutex());
150 ASSERT(static_cast<long>(committedBytesCount
) + byteCount
> -1);
151 committedBytesCount
+= byteCount
;
154 void JSStack::setReservedZoneSize(size_t reservedZoneSize
)
156 m_reservedZoneSizeInRegisters
= reservedZoneSize
/ sizeof(Register
);
157 if (m_commitTop
>= (m_end
+ 1) - m_reservedZoneSizeInRegisters
)
158 growSlowCase(m_end
+ 1);
160 #endif // !ENABLE(JIT)
163 Register
* JSStack::lowAddress() const
165 ASSERT(wtfThreadData().stack().isGrowingDownward());
166 return reinterpret_cast<Register
*>(m_vm
.stackLimit());
169 Register
* JSStack::highAddress() const
171 ASSERT(wtfThreadData().stack().isGrowingDownward());
172 return reinterpret_cast<Register
*>(wtfThreadData().stack().origin());
174 #endif // ENABLE(JIT)
176 size_t JSStack::committedByteCount()
179 MutexLocker
locker(stackStatisticsMutex());
180 return committedBytesCount
;
182 // When using the C stack, we don't know how many stack pages are actually
183 // committed. So, we use the current stack usage as an estimate.
184 ASSERT(wtfThreadData().stack().isGrowingDownward());
185 int8_t* current
= reinterpret_cast<int8_t*>(¤t
);
186 int8_t* high
= reinterpret_cast<int8_t*>(wtfThreadData().stack().origin());
187 return high
- current
;