// Source: apple/javascriptcore.git mirror — interpreter/JSStack.cpp
/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
 *    its contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
31 #include "JSStackInlines.h"
33 #include "ConservativeRoots.h"
34 #include "Interpreter.h"
38 static size_t committedBytesCount
= 0;
40 static Mutex
& stackStatisticsMutex()
42 DEFINE_STATIC_LOCAL(Mutex
, staticMutex
, ());
46 JSStack::JSStack(VM
& vm
, size_t capacity
)
48 , m_topCallFrame(vm
.topCallFrame
)
50 ASSERT(capacity
&& isPageAligned(capacity
));
52 m_reservation
= PageReservation::reserve(roundUpAllocationSize(capacity
* sizeof(Register
), commitSize
), OSAllocator::JSVMStackPages
);
53 m_end
= static_cast<Register
*>(m_reservation
.base());
54 m_commitEnd
= static_cast<Register
*>(m_reservation
.base());
56 disableErrorStackReserve();
63 void* base
= m_reservation
.base();
64 m_reservation
.decommit(base
, reinterpret_cast<intptr_t>(m_commitEnd
) - reinterpret_cast<intptr_t>(base
));
65 addToCommittedByteCount(-(reinterpret_cast<intptr_t>(m_commitEnd
) - reinterpret_cast<intptr_t>(base
)));
66 m_reservation
.deallocate();
69 bool JSStack::growSlowCase(Register
* newEnd
)
71 // If we have already committed enough memory to satisfy this request,
72 // just update the end pointer and return.
73 if (newEnd
<= m_commitEnd
) {
78 // Compute the chunk size of additional memory to commit, and see if we
79 // have it is still within our budget. If not, we'll fail to grow and
81 long delta
= roundUpAllocationSize(reinterpret_cast<char*>(newEnd
) - reinterpret_cast<char*>(m_commitEnd
), commitSize
);
82 if (reinterpret_cast<char*>(m_commitEnd
) + delta
> reinterpret_cast<char*>(m_useableEnd
))
85 // Otherwise, the growth is still within our budget. Go ahead and commit
86 // it and return true.
87 m_reservation
.commit(m_commitEnd
, delta
);
88 addToCommittedByteCount(delta
);
89 m_commitEnd
= reinterpret_cast_ptr
<Register
*>(reinterpret_cast<char*>(m_commitEnd
) + delta
);
94 void JSStack::gatherConservativeRoots(ConservativeRoots
& conservativeRoots
)
96 conservativeRoots
.add(begin(), getTopOfStack());
99 void JSStack::gatherConservativeRoots(ConservativeRoots
& conservativeRoots
, JITStubRoutineSet
& jitStubRoutines
, DFGCodeBlocks
& dfgCodeBlocks
)
101 conservativeRoots
.add(begin(), getTopOfStack(), jitStubRoutines
, dfgCodeBlocks
);
104 void JSStack::releaseExcessCapacity()
106 ptrdiff_t delta
= reinterpret_cast<uintptr_t>(m_commitEnd
) - reinterpret_cast<uintptr_t>(m_reservation
.base());
107 m_reservation
.decommit(m_reservation
.base(), delta
);
108 addToCommittedByteCount(-delta
);
109 m_commitEnd
= static_cast<Register
*>(m_reservation
.base());
112 void JSStack::initializeThreading()
114 stackStatisticsMutex();
117 size_t JSStack::committedByteCount()
119 MutexLocker
locker(stackStatisticsMutex());
120 return committedBytesCount
;
123 void JSStack::addToCommittedByteCount(long byteCount
)
125 MutexLocker
locker(stackStatisticsMutex());
126 ASSERT(static_cast<long>(committedBytesCount
) + byteCount
> -1);
127 committedBytesCount
+= byteCount
;
130 void JSStack::enableErrorStackReserve()
132 m_useableEnd
= reservationEnd();
135 void JSStack::disableErrorStackReserve()
137 char* useableEnd
= reinterpret_cast<char*>(reservationEnd()) - commitSize
;
138 m_useableEnd
= reinterpret_cast_ptr
<Register
*>(useableEnd
);
140 // By the time we get here, we are guaranteed to be destructing the last
141 // Interpreter::ErrorHandlingMode that enabled this reserve in the first
142 // place. That means the stack space beyond m_useableEnd before we
143 // enabled the reserve was not previously in use. Hence, it is safe to
144 // shrink back to that m_useableEnd.
145 if (m_end
> m_useableEnd
) {
146 ASSERT(m_topCallFrame
->frameExtent() <= m_useableEnd
);
147 shrink(m_useableEnd
);