/*
 * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
26 #ifndef DFGPreciseLocalClobberize_h
27 #define DFGPreciseLocalClobberize_h
31 #include "DFGClobberize.h"
32 #include "DFGMayExit.h"
34 namespace JSC
{ namespace DFG
{
36 template<typename ReadFunctor
, typename WriteFunctor
, typename DefFunctor
>
37 class PreciseLocalClobberizeAdaptor
{
39 PreciseLocalClobberizeAdaptor(
40 Graph
& graph
, Node
* node
,
41 const ReadFunctor
& read
, const WriteFunctor
& write
, const DefFunctor
& def
)
50 void read(AbstractHeap heap
)
52 if (heap
.kind() == Stack
) {
53 if (heap
.payload().isTop()) {
58 callIfAppropriate(m_read
, VirtualRegister(heap
.payload().value32()));
62 if (heap
.overlaps(Stack
)) {
68 void write(AbstractHeap heap
)
70 // We expect stack writes to already be precisely characterized by DFG::clobberize().
71 if (heap
.kind() == Stack
) {
72 RELEASE_ASSERT(!heap
.payload().isTop());
73 callIfAppropriate(m_write
, VirtualRegister(heap
.payload().value32()));
77 RELEASE_ASSERT(!heap
.overlaps(Stack
));
82 // PureValue defs never have anything to do with locals, so ignore this.
85 void def(HeapLocation location
, LazyNode node
)
87 if (location
.kind() != StackLoc
)
90 RELEASE_ASSERT(location
.heap().kind() == Stack
);
92 m_def(VirtualRegister(location
.heap().payload().value32()), node
);
96 template<typename Functor
>
97 void callIfAppropriate(const Functor
& functor
, VirtualRegister operand
)
99 if (operand
.isLocal() && static_cast<unsigned>(operand
.toLocal()) >= m_graph
.block(0)->variablesAtHead
.numberOfLocals())
102 if (operand
.isArgument() && !operand
.isHeader() && static_cast<unsigned>(operand
.toArgument()) >= m_graph
.block(0)->variablesAtHead
.numberOfArguments())
110 switch (m_node
->op()) {
111 case GetMyArgumentByVal
:
113 case CallForwardVarargs
:
114 case ConstructForwardVarargs
: {
115 InlineCallFrame
* inlineCallFrame
= m_node
->child1()->origin
.semantic
.inlineCallFrame
;
116 if (!inlineCallFrame
) {
117 // Read the outermost arguments and argument count.
118 for (unsigned i
= m_graph
.m_codeBlock
->numParameters(); i
-- > 1;)
119 m_read(virtualRegisterForArgument(i
));
120 m_read(VirtualRegister(JSStack::ArgumentCount
));
124 for (unsigned i
= inlineCallFrame
->arguments
.size(); i
-- > 1;)
125 m_read(VirtualRegister(inlineCallFrame
->stackOffset
+ virtualRegisterForArgument(i
).offset()));
126 if (inlineCallFrame
->isVarargs())
127 m_read(VirtualRegister(inlineCallFrame
->stackOffset
+ JSStack::ArgumentCount
));
132 // All of the outermost arguments, except this, are definitely read.
133 for (unsigned i
= m_graph
.m_codeBlock
->numParameters(); i
-- > 1;)
134 m_read(virtualRegisterForArgument(i
));
136 // The stack header is read.
137 for (unsigned i
= 0; i
< JSStack::ThisArgument
; ++i
)
138 m_read(VirtualRegister(i
));
140 // Read all of the inline arguments and call frame headers that we didn't already capture.
141 for (InlineCallFrame
* inlineCallFrame
= m_node
->origin
.semantic
.inlineCallFrame
; inlineCallFrame
; inlineCallFrame
= inlineCallFrame
->caller
.inlineCallFrame
) {
142 for (unsigned i
= inlineCallFrame
->arguments
.size(); i
-- > 1;)
143 m_read(VirtualRegister(inlineCallFrame
->stackOffset
+ virtualRegisterForArgument(i
).offset()));
144 if (inlineCallFrame
->isClosureCall
)
145 m_read(VirtualRegister(inlineCallFrame
->stackOffset
+ JSStack::Callee
));
146 if (inlineCallFrame
->isVarargs())
147 m_read(VirtualRegister(inlineCallFrame
->stackOffset
+ JSStack::ArgumentCount
));
155 const ReadFunctor
& m_read
;
156 const WriteFunctor
& m_write
;
157 const DefFunctor
& m_def
;
160 template<typename ReadFunctor
, typename WriteFunctor
, typename DefFunctor
>
161 void preciseLocalClobberize(
162 Graph
& graph
, Node
* node
,
163 const ReadFunctor
& read
, const WriteFunctor
& write
, const DefFunctor
& def
)
165 PreciseLocalClobberizeAdaptor
<ReadFunctor
, WriteFunctor
, DefFunctor
>
166 adaptor(graph
, node
, read
, write
, def
);
167 clobberize(graph
, node
, adaptor
);
170 } } // namespace JSC::DFG
172 #endif // ENABLE(DFG_JIT)
174 #endif // DFGPreciseLocalClobberize_h