/*
 * Copyright (C) 2011, 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "DFGByteCodeParser.h"

#include "CallLinkStatus.h"
#include "CodeBlock.h"
#include "DFGByteCodeCache.h"
#include "DFGCapabilities.h"
#include "GetByIdStatus.h"
#include "MethodCallLinkStatus.h"
#include "PutByIdStatus.h"
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>

namespace JSC { namespace DFG {

// === ByteCodeParser ===
//
// This class is used to compile the dataflow graph from a CodeBlock.
class ByteCodeParser {
public:
    ByteCodeParser(Graph& graph)
        : m_globalData(&graph.m_globalData)
        , m_codeBlock(graph.m_codeBlock)
        , m_profiledBlock(graph.m_profiledBlock)
        , m_currentProfilingIndex(0)
        , m_constantUndefined(UINT_MAX)
        , m_constantNull(UINT_MAX)
        , m_constantNaN(UINT_MAX)
        , m_constant1(UINT_MAX)
        , m_constants(m_codeBlock->numberOfConstantRegisters())
        , m_numArguments(m_codeBlock->numParameters())
        , m_numLocals(m_codeBlock->m_numCalleeRegisters)
        , m_preservedVars(m_codeBlock->m_numVars)
        , m_numPassedVarArgs(0)
        , m_globalResolveNumber(0)
        , m_haveBuiltOperandMaps(false)
        , m_emptyJSValueIndex(UINT_MAX)
    {
        ASSERT(m_profiledBlock);

        for (int i = 0; i < m_codeBlock->m_numVars; ++i)
            m_preservedVars.set(i);
    }

    // Parse a full CodeBlock of bytecode.
    bool parse();

private:
    // Just parse from m_currentIndex to the end of the current CodeBlock.
    void parseCodeBlock();

    // Helper for min and max.
    bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);

    // Handle calls. This resolves issues surrounding inlining and intrinsics.
    void handleCall(Interpreter*, Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
    void emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind);
    // Handle inlining. Return true if it succeeded, false if we need to plant a call.
    bool handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction*, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
    // Handle setting the result of an intrinsic.
    void setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex);
    // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
    bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction);
    // Prepare to parse a block.
    void prepareToParseBlock();
    // Parse a single basic block of bytecode instructions.
    bool parseBlock(unsigned limit);
    // Find reachable code and setup predecessor links in the graph's BasicBlocks.
    void determineReachability();
    // Enqueue a block onto the worklist, if necessary.
    void handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex, BlockIndex successor);
    // Link block successors.
    void linkBlock(BasicBlock*, Vector<BlockIndex>& possibleTargets);
    void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets);
    // Link GetLocal & SetLocal nodes, to ensure live values are generated.
    enum PhiStackType {
        LocalPhiStack,
        ArgumentPhiStack
    };
    template<PhiStackType stackType>
    void processPhiStack();
    void fixVariableAccessPredictions();
    // Add spill locations to nodes.
    void allocateVirtualRegisters();

    VariableAccessData* newVariableAccessData(int operand)
    {
        ASSERT(operand < FirstConstantRegisterIndex);

        m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand)));
        return &m_graph.m_variableAccessData.last();
    }

    // Get/Set the operands/result of a bytecode instruction.
    NodeIndex getDirect(int operand)
    {
        // Is this a constant?
        if (operand >= FirstConstantRegisterIndex) {
            unsigned constant = operand - FirstConstantRegisterIndex;
            ASSERT(constant < m_constants.size());
            return getJSConstant(constant);
        }

        // Is this an argument?
        if (operandIsArgument(operand))
            return getArgument(operand);

        // Must be a local.
        return getLocal((unsigned)operand);
    }
    NodeIndex get(int operand)
    {
        return getDirect(m_inlineStackTop->remapOperand(operand));
    }
    void setDirect(int operand, NodeIndex value)
    {
        // Is this an argument?
        if (operandIsArgument(operand)) {
            setArgument(operand, value);
            return;
        }

        // Must be a local.
        setLocal((unsigned)operand, value);
    }
    void set(int operand, NodeIndex value)
    {
        setDirect(m_inlineStackTop->remapOperand(operand), value);
    }

    NodeIndex injectLazyOperandPrediction(NodeIndex nodeIndex)
    {
        Node& node = m_graph[nodeIndex];
        ASSERT(node.op() == GetLocal);
        ASSERT(node.codeOrigin.bytecodeIndex == m_currentIndex);
        PredictedType prediction =
            m_inlineStackTop->m_lazyOperands.prediction(
                LazyOperandValueProfileKey(m_currentIndex, node.local()));
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Lazy operand [@%u, bc#%u, r%d] prediction: %s\n",
                nodeIndex, m_currentIndex, node.local(), predictionToString(prediction));
#endif

        node.variableAccessData()->predict(prediction);
        return nodeIndex;
    }

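    // The prediction injected above comes from the profiled (baseline) code block's
    // lazy operand value profiles, keyed by the current bytecode index and the local
    // being loaded. Merging it into the GetLocal's VariableAccessData gives later
    // phases a starting guess about the type of the loaded value.
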
    // Used in implementing get/set, above, where the operand is a local variable.
    NodeIndex getLocal(unsigned operand)
    {
        NodeIndex nodeIndex = m_currentBlock->variablesAtTail.local(operand);

        if (nodeIndex != NoNode) {
            Node* nodePtr = &m_graph[nodeIndex];
            if (nodePtr->op() == Flush) {
                // Two possibilities: either the block wants the local to be live
                // but has not loaded its value, or it has loaded its value, in
                // which case we're done.
                nodeIndex = nodePtr->child1().index();
                Node& flushChild = m_graph[nodeIndex];
                if (flushChild.op() == Phi) {
                    VariableAccessData* variableAccessData = flushChild.variableAccessData();
                    nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
                    m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
                    return nodeIndex;
                }
                nodePtr = &flushChild;
            }

            ASSERT(&m_graph[nodeIndex] == nodePtr);
            ASSERT(nodePtr->op() != Flush);

            if (m_graph.localIsCaptured(operand)) {
                // We wish to use the same variable access data as the previous access,
                // but for all other purposes we want to issue a load since for all we
                // know, at this stage of compilation, the local has been clobbered.

                // Make sure we link to the Phi node, not to the GetLocal.
                if (nodePtr->op() == GetLocal)
                    nodeIndex = nodePtr->child1().index();

                return injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
            }

            if (nodePtr->op() == GetLocal)
                return nodeIndex;
            ASSERT(nodePtr->op() == SetLocal);
            return nodePtr->child1().index();
        }

        // Check for reads of temporaries from prior blocks,
        // expand m_preservedVars to cover these.
        m_preservedVars.set(operand);

        VariableAccessData* variableAccessData = newVariableAccessData(operand);

        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand));
        nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
        m_currentBlock->variablesAtTail.local(operand) = nodeIndex;

        m_currentBlock->variablesAtHead.setLocalFirstTime(operand, nodeIndex);

        return nodeIndex;
    }

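    // In summary, getLocal() has three cases once the block already knows about the
    // local: (1) the last mention was a Flush, so we plant a GetLocal fed by the
    // Flush's Phi; (2) the local is captured, so we conservatively reload it with a
    // fresh GetLocal even if a value is available; (3) the last mention was a
    // GetLocal or SetLocal whose value we can reuse directly. If the block has no
    // record of the local at all, we create a Phi, remember it on m_localPhiStack
    // for later linking, and load through it.
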
    void setLocal(unsigned operand, NodeIndex value)
    {
        VariableAccessData* variableAccessData = newVariableAccessData(operand);
        NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
        m_currentBlock->variablesAtTail.local(operand) = nodeIndex;

        bool shouldFlush = m_graph.localIsCaptured(operand);

        if (!shouldFlush) {
            // If this is in argument position, then it should be flushed.
            for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
                InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
                if (!inlineCallFrame)
                    break;
                if (static_cast<int>(operand) >= inlineCallFrame->stackOffset - RegisterFile::CallFrameHeaderSize)
                    continue;
                if (static_cast<int>(operand) == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
                    continue;
                if (operand < inlineCallFrame->stackOffset - RegisterFile::CallFrameHeaderSize - inlineCallFrame->arguments.size())
                    continue;
                int argument = operandToArgument(operand - inlineCallFrame->stackOffset);
                stack->m_argumentPositions[argument]->addVariable(variableAccessData);
                shouldFlush = true;
                break;
            }
        }

        if (shouldFlush)
            addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
    }

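    // A SetLocal is followed by a Flush when the stored value must remain observable
    // outside of straight-line DFG execution: either the local is captured, or it
    // occupies an argument slot of some inline call frame, in which case the value
    // needs to stay live in its register-file slot rather than only in DFG temporaries.
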
    // Used in implementing get/set, above, where the operand is an argument.
    NodeIndex getArgument(unsigned operand)
    {
        unsigned argument = operandToArgument(operand);
        ASSERT(argument < m_numArguments);

        NodeIndex nodeIndex = m_currentBlock->variablesAtTail.argument(argument);

        if (nodeIndex != NoNode) {
            Node* nodePtr = &m_graph[nodeIndex];
            if (nodePtr->op() == Flush) {
                // Two possibilities: either the block wants the local to be live
                // but has not loaded its value, or it has loaded its value, in
                // which case we're done.
                nodeIndex = nodePtr->child1().index();
                Node& flushChild = m_graph[nodeIndex];
                if (flushChild.op() == Phi) {
                    VariableAccessData* variableAccessData = flushChild.variableAccessData();
                    nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), nodeIndex));
                    m_currentBlock->variablesAtTail.local(operand) = nodeIndex;
                    return nodeIndex;
                }
                nodePtr = &flushChild;
            }

            ASSERT(&m_graph[nodeIndex] == nodePtr);
            ASSERT(nodePtr->op() != Flush);

            if (nodePtr->op() == SetArgument) {
                // We're getting an argument in the first basic block; link
                // the GetLocal to the SetArgument.
                ASSERT(nodePtr->local() == static_cast<VirtualRegister>(operand));
                nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
                m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
                return nodeIndex;
            }

            if (m_graph.argumentIsCaptured(argument)) {
                if (nodePtr->op() == GetLocal)
                    nodeIndex = nodePtr->child1().index();
                return injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(nodePtr->variableAccessData()), nodeIndex));
            }

            if (nodePtr->op() == GetLocal)
                return nodeIndex;

            ASSERT(nodePtr->op() == SetLocal);
            return nodePtr->child1().index();
        }

        VariableAccessData* variableAccessData = newVariableAccessData(operand);

        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument));
        nodeIndex = injectLazyOperandPrediction(addToGraph(GetLocal, OpInfo(variableAccessData), phi));
        m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;

        m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, nodeIndex);

        return nodeIndex;
    }

    void setArgument(int operand, NodeIndex value)
    {
        unsigned argument = operandToArgument(operand);
        ASSERT(argument < m_numArguments);

        VariableAccessData* variableAccessData = newVariableAccessData(operand);
        InlineStackEntry* stack = m_inlineStackTop;
        while (stack->m_inlineCallFrame) // find the machine stack entry.
            stack = stack->m_caller;
        stack->m_argumentPositions[argument]->addVariable(variableAccessData);
        NodeIndex nodeIndex = addToGraph(SetLocal, OpInfo(variableAccessData), value);
        m_currentBlock->variablesAtTail.argument(argument) = nodeIndex;
        // Always flush arguments.
        addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
    }

    VariableAccessData* flushArgument(int operand)
    {
        // FIXME: This should check if the same operand had already been flushed to
        // some other local variable.

        operand = m_inlineStackTop->remapOperand(operand);

        ASSERT(operand < FirstConstantRegisterIndex);

        NodeIndex nodeIndex;
        int index;
        if (operandIsArgument(operand)) {
            index = operandToArgument(operand);
            nodeIndex = m_currentBlock->variablesAtTail.argument(index);
        } else {
            index = operand;
            nodeIndex = m_currentBlock->variablesAtTail.local(index);
            m_preservedVars.set(operand);
        }

        if (nodeIndex != NoNode) {
            Node& node = m_graph[nodeIndex];
            switch (node.op()) {
            case Flush:
                nodeIndex = node.child1().index();
                break;
            case GetLocal:
                nodeIndex = node.child1().index();
                break;
            default:
                break;
            }

            ASSERT(m_graph[nodeIndex].op() != Flush
                   && m_graph[nodeIndex].op() != GetLocal);

            // Emit a Flush regardless of whether we already flushed it.
            // This gives us guidance to see that the variable also needs to be flushed
            // for arguments, even if it already had to be flushed for other reasons.
            VariableAccessData* variableAccessData = node.variableAccessData();
            addToGraph(Flush, OpInfo(variableAccessData), nodeIndex);
            return variableAccessData;
        }

        VariableAccessData* variableAccessData = newVariableAccessData(operand);
        NodeIndex phi = addToGraph(Phi, OpInfo(variableAccessData));
        nodeIndex = addToGraph(Flush, OpInfo(variableAccessData), phi);
        if (operandIsArgument(operand)) {
            m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
            m_currentBlock->variablesAtTail.argument(index) = nodeIndex;
            m_currentBlock->variablesAtHead.setArgumentFirstTime(index, nodeIndex);
        } else {
            m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, index));
            m_currentBlock->variablesAtTail.local(index) = nodeIndex;
            m_currentBlock->variablesAtHead.setLocalFirstTime(index, nodeIndex);
        }

        return variableAccessData;
    }

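    // flushArgument() is used when setting up an inlined call: every operand passed
    // to the callee gets an explicit Flush here, so its value is known to be in its
    // register-file slot before control transfers into the inlined code.
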
    // Get an operand, and perform a ToInt32/ToNumber conversion on it.
    NodeIndex getToInt32(int operand)
    {
        return toInt32(get(operand));
    }

    // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
    NodeIndex toInt32(NodeIndex index)
    {
        Node& node = m_graph[index];

        if (node.hasInt32Result())
            return index;

        if (node.op() == UInt32ToNumber)
            return node.child1().index();

        // Check for numeric constants boxed as JSValues.
        if (node.op() == JSConstant) {
            JSValue v = valueOfJSConstant(index);
            if (v.isInt32())
                return getJSConstant(node.constantNumber());
            if (v.isNumber())
                return getJSConstantForValue(JSValue(JSC::toInt32(v.asNumber())));
        }

        return addToGraph(ValueToInt32, index);
    }

    NodeIndex getJSConstantForValue(JSValue constantValue)
    {
        unsigned constantIndex = m_codeBlock->addOrFindConstant(constantValue);
        if (constantIndex >= m_constants.size())
            m_constants.append(ConstantRecord());

        ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());

        return getJSConstant(constantIndex);
    }

    NodeIndex getJSConstant(unsigned constant)
    {
        NodeIndex index = m_constants[constant].asJSValue;
        if (index != NoNode)
            return index;

        NodeIndex resultIndex = addToGraph(JSConstant, OpInfo(constant));
        m_constants[constant].asJSValue = resultIndex;
        return resultIndex;
    }

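    // JSConstant nodes are cached per basic block through ConstantRecord: the first
    // use of a constant in a block creates the node, later uses reuse it, and
    // prepareToParseBlock() resets the cache so each block gets its own nodes.
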
    // Helper functions to get/set the this value.
    NodeIndex getThis()
    {
        return get(m_inlineStackTop->m_codeBlock->thisRegister());
    }
    void setThis(NodeIndex value)
    {
        set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
    }

    // Convenience methods for checking nodes for constants.
    bool isJSConstant(NodeIndex index)
    {
        return m_graph[index].op() == JSConstant;
    }
    bool isInt32Constant(NodeIndex nodeIndex)
    {
        return isJSConstant(nodeIndex) && valueOfJSConstant(nodeIndex).isInt32();
    }
    // Convenience methods for getting constant values.
    JSValue valueOfJSConstant(NodeIndex index)
    {
        ASSERT(isJSConstant(index));
        return m_codeBlock->getConstant(FirstConstantRegisterIndex + m_graph[index].constantNumber());
    }
    int32_t valueOfInt32Constant(NodeIndex nodeIndex)
    {
        ASSERT(isInt32Constant(nodeIndex));
        return valueOfJSConstant(nodeIndex).asInt32();
    }

    // This method returns a JSConstant with the value 'undefined'.
    NodeIndex constantUndefined()
    {
        // Has m_constantUndefined been set up yet?
        if (m_constantUndefined == UINT_MAX) {
            // Search the constant pool for undefined, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
                if (testMe.isUndefined())
                    return getJSConstant(m_constantUndefined);
            }

            // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsUndefined());
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
        return getJSConstant(m_constantUndefined);
    }

    // This method returns a JSConstant with the value 'null'.
    NodeIndex constantNull()
    {
        // Has m_constantNull been set up yet?
        if (m_constantNull == UINT_MAX) {
            // Search the constant pool for null, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
                if (testMe.isNull())
                    return getJSConstant(m_constantNull);
            }

            // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsNull());
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
        return getJSConstant(m_constantNull);
    }

    // This method returns a DoubleConstant with the value 1.
    NodeIndex one()
    {
        // Has m_constant1 been set up yet?
        if (m_constant1 == UINT_MAX) {
            // Search the constant pool for the value 1, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
                if (testMe.isInt32() && testMe.asInt32() == 1)
                    return getJSConstant(m_constant1);
            }

            // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsNumber(1));
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
        return getJSConstant(m_constant1);
    }

    // This method returns a DoubleConstant with the value NaN.
    NodeIndex constantNaN()
    {
        JSValue nan = jsNaN();

        // Has m_constantNaN been set up yet?
        if (m_constantNaN == UINT_MAX) {
            // Search the constant pool for the value NaN, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
                if (JSValue::encode(testMe) == JSValue::encode(nan))
                    return getJSConstant(m_constantNaN);
            }

            // Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(nan);
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }

        // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
        ASSERT(isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
        return getJSConstant(m_constantNaN);
    }

    NodeIndex cellConstant(JSCell* cell)
    {
        HashMap<JSCell*, NodeIndex>::AddResult result = m_cellConstantNodes.add(cell, NoNode);
        if (result.isNewEntry)
            result.iterator->second = addToGraph(WeakJSConstant, OpInfo(cell));

        return result.iterator->second;
    }

    CodeOrigin currentCodeOrigin()
    {
        return CodeOrigin(m_currentIndex, m_inlineStackTop->m_inlineCallFrame, m_currentProfilingIndex - m_currentIndex);
    }

    // These methods create a node and add it to the graph. If nodes of this type are
    // 'mustGenerate' then the node will implicitly be ref'ed to ensure generation.
    NodeIndex addToGraph(NodeType op, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(op, currentCodeOrigin(), child1, child2, child3));
        m_currentBlock->append(resultIndex);

        if (defaultFlags(op) & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }
    NodeIndex addToGraph(NodeType op, OpInfo info, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(op, currentCodeOrigin(), info, child1, child2, child3));
        if (op == Phi)
            m_currentBlock->phis.append(resultIndex);
        else
            m_currentBlock->append(resultIndex);

        if (defaultFlags(op) & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }
    NodeIndex addToGraph(NodeType op, OpInfo info1, OpInfo info2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(op, currentCodeOrigin(), info1, info2, child1, child2, child3));
        m_currentBlock->append(resultIndex);

        if (defaultFlags(op) & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }

    NodeIndex addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(Node::VarArg, op, currentCodeOrigin(), info1, info2, m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs));
        m_currentBlock->append(resultIndex);

        m_numPassedVarArgs = 0;

        if (defaultFlags(op) & NodeMustGenerate)
            m_graph.ref(resultIndex);
        return resultIndex;
    }

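    // Var-arg nodes do not store their children inline. Callers first push each
    // child with addVarArgChild(), which appends to m_graph.m_varArgChildren and
    // bumps m_numPassedVarArgs; the node constructed here then records the offset
    // and count of that slice, and the counter is reset for the next var-arg node.
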
    NodeIndex insertPhiNode(OpInfo info, BasicBlock* block)
    {
        NodeIndex resultIndex = (NodeIndex)m_graph.size();
        m_graph.append(Node(Phi, currentCodeOrigin(), info));
        block->phis.append(resultIndex);
        return resultIndex;
    }

    void addVarArgChild(NodeIndex child)
    {
        m_graph.m_varArgChildren.append(Edge(child));
        m_numPassedVarArgs++;
    }

    NodeIndex addCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op)
    {
        Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);

        PredictedType prediction = PredictNone;
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
            m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call);
            prediction = getPrediction();
        }

        addVarArgChild(get(currentInstruction[1].u.operand));
        int argCount = currentInstruction[2].u.operand;
        if (RegisterFile::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
            m_parameterSlots = RegisterFile::CallFrameHeaderSize + argCount;

        int registerOffset = currentInstruction[3].u.operand;
        int dummyThisArgument = op == Call ? 0 : 1;
        for (int i = 0 + dummyThisArgument; i < argCount; ++i)
            addVarArgChild(get(registerOffset + argumentToOperand(i)));

        NodeIndex call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
            set(putInstruction[1].u.operand, call);
        return call;
    }

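    // A call in bytecode is a pair of instructions: op_call (or op_construct),
    // optionally followed by op_call_put_result, which names the destination of the
    // return value. That is why the code above peeks at the instruction after the
    // call to decide whether to profile the result and where to set() it.
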
    PredictedType getPredictionWithoutOSRExit(NodeIndex nodeIndex, unsigned bytecodeIndex)
    {
        UNUSED_PARAM(nodeIndex);

        PredictedType prediction = m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(bytecodeIndex);
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Dynamic [@%u, bc#%u] prediction: %s\n", nodeIndex, bytecodeIndex, predictionToString(prediction));
#endif

        return prediction;
    }
    PredictedType getPrediction(NodeIndex nodeIndex, unsigned bytecodeIndex)
    {
        PredictedType prediction = getPredictionWithoutOSRExit(nodeIndex, bytecodeIndex);

        if (prediction == PredictNone) {
            // We have no information about what values this node generates. Give up
            // on executing this code, since we're likely to do more damage than good.
            addToGraph(ForceOSRExit);
        }

        return prediction;
    }
    PredictedType getPredictionWithoutOSRExit()
    {
        return getPredictionWithoutOSRExit(m_graph.size(), m_currentProfilingIndex);
    }

    PredictedType getPrediction()
    {
        return getPrediction(m_graph.size(), m_currentProfilingIndex);
    }

    NodeIndex makeSafe(NodeIndex nodeIndex)
    {
        Node& node = m_graph[nodeIndex];

        bool likelyToTakeSlowCase;
        if (!isX86() && node.op() == ArithMod)
            likelyToTakeSlowCase = false;
        else
            likelyToTakeSlowCase = m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex);

        if (!likelyToTakeSlowCase
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            return nodeIndex;

        switch (m_graph[nodeIndex].op()) {
        case UInt32ToNumber:
        case ArithAdd:
        case ArithSub:
        case ArithNegate:
        case ValueAdd:
        case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
            m_graph[nodeIndex].mergeFlags(NodeMayOverflow);
            break;

        case ArithMul:
            if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLog("Making ArithMul @%u take deepest slow case.\n", nodeIndex);
#endif
                m_graph[nodeIndex].mergeFlags(NodeMayOverflow | NodeMayNegZero);
            } else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
                       || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) {
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLog("Making ArithMul @%u take faster slow case.\n", nodeIndex);
#endif
                m_graph[nodeIndex].mergeFlags(NodeMayNegZero);
            }
            break;

        default:
            ASSERT_NOT_REACHED();
            break;
        }

        return nodeIndex;
    }

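    // makeSafe() folds the baseline JIT's slow-case counters and prior OSR exit
    // reasons into node flags (NodeMayOverflow, NodeMayNegZero), so that later
    // phases know this arithmetic has misbehaved before and can account for
    // overflow or negative zero instead of exiting repeatedly on the same site.
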
    NodeIndex makeDivSafe(NodeIndex nodeIndex)
    {
        ASSERT(m_graph[nodeIndex].op() == ArithDiv);

        // The main slow case counter for op_div in the old JIT counts only when
        // the operands are not numbers. We don't care about that since we already
        // have speculations in place that take care of that separately. We only
        // care about when the outcome of the division is not an integer, which
        // is what the special fast case counter tells us.

        if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSpecialFastCase(m_currentIndex)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            return nodeIndex;

#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(m_graph[nodeIndex].op()), nodeIndex, m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif

        // FIXME: It might be possible to make this more granular. The DFG certainly can
        // distinguish between negative zero and overflow in its exit profiles.
        m_graph[nodeIndex].mergeFlags(NodeMayOverflow | NodeMayNegZero);

        return nodeIndex;
    }

    bool willNeedFlush(StructureStubInfo& stubInfo)
    {
        PolymorphicAccessStructureList* list;
        int listSize;
        switch (stubInfo.accessType) {
        case access_get_by_id_self_list:
            list = stubInfo.u.getByIdSelfList.structureList;
            listSize = stubInfo.u.getByIdSelfList.listSize;
            break;
        case access_get_by_id_proto_list:
            list = stubInfo.u.getByIdProtoList.structureList;
            listSize = stubInfo.u.getByIdProtoList.listSize;
            break;
        default:
            return false;
        }

        for (int i = 0; i < listSize; ++i) {
            if (!list->list[i].isDirect)
                return true;
        }

        return false;
    }

, Structure
* previousStructure
, StructureChain
* chain
)
836 if (!previousStructure
->storedPrototype().isNull() && previousStructure
->storedPrototype().asCell()->structure() != chain
->head()->get())
839 for (WriteBarrier
<Structure
>* it
= chain
->head(); *it
; ++it
) {
840 if (!(*it
)->storedPrototype().isNull() && (*it
)->storedPrototype().asCell()->structure() != it
[1].get())
    void buildOperandMapsIfNecessary();

    JSGlobalData* m_globalData;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;
    Graph& m_graph;

    // The current block being generated.
    BasicBlock* m_currentBlock;
    // The bytecode index of the current instruction being generated.
    unsigned m_currentIndex;
    // The bytecode index of the value profile of the current instruction being generated.
    unsigned m_currentProfilingIndex;

    // We use these values during code generation, and to avoid the need for
    // special handling we make sure they are available as constants in the
    // CodeBlock's constant pool. These variables are initialized to
    // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
    // constant pool, as necessary.
    unsigned m_constantUndefined;
    unsigned m_constantNull;
    unsigned m_constantNaN;
    unsigned m_constant1;
    HashMap<JSCell*, unsigned> m_cellConstants;
    HashMap<JSCell*, NodeIndex> m_cellConstantNodes;

    // A constant in the constant pool may be represented by more than one
    // node in the graph, depending on the context in which it is being used.
    struct ConstantRecord {
        ConstantRecord()
            : asInt32(NoNode)
            , asNumeric(NoNode)
            , asJSValue(NoNode)
        {
        }

        NodeIndex asInt32;
        NodeIndex asNumeric;
        NodeIndex asJSValue;
    };

    // Track the index of the node whose result is the current value for every
    // register value in the bytecode - argument, local, and temporary.
    Vector<ConstantRecord, 16> m_constants;

    // The number of arguments passed to the function.
    unsigned m_numArguments;
    // The number of locals (vars + temporaries) used in the function.
    unsigned m_numLocals;
    // The set of registers we need to preserve across BasicBlock boundaries;
    // typically equal to the set of vars, but we expand this to cover all
    // temporaries that persist across blocks (due to ?:, &&, ||, etc).
    BitVector m_preservedVars;
    // The number of slots (in units of sizeof(Register)) that we need to
    // preallocate for calls emanating from this frame. This includes the
    // size of the CallFrame, only if this is not a leaf function. (I.e.
    // this is 0 if and only if this function is a leaf.)
    unsigned m_parameterSlots;
    // The number of var args passed to the next var arg node.
    unsigned m_numPassedVarArgs;
    // The index in the global resolve info.
    unsigned m_globalResolveNumber;

    struct PhiStackEntry {
        PhiStackEntry(BasicBlock* block, NodeIndex phi, unsigned varNo)
            : m_block(block)
            , m_phi(phi)
            , m_varNo(varNo)
        {
        }

        BasicBlock* m_block;
        NodeIndex m_phi;
        unsigned m_varNo;
    };
    Vector<PhiStackEntry, 16> m_argumentPhiStack;
    Vector<PhiStackEntry, 16> m_localPhiStack;

    struct InlineStackEntry {
        ByteCodeParser* m_byteCodeParser;

        CodeBlock* m_codeBlock;
        CodeBlock* m_profiledBlock;
        InlineCallFrame* m_inlineCallFrame;
        VirtualRegister m_calleeVR; // absolute virtual register, not relative to call frame

        ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }

        QueryableExitProfile m_exitProfile;

        // Remapping of identifier and constant numbers from the code block being
        // inlined (inline callee) to the code block that we're inlining into
        // (the machine code block, which is the transitive, though not necessarily
        // direct, caller).
        Vector<unsigned> m_identifierRemap;
        Vector<unsigned> m_constantRemap;

        // Blocks introduced by this code block, which need successor linking.
        // May include up to one basic block that includes the continuation after
        // the callsite in the caller. These must be appended in the order that they
        // are created, but their bytecodeBegin values need not be in order as they
        // are ignored.
        Vector<UnlinkedBlock> m_unlinkedBlocks;

        // Potential block linking targets. Must be sorted by bytecodeBegin, and
        // cannot have two blocks that have the same bytecodeBegin. For this very
        // reason, this is not equivalent to m_unlinkedBlocks.
        Vector<BlockIndex> m_blockLinkingTargets;

        // If the callsite's basic block was split into two, then this will be
        // the head of the callsite block. It needs its successors linked to the
        // m_unlinkedBlocks, but not the other way around: there's no way for
        // any blocks in m_unlinkedBlocks to jump back into this block.
        BlockIndex m_callsiteBlockHead;

        // Does the callsite block head need linking? This is typically true
        // but will be false for the machine code block's inline stack entry
        // (since that one is not inlined) and for cases where an inline callee
        // did the linking for us.
        bool m_callsiteBlockHeadNeedsLinking;

        VirtualRegister m_returnValue;

        // Predictions about variable types collected from the profiled code block,
        // which are based on OSR exit profiles that past DFG compilations of this
        // code block had gathered.
        LazyOperandValueProfileParser m_lazyOperands;

        // Did we see any returns? We need to handle the (uncommon but necessary)
        // case where a procedure that does not return was inlined.
        bool m_didReturn;

        // Did we have any early returns?
        bool m_didEarlyReturn;

        // Pointers to the argument position trackers for this slice of code.
        Vector<ArgumentPosition*> m_argumentPositions;

        InlineStackEntry* m_caller;

        InlineStackEntry(ByteCodeParser*, CodeBlock*, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind);

        ~InlineStackEntry()
        {
            m_byteCodeParser->m_inlineStackTop = m_caller;
        }

        int remapOperand(int operand) const
        {
            if (!m_inlineCallFrame)
                return operand;

            if (operand >= FirstConstantRegisterIndex) {
                int result = m_constantRemap[operand - FirstConstantRegisterIndex];
                ASSERT(result >= FirstConstantRegisterIndex);
                return result;
            }

            return operand + m_inlineCallFrame->stackOffset;
        }
    };

    InlineStackEntry* m_inlineStackTop;

    // Have we built operand maps? We initialize them lazily, and only when doing
    // inlining.
    bool m_haveBuiltOperandMaps;
    // Mapping between identifier names and numbers.
    IdentifierMap m_identifierMap;
    // Mapping between values and constant numbers.
    JSValueMap m_jsValueMap;
    // Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible
    // work-around for the fact that JSValueMap can't handle "empty" values.
    unsigned m_emptyJSValueIndex;

    // Cache of code blocks that we've generated bytecode for.
    ByteCodeCache<canInlineFunctionFor> m_codeBlockCache;
};

#define NEXT_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    continue

#define LAST_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    return shouldContinueParsing

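// Every bytecode case in parseBlock() below ends in one of these macros:
// NEXT_OPCODE advances m_currentIndex past the instruction and continues the
// parse loop, while LAST_OPCODE does the same bookkeeping but returns from
// parseBlock(), ending the current basic block.
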
void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
{
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));

    NodeIndex callTarget = get(currentInstruction[1].u.operand);
    enum { ConstantFunction, LinkedFunction, UnknownFunction } callType;

    CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
        m_inlineStackTop->m_profiledBlock, m_currentIndex);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("For call at @%lu bc#%u: ", m_graph.size(), m_currentIndex);
    if (callLinkStatus.isSet()) {
        if (callLinkStatus.couldTakeSlowPath())
            dataLog("could take slow path, ");
        dataLog("target = %p\n", callLinkStatus.callTarget());
    } else
        dataLog("not set.\n");
#endif

    if (m_graph.isFunctionConstant(callTarget))
        callType = ConstantFunction;
    else if (callLinkStatus.isSet() && !callLinkStatus.couldTakeSlowPath()
             && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache))
        callType = LinkedFunction;
    else
        callType = UnknownFunction;
    if (callType != UnknownFunction) {
        int argumentCountIncludingThis = currentInstruction[2].u.operand;
        int registerOffset = currentInstruction[3].u.operand;

        // Do we have a result?
        bool usesResult = false;
        int resultOperand = 0; // make compiler happy
        unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
        Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
        PredictedType prediction = PredictNone;
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
            resultOperand = putInstruction[1].u.operand;
            usesResult = true;
            m_currentProfilingIndex = nextOffset;
            prediction = getPrediction();
            nextOffset += OPCODE_LENGTH(op_call_put_result);
        }
        JSFunction* expectedFunction;
        Intrinsic intrinsic;
        bool certainAboutExpectedFunction;
        if (callType == ConstantFunction) {
            expectedFunction = m_graph.valueOfFunctionConstant(callTarget);
            intrinsic = expectedFunction->executable()->intrinsicFor(kind);
            certainAboutExpectedFunction = true;
        } else {
            ASSERT(callType == LinkedFunction);
            expectedFunction = callLinkStatus.callTarget();
            intrinsic = expectedFunction->executable()->intrinsicFor(kind);
            certainAboutExpectedFunction = false;
        }

        if (intrinsic != NoIntrinsic) {
            if (!certainAboutExpectedFunction)
                emitFunctionCheck(expectedFunction, callTarget, registerOffset, kind);

            if (handleIntrinsic(usesResult, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
                if (!certainAboutExpectedFunction) {
                    // Need to keep the call target alive for OSR. We could easily optimize this out if we wanted
                    // to, since at this point we know that the call target is a constant. It's just that OSR isn't
                    // smart enough to figure that out, since it doesn't understand CheckFunction.
                    addToGraph(Phantom, callTarget);
                }

                return;
            }
        } else if (handleInlining(usesResult, currentInstruction[1].u.operand, callTarget, resultOperand, certainAboutExpectedFunction, expectedFunction, registerOffset, argumentCountIncludingThis, nextOffset, kind))
            return;
    }

    addCall(interpreter, currentInstruction, op);
}

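// handleCall() classifies the call site as ConstantFunction (the callee is a known
// constant in the graph), LinkedFunction (the profiled block's call link info names
// a single target and no BadCache exits were seen), or UnknownFunction. Only the
// first two are candidates for intrinsics or inlining; everything else falls
// through to a generic Call/Construct node via addCall().
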
void ByteCodeParser::emitFunctionCheck(JSFunction* expectedFunction, NodeIndex callTarget, int registerOffset, CodeSpecializationKind kind)
{
    NodeIndex thisArgument;
    if (kind == CodeForCall)
        thisArgument = get(registerOffset + argumentToOperand(0));
    else
        thisArgument = NoNode;
    addToGraph(CheckFunction, OpInfo(expectedFunction), callTarget, thisArgument);
}

bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex callTargetNodeIndex, int resultOperand, bool certainAboutExpectedFunction, JSFunction* expectedFunction, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
{
    // First, the really simple checks: do we have an actual JS function?
    if (!expectedFunction)
        return false;
    if (expectedFunction->isHostFunction())
        return false;

    FunctionExecutable* executable = expectedFunction->jsExecutable();

    // Does the number of arguments we're passing match the arity of the target? We could
    // inline arity check failures, but for simplicity we currently don't.
    if (static_cast<int>(executable->parameterCount()) + 1 != argumentCountIncludingThis)
        return false;

    // Have we exceeded inline stack depth, or are we trying to inline a recursive call?
    // If either of these are detected, then don't inline.
    unsigned depth = 0;
    for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
        ++depth;
        if (depth >= Options::maximumInliningDepth)
            return false; // Depth exceeded.

        if (entry->executable() == executable)
            return false; // Recursion detected.
    }

    // Does the code block's size match the heuristics/requirements for being
    // an inline candidate?
    CodeBlock* profiledBlock = executable->profiledCodeBlockFor(kind);
    if (!mightInlineFunctionFor(profiledBlock, kind))
        return false;

    // If we get here then it looks like we should definitely inline this code. Proceed
    // with parsing the code to get bytecode, so that we can then parse the bytecode.
    // Note that if LLInt is enabled, the bytecode will always be available. Also note
    // that if LLInt is enabled, we may inline a code block that has never been JITted
    // before.
    CodeBlock* codeBlock = m_codeBlockCache.get(CodeBlockKey(executable, kind), expectedFunction->scope());
    if (!codeBlock)
        return false;

    ASSERT(canInlineFunctionFor(codeBlock, kind));

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("Inlining executable %p.\n", executable);
#endif

    // Now we know without a doubt that we are committed to inlining. So begin the process
    // by checking the callee (if necessary) and making sure that arguments and the callee
    // are flushed.
    if (!certainAboutExpectedFunction)
        emitFunctionCheck(expectedFunction, callTargetNodeIndex, registerOffset, kind);

    // FIXME: Don't flush constants!

    Vector<VariableAccessData*, 8> arguments;
    for (int i = 1; i < argumentCountIncludingThis; ++i)
        arguments.append(flushArgument(registerOffset + argumentToOperand(i)));

    int inlineCallFrameStart = m_inlineStackTop->remapOperand(registerOffset) - RegisterFile::CallFrameHeaderSize;

    // Make sure that the area used by the call frame is reserved.
    for (int arg = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numVars; arg-- > inlineCallFrameStart;)
        m_preservedVars.set(arg);

    // Make sure that we have enough locals.
    unsigned newNumLocals = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters;
    if (newNumLocals > m_numLocals) {
        m_numLocals = newNumLocals;
        for (size_t i = 0; i < m_graph.m_blocks.size(); ++i)
            m_graph.m_blocks[i]->ensureLocals(newNumLocals);
    }

    InlineStackEntry inlineStackEntry(this, codeBlock, profiledBlock, m_graph.m_blocks.size() - 1, (VirtualRegister)m_inlineStackTop->remapOperand(callTarget), expectedFunction, (VirtualRegister)m_inlineStackTop->remapOperand(usesResult ? resultOperand : InvalidVirtualRegister), (VirtualRegister)inlineCallFrameStart, kind);

    // Link up the argument variable access datas to their argument positions.
    for (int i = 1; i < argumentCountIncludingThis; ++i)
        inlineStackEntry.m_argumentPositions[i]->addVariable(arguments[i - 1]);

    // This is where the actual inlining really happens.
    unsigned oldIndex = m_currentIndex;
    unsigned oldProfilingIndex = m_currentProfilingIndex;
    m_currentIndex = 0;
    m_currentProfilingIndex = 0;

    addToGraph(InlineStart);

    parseCodeBlock();

    m_currentIndex = oldIndex;
    m_currentProfilingIndex = oldProfilingIndex;

    // If the inlined code created some new basic blocks, then we have linking to do.
    if (inlineStackEntry.m_callsiteBlockHead != m_graph.m_blocks.size() - 1) {

        ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
        if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
            linkBlock(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead].get(), inlineStackEntry.m_blockLinkingTargets);
        else
            ASSERT(m_graph.m_blocks[inlineStackEntry.m_callsiteBlockHead]->isLinked);

        // It's possible that the callsite block head is not owned by the caller.
        if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
            // It's definitely owned by the caller, because the caller created new blocks.
            // Assert that this all adds up.
            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_blockIndex == inlineStackEntry.m_callsiteBlockHead);
            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
            inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
        } else {
            // It's definitely not owned by the caller. Tell the caller that he does not
            // need to link his callsite block head, because we did it for him.
            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
            inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
        }

        linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
    } else
        ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());

    // If there was a return, but no early returns, then we're done. We allow parsing of
    // the caller to continue in whatever basic block we're in right now.
    if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
        BasicBlock* lastBlock = m_graph.m_blocks.last().get();
        ASSERT(lastBlock->isEmpty() || !m_graph.last().isTerminal());

        // If we created new blocks then the last block needs linking, but in the
        // caller. It doesn't need to be linked to, but it needs outgoing links.
        if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
#if DFG_ENABLE(DEBUG_VERBOSE)
            dataLog("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
#endif
            // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
            // for release builds because this block will never serve as a potential target
            // in the linker's binary search.
            lastBlock->bytecodeBegin = m_currentIndex;
            m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size() - 1));
        }

        m_currentBlock = m_graph.m_blocks.last().get();

#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
#endif
        return true;
    }

    // If we get to this point then all blocks must end in some sort of terminals.
    ASSERT(m_graph.last().isTerminal());

    // Link the early returns to the basic block we're about to create.
    for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
        if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
            continue;
        BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get();
        ASSERT(!block->isLinked);
        Node& node = m_graph[block->last()];
        ASSERT(node.op() == Jump);
        ASSERT(node.takenBlockIndex() == NoBlock);
        node.setTakenBlockIndex(m_graph.m_blocks.size());
        inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
#if !ASSERT_DISABLED
        block->isLinked = true;
#endif
    }

    // Need to create a new basic block for the continuation at the caller.
    OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_numArguments, m_numLocals));
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
#endif
    m_currentBlock = block.get();
    ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
    m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
    m_inlineStackTop->m_caller->m_blockLinkingTargets.append(m_graph.m_blocks.size());
    m_graph.m_blocks.append(block.release());
    prepareToParseBlock();

    // At this point we return and continue to generate code for the caller, but
    // in the new basic block.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("Done inlining executable %p, continuing code generation in new block.\n", executable);
#endif
    return true;
}

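// To summarize the checks above: inlining requires a known JS (non-host) callee,
// an exact arity match, an inline depth below Options::maximumInliningDepth, no
// recursion into an executable already on the inline stack, and a callee code
// block that satisfies mightInlineFunctionFor()/canInlineFunctionFor().
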
void ByteCodeParser::setIntrinsicResult(bool usesResult, int resultOperand, NodeIndex nodeIndex)
{
    if (!usesResult)
        return;
    set(resultOperand, nodeIndex);
}

bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
{
    if (argumentCountIncludingThis == 1) { // Math.min()
        setIntrinsicResult(usesResult, resultOperand, constantNaN());
        return true;
    }

    if (argumentCountIncludingThis == 2) { // Math.min(x)
        // FIXME: what we'd really like is a ValueToNumber, except we don't support that right now. Oh well.
        NodeIndex result = get(registerOffset + argumentToOperand(1));
        addToGraph(CheckNumber, result);
        setIntrinsicResult(usesResult, resultOperand, result);
        return true;
    }

    if (argumentCountIncludingThis == 3) { // Math.min(x, y)
        setIntrinsicResult(usesResult, resultOperand, addToGraph(op, get(registerOffset + argumentToOperand(1)), get(registerOffset + argumentToOperand(2))));
        return true;
    }

    // Don't handle >=3 arguments for now.
    return false;
}

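// For example, Math.min() with no arguments folds to the NaN constant,
// Math.min(x) becomes a CheckNumber on x (standing in for the ToNumber that the
// spec requires), and Math.min(x, y) becomes a single ArithMin/ArithMax node.
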
// FIXME: We dead-code-eliminate unused Math intrinsics, but that's invalid because
// they need to perform the ToNumber conversion, which can have side-effects.
bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, PredictedType prediction)
{
    switch (intrinsic) {
    case AbsIntrinsic: {
        if (argumentCountIncludingThis == 1) { // Math.abs()
            setIntrinsicResult(usesResult, resultOperand, constantNaN());
            return true;
        }

        if (!MacroAssembler::supportsFloatingPointAbs())
            return false;

        NodeIndex nodeIndex = addToGraph(ArithAbs, get(registerOffset + argumentToOperand(1)));
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
            m_graph[nodeIndex].mergeFlags(NodeMayOverflow);
        setIntrinsicResult(usesResult, resultOperand, nodeIndex);
        return true;
    }

    case MinIntrinsic:
        return handleMinMax(usesResult, resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);

    case MaxIntrinsic:
        return handleMinMax(usesResult, resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);

    case SqrtIntrinsic: {
        if (argumentCountIncludingThis == 1) { // Math.sqrt()
            setIntrinsicResult(usesResult, resultOperand, constantNaN());
            return true;
        }

        if (!MacroAssembler::supportsFloatingPointSqrt())
            return false;

        setIntrinsicResult(usesResult, resultOperand, addToGraph(ArithSqrt, get(registerOffset + argumentToOperand(1))));
        return true;
    }

    case ArrayPushIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        NodeIndex arrayPush = addToGraph(ArrayPush, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
        if (usesResult)
            set(resultOperand, arrayPush);

        return true;
    }

    case ArrayPopIntrinsic: {
        if (argumentCountIncludingThis != 1)
            return false;

        NodeIndex arrayPop = addToGraph(ArrayPop, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
        if (usesResult)
            set(resultOperand, arrayPop);
        return true;
    }

    case CharCodeAtIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        int thisOperand = registerOffset + argumentToOperand(0);
        if (!(m_graph[get(thisOperand)].prediction() & PredictString))
            return false;

        int indexOperand = registerOffset + argumentToOperand(1);
        NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
        NodeIndex charCode = addToGraph(StringCharCodeAt, get(thisOperand), getToInt32(indexOperand), storage);

        if (usesResult)
            set(resultOperand, charCode);
        return true;
    }

    case CharAtIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        int thisOperand = registerOffset + argumentToOperand(0);
        if (!(m_graph[get(thisOperand)].prediction() & PredictString))
            return false;

        int indexOperand = registerOffset + argumentToOperand(1);
        NodeIndex storage = addToGraph(GetIndexedPropertyStorage, get(thisOperand), getToInt32(indexOperand));
        NodeIndex charCode = addToGraph(StringCharAt, get(thisOperand), getToInt32(indexOperand), storage);

        if (usesResult)
            set(resultOperand, charCode);
        return true;
    }

    case RegExpExecIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        NodeIndex regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
        if (usesResult)
            set(resultOperand, regExpExec);

        return true;
    }

    case RegExpTestIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        NodeIndex regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
        if (usesResult)
            set(resultOperand, regExpExec);

        return true;
    }

    default:
        return false;
    }
}

void ByteCodeParser::prepareToParseBlock()
{
    for (unsigned i = 0; i < m_constants.size(); ++i)
        m_constants[i] = ConstantRecord();
    m_cellConstantNodes.clear();
}

bool ByteCodeParser::parseBlock(unsigned limit)
{
    bool shouldContinueParsing = true;

    Interpreter* interpreter = m_globalData->interpreter;
    Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
    unsigned blockBegin = m_currentIndex;

    // If we are the first basic block, introduce markers for arguments. This allows
    // us to track if a use of an argument may use the actual argument passed, as
    // opposed to using a value we set explicitly.
    if (m_currentBlock == m_graph.m_blocks[0].get() && !m_inlineStackTop->m_inlineCallFrame) {
        m_graph.m_arguments.resize(m_numArguments);
        for (unsigned argument = 0; argument < m_numArguments; ++argument) {
            NodeIndex setArgument = addToGraph(SetArgument, OpInfo(newVariableAccessData(argumentToOperand(argument))));
            m_graph.m_arguments[argument] = setArgument;
            m_currentBlock->variablesAtHead.setArgumentFirstTime(argument, setArgument);
            m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
        }
    }

    while (true) {
        m_currentProfilingIndex = m_currentIndex;

        // Don't extend over jump destinations.
        if (m_currentIndex == limit) {
            // Ordinarily we want to plant a jump. But refuse to do this if the block is
            // empty. This is a special case for inlining, which might otherwise create
            // some empty blocks in some cases. When parseBlock() returns with an empty
            // block, it will get repurposed instead of creating a new one. Note that this
            // logic relies on every bytecode resulting in one or more nodes, which would
            // be true anyway except for op_loop_hint, which emits a Phantom to force this
            // to be true.
            if (!m_currentBlock->isEmpty())
                addToGraph(Jump, OpInfo(m_currentIndex));
            else {
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLog("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
#endif
            }
            return shouldContinueParsing;
        }

        // Switch on the current bytecode opcode.
        Instruction* currentInstruction = instructionsBegin + m_currentIndex;
        OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
        switch (opcodeID) {
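
        // Throughout the cases below, get() and set() translate bytecode virtual registers
        // into data flow: get() yields the node currently holding a register's value
        // (creating a GetLocal or constant node as needed) and set() records a SetLocal for
        // it, while addToGraph() appends a node to the block being built.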

        // === Function entry opcodes ===

        case op_enter:
            // Initialize all locals to undefined.
            for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
                set(i, constantUndefined());
            NEXT_OPCODE(op_enter);
        case op_convert_this: {
            NodeIndex op1 = getThis();
            if (m_graph[op1].op() == ConvertThis)
                setThis(op1);
            else
                setThis(addToGraph(ConvertThis, op1));
            NEXT_OPCODE(op_convert_this);
        }

        case op_create_this: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CreateThis, op1));
            NEXT_OPCODE(op_create_this);
        }

        case op_new_object: {
            set(currentInstruction[1].u.operand, addToGraph(NewObject));
            NEXT_OPCODE(op_new_object);
        }
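
        // op_new_array and op_strcat take a variable number of operands, so their children
        // are queued with addVarArgChild() and the node is then created in Node::VarArg mode.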
        case op_new_array: {
            int startOperand = currentInstruction[2].u.operand;
            int numOperands = currentInstruction[3].u.operand;
            for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
                addVarArgChild(get(operandIdx));
            set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(0), OpInfo(0)));
            NEXT_OPCODE(op_new_array);
        }

        case op_new_array_buffer: {
            int startConstant = currentInstruction[2].u.operand;
            int numConstants = currentInstruction[3].u.operand;
            set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(startConstant), OpInfo(numConstants)));
            NEXT_OPCODE(op_new_array_buffer);
        }

        case op_new_regexp: {
            set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
            NEXT_OPCODE(op_new_regexp);
        }

        case op_get_callee: {
            if (m_inlineStackTop->m_inlineCallFrame)
                set(currentInstruction[1].u.operand, getDirect(m_inlineStackTop->m_calleeVR));
            else
                set(currentInstruction[1].u.operand, addToGraph(GetCallee));
            NEXT_OPCODE(op_get_callee);
        }

        // === Bitwise operations ===
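
        // The bitwise cases fetch operands through getToInt32(), which inserts a conversion
        // to int32 where the incoming value is not already known to be one, mirroring the
        // ToInt32 coercion these operators perform.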
        case op_bitand: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
            NEXT_OPCODE(op_bitand);
        }

        case op_bitor: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
            NEXT_OPCODE(op_bitor);
        }

        case op_bitxor: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
            NEXT_OPCODE(op_bitxor);
        }
        case op_rshift: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            NodeIndex result;
            // Optimize out shifts by zero.
            if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
                result = op1;
            else
                result = addToGraph(BitRShift, op1, op2);
            set(currentInstruction[1].u.operand, result);
            NEXT_OPCODE(op_rshift);
        }

        case op_lshift: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            NodeIndex result;
            // Optimize out shifts by zero.
            if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
                result = op1;
            else
                result = addToGraph(BitLShift, op1, op2);
            set(currentInstruction[1].u.operand, result);
            NEXT_OPCODE(op_lshift);
        }

        case op_urshift: {
            NodeIndex op1 = getToInt32(currentInstruction[2].u.operand);
            NodeIndex op2 = getToInt32(currentInstruction[3].u.operand);
            NodeIndex result;
            // The result of a zero-extending right shift is treated as an unsigned value.
            // This means that if the top bit is set, the result is not in the int32 range,
            // and as such must be stored as a double. If the shift amount is a constant,
            // we may be able to optimize.
            if (isInt32Constant(op2)) {
                // If we know we are shifting by a non-zero amount, then since the operation
                // zero fills we know the top bit of the result must be zero, and as such the
                // result must be within the int32 range. Conversely, if this is a shift by
                // zero, then the result may be changed by the conversion to unsigned, but it
                // is not necessary to perform the shift!
                if (valueOfInt32Constant(op2) & 0x1f)
                    result = addToGraph(BitURShift, op1, op2);
                else
                    result = makeSafe(addToGraph(UInt32ToNumber, op1));
            } else {
                // Cannot optimize at this stage; shift & potentially rebox as a double.
                result = addToGraph(BitURShift, op1, op2);
                result = makeSafe(addToGraph(UInt32ToNumber, result));
            }
            set(currentInstruction[1].u.operand, result);
            NEXT_OPCODE(op_urshift);
        }

        // === Increment/Decrement opcodes ===
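
        // makeSafe() consults the profiling for the current bytecode; if the baseline JIT
        // saw overflow or negative zero here, the arithmetic node is flagged so later phases
        // do not speculate on pure int32 math. (makeDivSafe, used for op_div below, is the
        // division-specific variant.)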
        case op_pre_inc: {
            unsigned srcDst = currentInstruction[1].u.operand;
            NodeIndex op = get(srcDst);
            set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
            NEXT_OPCODE(op_pre_inc);
        }

        case op_post_inc: {
            unsigned result = currentInstruction[1].u.operand;
            unsigned srcDst = currentInstruction[2].u.operand;
            ASSERT(result != srcDst); // Required for assumptions we make during OSR.
            NodeIndex op = get(srcDst);
            set(result, op);
            set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
            NEXT_OPCODE(op_post_inc);
        }

        case op_pre_dec: {
            unsigned srcDst = currentInstruction[1].u.operand;
            NodeIndex op = get(srcDst);
            set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
            NEXT_OPCODE(op_pre_dec);
        }

        case op_post_dec: {
            unsigned result = currentInstruction[1].u.operand;
            unsigned srcDst = currentInstruction[2].u.operand;
            NodeIndex op = get(srcDst);
            set(result, op);
            set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
            NEXT_OPCODE(op_post_dec);
        }
        // === Arithmetic operations ===

        case op_add: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            if (m_graph[op1].hasNumberResult() && m_graph[op2].hasNumberResult())
                set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, op1, op2)));
            else
                set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, op1, op2)));
            NEXT_OPCODE(op_add);
        }

        case op_sub: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, op1, op2)));
            NEXT_OPCODE(op_sub);
        }

        case op_negate: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithNegate, op1)));
            NEXT_OPCODE(op_negate);
        }

        case op_mul: {
            // Multiply requires that the inputs are not truncated, unfortunately.
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, op1, op2)));
            NEXT_OPCODE(op_mul);
        }

        case op_mod: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, op1, op2)));
            NEXT_OPCODE(op_mod);
        }

        case op_div: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
            NEXT_OPCODE(op_div);
        }
        // === Misc operations ===

#if ENABLE(DEBUG_WITH_BREAKPOINT)
        case op_debug:
            addToGraph(Breakpoint);
            NEXT_OPCODE(op_debug);
#endif
        case op_mov: {
            NodeIndex op = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, op);
            NEXT_OPCODE(op_mov);
        }
        case op_check_has_instance:
            addToGraph(CheckHasInstance, get(currentInstruction[1].u.operand));
            NEXT_OPCODE(op_check_has_instance);

        case op_instanceof: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            NodeIndex baseValue = get(currentInstruction[3].u.operand);
            NodeIndex prototype = get(currentInstruction[4].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, baseValue, prototype));
            NEXT_OPCODE(op_instanceof);
        }

        case op_is_undefined: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsUndefined, value));
            NEXT_OPCODE(op_is_undefined);
        }

        case op_is_boolean: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsBoolean, value));
            NEXT_OPCODE(op_is_boolean);
        }

        case op_is_number: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsNumber, value));
            NEXT_OPCODE(op_is_number);
        }

        case op_is_string: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsString, value));
            NEXT_OPCODE(op_is_string);
        }

        case op_is_object: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsObject, value));
            NEXT_OPCODE(op_is_object);
        }

        case op_is_function: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsFunction, value));
            NEXT_OPCODE(op_is_function);
        }

        case op_not: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
            NEXT_OPCODE(op_not);
        }

        case op_to_primitive: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
            NEXT_OPCODE(op_to_primitive);
        }

        case op_strcat: {
            int startOperand = currentInstruction[2].u.operand;
            int numOperands = currentInstruction[3].u.operand;
            for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
                addVarArgChild(get(operandIdx));
            set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, StrCat, OpInfo(0), OpInfo(0)));
            NEXT_OPCODE(op_strcat);
        }
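
        // Each comparison opcode below lowers to a single Compare* node; the negated forms
        // wrap the same comparison in LogicalNot, and the branch-fused variants (op_jless
        // and friends) are handled with the block terminators further down.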
        case op_less: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
            NEXT_OPCODE(op_less);
        }

        case op_lesseq: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
            NEXT_OPCODE(op_lesseq);
        }

        case op_greater: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
            NEXT_OPCODE(op_greater);
        }

        case op_greatereq: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
            NEXT_OPCODE(op_greatereq);
        }

        case op_eq: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
            NEXT_OPCODE(op_eq);
        }

        case op_eq_null: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CompareEq, value, constantNull()));
            NEXT_OPCODE(op_eq_null);
        }

        case op_stricteq: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
            NEXT_OPCODE(op_stricteq);
        }

        case op_neq: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
            NEXT_OPCODE(op_neq);
        }

        case op_neq_null: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, value, constantNull())));
            NEXT_OPCODE(op_neq_null);
        }

        case op_nstricteq: {
            NodeIndex op1 = get(currentInstruction[2].u.operand);
            NodeIndex op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareStrictEq, op1, op2)));
            NEXT_OPCODE(op_nstricteq);
        }

        // === Property access operations ===
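
        // The property access cases below rely on GetByIdStatus / PutByIdStatus, which
        // summarize what the baseline JIT's inline caches observed at this bytecode. When a
        // cache was monomorphic the access is lowered to an explicit CheckStructure followed
        // by a raw offset load or store; otherwise a generic GetById / PutById is planted.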
        case op_get_by_val: {
            PredictedType prediction = getPrediction();

            NodeIndex base = get(currentInstruction[2].u.operand);
            NodeIndex property = get(currentInstruction[3].u.operand);
            NodeIndex propertyStorage = addToGraph(GetIndexedPropertyStorage, base, property);
            NodeIndex getByVal = addToGraph(GetByVal, OpInfo(0), OpInfo(prediction), base, property, propertyStorage);
            set(currentInstruction[1].u.operand, getByVal);

            NEXT_OPCODE(op_get_by_val);
        }

        case op_put_by_val: {
            NodeIndex base = get(currentInstruction[1].u.operand);
            NodeIndex property = get(currentInstruction[2].u.operand);
            NodeIndex value = get(currentInstruction[3].u.operand);

            addToGraph(PutByVal, base, property, value);

            NEXT_OPCODE(op_put_by_val);
        }
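
        // op_method_check is always followed by the op_get_by_id it guards, so the case
        // below consumes both instructions at once: it advances m_currentIndex past the pair
        // and re-enters the dispatch loop directly rather than using NEXT_OPCODE.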
        case op_method_check: {
            m_currentProfilingIndex += OPCODE_LENGTH(op_method_check);
            Instruction* getInstruction = currentInstruction + OPCODE_LENGTH(op_method_check);

            PredictedType prediction = getPrediction();

            ASSERT(interpreter->getOpcodeID(getInstruction->u.opcode) == op_get_by_id);

            NodeIndex base = get(getInstruction[2].u.operand);
            unsigned identifier = m_inlineStackTop->m_identifierRemap[getInstruction[3].u.operand];

            // Check if the method_check was monomorphic. If so, emit a CheckXYZMethod
            // node, which is a lot more efficient.
            GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
                m_inlineStackTop->m_profiledBlock,
                m_currentIndex,
                m_codeBlock->identifier(identifier));
            MethodCallLinkStatus methodCallStatus = MethodCallLinkStatus::computeFor(
                m_inlineStackTop->m_profiledBlock, m_currentIndex);

            if (methodCallStatus.isSet()
                && !getByIdStatus.wasSeenInJIT()
                && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
                // It's monomorphic as far as we can tell, since the method_check was linked
                // but the slow path (i.e. the normal get_by_id) never fired.

                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCallStatus.structure())), base);
                if (methodCallStatus.needsPrototypeCheck())
                    addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(methodCallStatus.prototypeStructure())), cellConstant(methodCallStatus.prototype()));

                // Keep the base of the access alive past the speculations.
                addToGraph(Phantom, base);

                set(getInstruction[1].u.operand, cellConstant(methodCallStatus.function()));
            } else
                set(getInstruction[1].u.operand, addToGraph(getByIdStatus.makesCalls() ? GetByIdFlush : GetById, OpInfo(identifier), OpInfo(prediction), base));

            m_currentIndex += OPCODE_LENGTH(op_method_check) + OPCODE_LENGTH(op_get_by_id);
            continue;
        }
        case op_get_scoped_var: {
            PredictedType prediction = getPrediction();
            int dst = currentInstruction[1].u.operand;
            int slot = currentInstruction[2].u.operand;
            int depth = currentInstruction[3].u.operand;
            NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
            NodeIndex getScopedVar = addToGraph(GetScopedVar, OpInfo(slot), OpInfo(prediction), getScopeChain);
            set(dst, getScopedVar);
            NEXT_OPCODE(op_get_scoped_var);
        }

        case op_put_scoped_var: {
            int slot = currentInstruction[1].u.operand;
            int depth = currentInstruction[2].u.operand;
            int source = currentInstruction[3].u.operand;
            NodeIndex getScopeChain = addToGraph(GetScopeChain, OpInfo(depth));
            addToGraph(PutScopedVar, OpInfo(slot), getScopeChain, get(source));
            NEXT_OPCODE(op_put_scoped_var);
        }
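
        // For op_get_by_id, a "simple direct" status (a monomorphic cache with no side
        // effects) becomes CheckStructure plus GetByOffset, reading either from the object
        // itself (inline property storage, with the offset biased by
        // sizeof(JSObject) / sizeof(EncodedJSValue)) or from a separately loaded storage vector.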
        case op_get_by_id: {
            PredictedType prediction = getPredictionWithoutOSRExit();

            NodeIndex base = get(currentInstruction[2].u.operand);
            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];

            Identifier identifier = m_codeBlock->identifier(identifierNumber);
            GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
                m_inlineStackTop->m_profiledBlock, m_currentIndex, identifier);

            if (getByIdStatus.isSimpleDirect()
                && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)) {
                ASSERT(getByIdStatus.structureSet().size());

                // The implementation of GetByOffset does not know to terminate speculative
                // execution if it doesn't have a prediction, so we do it manually.
                if (prediction == PredictNone)
                    addToGraph(ForceOSRExit);

                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base);
                NodeIndex propertyStorage;
                size_t offsetOffset;
                if (getByIdStatus.structureSet().allAreUsingInlinePropertyStorage()) {
                    propertyStorage = base;
                    ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue)));
                    offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue);
                } else {
                    propertyStorage = addToGraph(GetPropertyStorage, base);
                    offsetOffset = 0;
                }
                set(currentInstruction[1].u.operand, addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage));

                StorageAccessData storageAccessData;
                storageAccessData.offset = getByIdStatus.offset() + offsetOffset;
                storageAccessData.identifierNumber = identifierNumber;
                m_graph.m_storageAccessData.append(storageAccessData);
            } else
                set(currentInstruction[1].u.operand, addToGraph(getByIdStatus.makesCalls() ? GetByIdFlush : GetById, OpInfo(identifierNumber), OpInfo(prediction), base));

            NEXT_OPCODE(op_get_by_id);
        }
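
        // Puts follow the same pattern: a "simple replace" becomes CheckStructure +
        // PutByOffset; a "simple transition" additionally checks each structure on the
        // prototype chain (for non-direct puts) and plants a PutStructure before the
        // PutByOffset; anything else falls back to a generic PutById / PutByIdDirect.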
        case op_put_by_id:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal: {
            NodeIndex value = get(currentInstruction[3].u.operand);
            NodeIndex base = get(currentInstruction[1].u.operand);
            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
            bool direct = currentInstruction[8].u.operand;

            PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
                m_inlineStackTop->m_profiledBlock,
                m_currentIndex,
                m_codeBlock->identifier(identifierNumber));
            if (!putByIdStatus.isSet())
                addToGraph(ForceOSRExit);

            bool hasExitSite = m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache);

            if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
                addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), base, addToGraph(GetPropertyStorage, base), value);

                StorageAccessData storageAccessData;
                storageAccessData.offset = putByIdStatus.offset();
                storageAccessData.identifierNumber = identifierNumber;
                m_graph.m_storageAccessData.append(storageAccessData);
            } else if (!hasExitSite
                       && putByIdStatus.isSimpleTransition()
                       && putByIdStatus.oldStructure()->propertyStorageCapacity() == putByIdStatus.newStructure()->propertyStorageCapacity()
                       && structureChainIsStillValid(
                           direct,
                           putByIdStatus.oldStructure(),
                           putByIdStatus.structureChain())) {

                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
                if (!direct) {
                    if (!putByIdStatus.oldStructure()->storedPrototype().isNull())
                        addToGraph(
                            CheckStructure,
                            OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure()->storedPrototype().asCell()->structure())),
                            cellConstant(putByIdStatus.oldStructure()->storedPrototype().asCell()));

                    for (WriteBarrier<Structure>* it = putByIdStatus.structureChain()->head(); *it; ++it) {
                        JSValue prototype = (*it)->storedPrototype();
                        if (prototype.isNull())
                            continue;
                        ASSERT(prototype.isCell());
                        addToGraph(
                            CheckStructure,
                            OpInfo(m_graph.addStructureSet(prototype.asCell()->structure())),
                            cellConstant(prototype.asCell()));
                    }
                }

                addToGraph(
                    PutStructure,
                    OpInfo(
                        m_graph.addStructureTransitionData(
                            StructureTransitionData(
                                putByIdStatus.oldStructure(),
                                putByIdStatus.newStructure()))),
                    base);

                addToGraph(
                    PutByOffset,
                    OpInfo(m_graph.m_storageAccessData.size()),
                    base,
                    addToGraph(GetPropertyStorage, base),
                    value);

                StorageAccessData storageAccessData;
                storageAccessData.offset = putByIdStatus.offset();
                storageAccessData.identifierNumber = identifierNumber;
                m_graph.m_storageAccessData.append(storageAccessData);
            } else {
                if (direct)
                    addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
                else
                    addToGraph(PutById, OpInfo(identifierNumber), base, value);
            }

            NEXT_OPCODE(op_put_by_id);
        }
        case op_get_global_var: {
            PredictedType prediction = getPrediction();

            NodeIndex getGlobalVar = addToGraph(GetGlobalVar, OpInfo(currentInstruction[2].u.operand), OpInfo(prediction));
            set(currentInstruction[1].u.operand, getGlobalVar);
            NEXT_OPCODE(op_get_global_var);
        }

        case op_put_global_var: {
            NodeIndex value = get(currentInstruction[2].u.operand);
            addToGraph(PutGlobalVar, OpInfo(currentInstruction[1].u.operand), value);
            NEXT_OPCODE(op_put_global_var);
        }

        // === Block terminators. ===
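
        // Every terminator below ends the current block via LAST_OPCODE. Jump and Branch
        // nodes record their targets as bytecode offsets for now; linkBlock() later rewrites
        // them into block indices once all blocks of this code block exist.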
        case op_jmp: {
            unsigned relativeOffset = currentInstruction[1].u.operand;
            addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
            LAST_OPCODE(op_jmp);
        }

        case op_loop: {
            unsigned relativeOffset = currentInstruction[1].u.operand;
            addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
            LAST_OPCODE(op_loop);
        }

        case op_jtrue: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            NodeIndex condition = get(currentInstruction[1].u.operand);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
            LAST_OPCODE(op_jtrue);
        }

        case op_jfalse: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            NodeIndex condition = get(currentInstruction[1].u.operand);
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jfalse);
        }

        case op_loop_if_true: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            NodeIndex condition = get(currentInstruction[1].u.operand);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_true)), condition);
            LAST_OPCODE(op_loop_if_true);
        }

        case op_loop_if_false: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            NodeIndex condition = get(currentInstruction[1].u.operand);
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_false)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_loop_if_false);
        }

        case op_jeq_null: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            NodeIndex value = get(currentInstruction[1].u.operand);
            NodeIndex condition = addToGraph(CompareEq, value, constantNull());
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
            LAST_OPCODE(op_jeq_null);
        }

        case op_jneq_null: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            NodeIndex value = get(currentInstruction[1].u.operand);
            NodeIndex condition = addToGraph(CompareEq, value, constantNull());
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jneq_null);
        }

        case op_jless: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareLess, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
            LAST_OPCODE(op_jless);
        }

        case op_jlesseq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
            LAST_OPCODE(op_jlesseq);
        }

        case op_jgreater: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareGreater, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
            LAST_OPCODE(op_jgreater);
        }

        case op_jgreatereq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
            LAST_OPCODE(op_jgreatereq);
        }

        case op_jnless: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareLess, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jnless);
        }

        case op_jnlesseq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jnlesseq);
        }

        case op_jngreater: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareGreater, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jngreater);
        }

        case op_jngreatereq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jngreatereq);
        }

        case op_loop_if_less: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareLess, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_less)), condition);
            LAST_OPCODE(op_loop_if_less);
        }

        case op_loop_if_lesseq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareLessEq, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_lesseq)), condition);
            LAST_OPCODE(op_loop_if_lesseq);
        }

        case op_loop_if_greater: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareGreater, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greater)), condition);
            LAST_OPCODE(op_loop_if_greater);
        }

        case op_loop_if_greatereq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            NodeIndex op1 = get(currentInstruction[1].u.operand);
            NodeIndex op2 = get(currentInstruction[2].u.operand);
            NodeIndex condition = addToGraph(CompareGreaterEq, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_greatereq)), condition);
            LAST_OPCODE(op_loop_if_greatereq);
        }
        case op_ret:
            if (m_inlineStackTop->m_inlineCallFrame) {
                if (m_inlineStackTop->m_returnValue != InvalidVirtualRegister)
                    setDirect(m_inlineStackTop->m_returnValue, get(currentInstruction[1].u.operand));
                m_inlineStackTop->m_didReturn = true;
                if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
                    // If we're returning from the first block, then we're done parsing.
                    ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.m_blocks.size() - 1);
                    shouldContinueParsing = false;
                    LAST_OPCODE(op_ret);
                } else {
                    // If inlining created blocks, and we're doing a return, then we need some
                    // special linking.
                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
                    m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
                }
                if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
                    ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
                    addToGraph(Jump, OpInfo(NoBlock));
                    m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
                    m_inlineStackTop->m_didEarlyReturn = true;
                }
                LAST_OPCODE(op_ret);
            }
            addToGraph(Return, get(currentInstruction[1].u.operand));
            LAST_OPCODE(op_ret);

        case op_end:
            ASSERT(!m_inlineStackTop->m_inlineCallFrame);
            addToGraph(Return, get(currentInstruction[1].u.operand));
            LAST_OPCODE(op_end);

        case op_throw:
            addToGraph(Throw, get(currentInstruction[1].u.operand));
            LAST_OPCODE(op_throw);

        case op_throw_reference_error:
            addToGraph(ThrowReferenceError);
            LAST_OPCODE(op_throw_reference_error);
        case op_call:
            handleCall(interpreter, currentInstruction, Call, CodeForCall);
            NEXT_OPCODE(op_call);

        case op_construct:
            handleCall(interpreter, currentInstruction, Construct, CodeForConstruct);
            NEXT_OPCODE(op_construct);

        case op_call_put_result:
            NEXT_OPCODE(op_call_put_result);

        case op_resolve: {
            PredictedType prediction = getPrediction();

            unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];

            NodeIndex resolve = addToGraph(Resolve, OpInfo(identifier), OpInfo(prediction));
            set(currentInstruction[1].u.operand, resolve);

            NEXT_OPCODE(op_resolve);
        }

        case op_resolve_base: {
            PredictedType prediction = getPrediction();

            unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];

            NodeIndex resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(identifier), OpInfo(prediction));
            set(currentInstruction[1].u.operand, resolve);

            NEXT_OPCODE(op_resolve_base);
        }

        case op_resolve_global: {
            PredictedType prediction = getPrediction();

            NodeIndex resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
            m_graph.m_resolveGlobalData.append(ResolveGlobalData());
            ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
            data.identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
            data.resolveInfoIndex = m_globalResolveNumber++;
            set(currentInstruction[1].u.operand, resolve);

            NEXT_OPCODE(op_resolve_global);
        }
        case op_loop_hint: {
            // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
            // OSR can only happen at basic block boundaries. Assert that these two statements
            // are compatible.
            ASSERT_UNUSED(blockBegin, m_currentIndex == blockBegin);

            // We never do OSR into an inlined code block. That could not happen, since OSR
            // looks up the code block that is the replacement for the baseline JIT code
            // block. Hence, machine code block = true code block = not inline code block.
            if (!m_inlineStackTop->m_caller)
                m_currentBlock->isOSRTarget = true;

            // Emit a phantom node to ensure that there is a placeholder node for this bytecode
            // op.
            addToGraph(Phantom);

            NEXT_OPCODE(op_loop_hint);
        }

        case op_init_lazy_reg: {
            set(currentInstruction[1].u.operand, getJSConstantForValue(JSValue()));
            NEXT_OPCODE(op_init_lazy_reg);
        }

        case op_create_activation: {
            set(currentInstruction[1].u.operand, addToGraph(CreateActivation, get(currentInstruction[1].u.operand)));
            NEXT_OPCODE(op_create_activation);
        }

        case op_tear_off_activation: {
            // This currently ignores arguments because we don't support them yet.
            addToGraph(TearOffActivation, get(currentInstruction[1].u.operand));
            NEXT_OPCODE(op_tear_off_activation);
        }

        case op_new_func: {
            if (!currentInstruction[3].u.operand) {
                set(currentInstruction[1].u.operand,
                    addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand)));
            } else {
                set(currentInstruction[1].u.operand,
                    addToGraph(
                        NewFunction,
                        OpInfo(currentInstruction[2].u.operand),
                        get(currentInstruction[1].u.operand)));
            }
            NEXT_OPCODE(op_new_func);
        }

        case op_new_func_exp: {
            set(currentInstruction[1].u.operand,
                addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand)));
            NEXT_OPCODE(op_new_func_exp);
        }

        default:
            // Parse failed! This should not happen because the capabilities checker
            // should have caught it.
            ASSERT_NOT_REACHED();
        }

        ASSERT(canCompileOpcode(opcodeID));
    }
}
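
// processPhiStack() drains the worklist of Phi nodes created while parsing. For each Phi it
// visits every predecessor block: if the predecessor has no definition of the variable yet,
// a fresh Phi is inserted there (and itself queued); otherwise the Phi's children are pointed
// at the predecessor's SetLocal/Phi/Flush/SetArgument, splitting off an extra Phi when more
// than three incoming values must be merged.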
template<ByteCodeParser::PhiStackType stackType>
void ByteCodeParser::processPhiStack()
{
    Vector<PhiStackEntry, 16>& phiStack = (stackType == ArgumentPhiStack) ? m_argumentPhiStack : m_localPhiStack;

    while (!phiStack.isEmpty()) {
        PhiStackEntry entry = phiStack.last();
        phiStack.removeLast();

        PredecessorList& predecessors = entry.m_block->m_predecessors;
        unsigned varNo = entry.m_varNo;
        VariableAccessData* dataForPhi = m_graph[entry.m_phi].variableAccessData();

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        dataLog("   Handling phi entry for var %u, phi @%u.\n", entry.m_varNo, entry.m_phi);
#endif

        for (size_t i = 0; i < predecessors.size(); ++i) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
            dataLog("     Dealing with predecessor block %u.\n", predecessors[i]);
#endif

            BasicBlock* predecessorBlock = m_graph.m_blocks[predecessors[i]].get();

            NodeIndex& var = (stackType == ArgumentPhiStack) ? predecessorBlock->variablesAtTail.argument(varNo) : predecessorBlock->variablesAtTail.local(varNo);

            NodeIndex valueInPredecessor = var;
            if (valueInPredecessor == NoNode) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                dataLog("      Did not find node, adding phi.\n");
#endif

                valueInPredecessor = insertPhiNode(OpInfo(newVariableAccessData(stackType == ArgumentPhiStack ? argumentToOperand(varNo) : static_cast<int>(varNo))), predecessorBlock);
                var = valueInPredecessor;
                if (stackType == ArgumentPhiStack)
                    predecessorBlock->variablesAtHead.setArgumentFirstTime(varNo, valueInPredecessor);
                else
                    predecessorBlock->variablesAtHead.setLocalFirstTime(varNo, valueInPredecessor);
                phiStack.append(PhiStackEntry(predecessorBlock, valueInPredecessor, varNo));
            } else if (m_graph[valueInPredecessor].op() == GetLocal) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                dataLog("      Found GetLocal @%u.\n", valueInPredecessor);
#endif

                // We want to ensure that the VariableAccessDatas are identical between the
                // GetLocal and its block-local Phi. Strictly speaking we only need the two
                // to be unified. But for efficiency, we want the code that creates GetLocals
                // and Phis to try to reuse VariableAccessDatas as much as possible.
                ASSERT(m_graph[valueInPredecessor].variableAccessData() == m_graph[m_graph[valueInPredecessor].child1().index()].variableAccessData());

                valueInPredecessor = m_graph[valueInPredecessor].child1().index();
            } else {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                dataLog("      Found @%u.\n", valueInPredecessor);
#endif
            }
            ASSERT(m_graph[valueInPredecessor].op() == SetLocal
                   || m_graph[valueInPredecessor].op() == Phi
                   || m_graph[valueInPredecessor].op() == Flush
                   || (m_graph[valueInPredecessor].op() == SetArgument
                       && stackType == ArgumentPhiStack));

            VariableAccessData* dataForPredecessor = m_graph[valueInPredecessor].variableAccessData();

            dataForPredecessor->unify(dataForPhi);

            Node* phiNode = &m_graph[entry.m_phi];
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
            dataLog("      Ref count of @%u = %u.\n", entry.m_phi, phiNode->refCount());
#endif
            if (phiNode->refCount()) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                dataLog("      Reffing @%u.\n", valueInPredecessor);
#endif
                m_graph.ref(valueInPredecessor);
            }

            if (!phiNode->child1()) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                dataLog("      Setting @%u->child1 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
                phiNode->children.setChild1(Edge(valueInPredecessor));
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                dataLog("      Children of @%u: ", entry.m_phi);
                phiNode->dumpChildren(WTF::dataFile());
                dataLog(".\n");
#endif
                continue;
            }
            if (!phiNode->child2()) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                dataLog("      Setting @%u->child2 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
                phiNode->children.setChild2(Edge(valueInPredecessor));
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                dataLog("      Children of @%u: ", entry.m_phi);
                phiNode->dumpChildren(WTF::dataFile());
                dataLog(".\n");
#endif
                continue;
            }
            if (!phiNode->child3()) {
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                dataLog("      Setting @%u->child3 = @%u.\n", entry.m_phi, valueInPredecessor);
#endif
                phiNode->children.setChild3(Edge(valueInPredecessor));
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
                dataLog("      Children of @%u: ", entry.m_phi);
                phiNode->dumpChildren(WTF::dataFile());
                dataLog(".\n");
#endif
                continue;
            }

            NodeIndex newPhi = insertPhiNode(OpInfo(dataForPhi), entry.m_block);

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
            dataLog("      Splitting @%u, created @%u.\n", entry.m_phi, newPhi);
#endif

            phiNode = &m_graph[entry.m_phi]; // reload after vector resize
            Node& newPhiNode = m_graph[newPhi];
            if (phiNode->refCount())
                m_graph.ref(newPhi);

            newPhiNode.children = phiNode->children;

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
            dataLog("      Children of @%u: ", newPhi);
            newPhiNode.dumpChildren(WTF::dataFile());
            dataLog(".\n");
#endif

            phiNode->children.initialize(newPhi, valueInPredecessor, NoNode);

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
            dataLog("      Children of @%u: ", entry.m_phi);
            phiNode->dumpChildren(WTF::dataFile());
            dataLog(".\n");
#endif
        }
    }
}
void ByteCodeParser::fixVariableAccessPredictions()
{
    for (unsigned i = 0; i < m_graph.m_variableAccessData.size(); ++i) {
        VariableAccessData* data = &m_graph.m_variableAccessData[i];
        data->find()->predict(data->nonUnifiedPrediction());
    }
}
void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTargets)
{
    ASSERT(!block->isLinked);
    ASSERT(!block->isEmpty());
    Node& node = m_graph[block->last()];
    ASSERT(node.isTerminal());

    switch (node.op()) {
    case Jump:
        node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex());
#endif
        break;

    case Branch:
        node.setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.takenBytecodeOffsetDuringParsing()));
        node.setNotTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node.notTakenBytecodeOffsetDuringParsing()));
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node.takenBlockIndex()].get(), node.takenBlockIndex(), m_graph.m_blocks[node.notTakenBlockIndex()].get(), node.notTakenBlockIndex());
#endif
        break;

    default:
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Marking basic block %p as linked.\n", block);
#endif
        break;
    }

#if !ASSERT_DISABLED
    block->isLinked = true;
#endif
}
void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets)
{
    for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
        if (unlinkedBlocks[i].m_needsNormalLinking) {
            linkBlock(m_graph.m_blocks[unlinkedBlocks[i].m_blockIndex].get(), possibleTargets);
            unlinkedBlocks[i].m_needsNormalLinking = false;
        }
    }
}

void ByteCodeParser::handleSuccessor(Vector<BlockIndex, 16>& worklist, BlockIndex blockIndex, BlockIndex successorIndex)
{
    BasicBlock* successor = m_graph.m_blocks[successorIndex].get();
    if (!successor->isReachable) {
        successor->isReachable = true;
        worklist.append(successorIndex);
    }

    successor->m_predecessors.append(blockIndex);
}
void ByteCodeParser::determineReachability()
{
    Vector<BlockIndex, 16> worklist;
    worklist.append(0);
    m_graph.m_blocks[0]->isReachable = true;
    while (!worklist.isEmpty()) {
        BlockIndex index = worklist.last();
        worklist.removeLast();

        BasicBlock* block = m_graph.m_blocks[index].get();
        ASSERT(block->isLinked);

        Node& node = m_graph[block->last()];
        ASSERT(node.isTerminal());

        if (node.isJump())
            handleSuccessor(worklist, index, node.takenBlockIndex());
        else if (node.isBranch()) {
            handleSuccessor(worklist, index, node.takenBlockIndex());
            handleSuccessor(worklist, index, node.notTakenBlockIndex());
        }
    }
}
void ByteCodeParser::buildOperandMapsIfNecessary()
{
    if (m_haveBuiltOperandMaps)
        return;

    for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
        m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);
    for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i) {
        JSValue value = m_codeBlock->getConstant(i + FirstConstantRegisterIndex);
        if (!value)
            m_emptyJSValueIndex = i + FirstConstantRegisterIndex;
        else
            m_jsValueMap.add(JSValue::encode(value), i + FirstConstantRegisterIndex);
    }

    m_haveBuiltOperandMaps = true;
}
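
// The InlineStackEntry constructor sets up per-inlinee bookkeeping. For an inlined code
// block it records an InlineCallFrame on the machine code block and builds m_identifierRemap
// and m_constantRemap so the inlinee's identifier and constant indices can be translated into
// the enclosing code block's tables; for the machine code block itself both remaps are the
// identity.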
ByteCodeParser::InlineStackEntry::InlineStackEntry(ByteCodeParser* byteCodeParser, CodeBlock* codeBlock, CodeBlock* profiledBlock, BlockIndex callsiteBlockHead, VirtualRegister calleeVR, JSFunction* callee, VirtualRegister returnValueVR, VirtualRegister inlineCallFrameStart, CodeSpecializationKind kind)
    : m_byteCodeParser(byteCodeParser)
    , m_codeBlock(codeBlock)
    , m_profiledBlock(profiledBlock)
    , m_calleeVR(calleeVR)
    , m_exitProfile(profiledBlock->exitProfile())
    , m_callsiteBlockHead(callsiteBlockHead)
    , m_returnValue(returnValueVR)
    , m_lazyOperands(profiledBlock->lazyOperandValueProfiles())
    , m_didReturn(false)
    , m_didEarlyReturn(false)
    , m_caller(byteCodeParser->m_inlineStackTop)
{
    m_argumentPositions.resize(codeBlock->numParameters());
    for (unsigned i = codeBlock->numParameters(); i--;) {
        byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition());
        ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last();
        m_argumentPositions[i] = argumentPosition;
    }

    if (m_caller) {
        // Inline case.
        ASSERT(codeBlock != byteCodeParser->m_codeBlock);
        ASSERT(callee);
        ASSERT(calleeVR != InvalidVirtualRegister);
        ASSERT(inlineCallFrameStart != InvalidVirtualRegister);
        ASSERT(callsiteBlockHead != NoBlock);

        InlineCallFrame inlineCallFrame;
        inlineCallFrame.executable.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), codeBlock->ownerExecutable());
        inlineCallFrame.stackOffset = inlineCallFrameStart + RegisterFile::CallFrameHeaderSize;
        inlineCallFrame.callee.set(*byteCodeParser->m_globalData, byteCodeParser->m_codeBlock->ownerExecutable(), callee);
        inlineCallFrame.caller = byteCodeParser->currentCodeOrigin();
        inlineCallFrame.arguments.resize(codeBlock->numParameters()); // Set the number of arguments including this, but don't configure the value recoveries, yet.
        inlineCallFrame.isCall = isCall(kind);
        byteCodeParser->m_codeBlock->inlineCallFrames().append(inlineCallFrame);
        m_inlineCallFrame = &byteCodeParser->m_codeBlock->inlineCallFrames().last();

        byteCodeParser->buildOperandMapsIfNecessary();

        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
        m_constantRemap.resize(codeBlock->numberOfConstantRegisters());

        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
            StringImpl* rep = codeBlock->identifier(i).impl();
            IdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_codeBlock->numberOfIdentifiers());
            if (result.isNewEntry)
                byteCodeParser->m_codeBlock->addIdentifier(Identifier(byteCodeParser->m_globalData, rep));
            m_identifierRemap[i] = result.iterator->second;
        }
        for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
            JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
            if (!value) {
                if (byteCodeParser->m_emptyJSValueIndex == UINT_MAX) {
                    byteCodeParser->m_emptyJSValueIndex = byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex;
                    byteCodeParser->m_codeBlock->addConstant(JSValue());
                    byteCodeParser->m_constants.append(ConstantRecord());
                }
                m_constantRemap[i] = byteCodeParser->m_emptyJSValueIndex;
                continue;
            }
            JSValueMap::AddResult result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
            if (result.isNewEntry) {
                byteCodeParser->m_codeBlock->addConstant(value);
                byteCodeParser->m_constants.append(ConstantRecord());
            }
            m_constantRemap[i] = result.iterator->second;
        }

        m_callsiteBlockHeadNeedsLinking = true;
    } else {
        // Machine code block case.
        ASSERT(codeBlock == byteCodeParser->m_codeBlock);
        ASSERT(!callee);
        ASSERT(calleeVR == InvalidVirtualRegister);
        ASSERT(returnValueVR == InvalidVirtualRegister);
        ASSERT(inlineCallFrameStart == InvalidVirtualRegister);
        ASSERT(callsiteBlockHead == NoBlock);

        m_inlineCallFrame = 0;

        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
        m_constantRemap.resize(codeBlock->numberOfConstantRegisters());

        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
            m_identifierRemap[i] = i;
        for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
            m_constantRemap[i] = i + FirstConstantRegisterIndex;

        m_callsiteBlockHeadNeedsLinking = false;
    }

    for (size_t i = 0; i < m_constantRemap.size(); ++i)
        ASSERT(m_constantRemap[i] >= static_cast<unsigned>(FirstConstantRegisterIndex));

    byteCodeParser->m_inlineStackTop = this;
}
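
// parseCodeBlock() drives parseBlock() once per stretch of bytecode between jump targets,
// creating a fresh BasicBlock for each stretch unless the previous block is still empty and
// can simply be repurposed at the new bytecode offset.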
void ByteCodeParser::parseCodeBlock()
{
    CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("Parsing code block %p. codeType = %s, numCapturedVars = %u, needsFullScopeChain = %s, needsActivation = %s, isStrictMode = %s\n",
            codeBlock,
            codeTypeToString(codeBlock->codeType()),
            codeBlock->m_numCapturedVars,
            codeBlock->needsFullScopeChain()?"true":"false",
            codeBlock->ownerExecutable()->needsActivation()?"true":"false",
            codeBlock->ownerExecutable()->isStrictMode()?"true":"false");
#endif

    for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= codeBlock->numberOfJumpTargets(); ++jumpTargetIndex) {
        // The maximum bytecode offset to go into the current basicblock is either the next jump target, or the end of the instructions.
        unsigned limit = jumpTargetIndex < codeBlock->numberOfJumpTargets() ? codeBlock->jumpTarget(jumpTargetIndex) : codeBlock->instructions().size();
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Parsing bytecode with limit %p bc#%u at inline depth %u.\n", m_inlineStackTop->executable(), limit, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
#endif
        ASSERT(m_currentIndex < limit);

        // Loop until we reach the current limit (i.e. next jump target).
        do {
            if (!m_currentBlock) {
                // Check if we can use the last block.
                if (!m_graph.m_blocks.isEmpty() && m_graph.m_blocks.last()->isEmpty()) {
                    // This must be a block belonging to us.
                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
                    // Either the block is linkable or it isn't. If it's linkable then it's the last
                    // block in the blockLinkingTargets list. If it's not then the last block will
                    // have a lower bytecode index that the one we're about to give to this block.
                    if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin != m_currentIndex) {
                        // Make the block linkable.
                        ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin < m_currentIndex);
                        m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size() - 1);
                    }
                    // Change its bytecode begin and continue.
                    m_currentBlock = m_graph.m_blocks.last().get();
#if DFG_ENABLE(DEBUG_VERBOSE)
                    dataLog("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
#endif
                    m_currentBlock->bytecodeBegin = m_currentIndex;
                } else {
                    OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals));
#if DFG_ENABLE(DEBUG_VERBOSE)
                    dataLog("Creating basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(m_inlineStackTop->m_inlineCallFrame));
#endif
                    m_currentBlock = block.get();
                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex]->bytecodeBegin < m_currentIndex);
                    m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
                    m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size());
                    m_graph.m_blocks.append(block.release());
                    prepareToParseBlock();
                }
            }

            bool shouldContinueParsing = parseBlock(limit);

            // We should not have gone beyond the limit.
            ASSERT(m_currentIndex <= limit);

            // We should have planted a terminal, or we just gave up because
            // we realized that the jump target information is imprecise, or we
            // are at the end of an inline function, or we realized that we
            // should stop parsing because there was a return in the first
            // basic block.
            ASSERT(m_currentBlock->isEmpty() || m_graph.last().isTerminal() || (m_currentIndex == codeBlock->instructions().size() && m_inlineStackTop->m_inlineCallFrame) || !shouldContinueParsing);

            if (!shouldContinueParsing)
                return;

            m_currentBlock = 0;
        } while (m_currentIndex < limit);
    }

    // Should have reached the end of the instructions.
    ASSERT(m_currentIndex == codeBlock->instructions().size());
}
bool ByteCodeParser::parse()
{
    // Set during construction.
    ASSERT(!m_currentIndex);

#if DFG_ENABLE(ALL_VARIABLES_CAPTURED)
    // We should be pretending that the code has an activation.
    ASSERT(m_graph.needsActivation());
#endif

    InlineStackEntry inlineStackEntry(this, m_codeBlock, m_profiledBlock, NoBlock, InvalidVirtualRegister, 0, InvalidVirtualRegister, InvalidVirtualRegister, CodeForCall);

    parseCodeBlock();

    linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
    determineReachability();
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
    dataLog("Processing local variable phis.\n");
#endif

    m_currentProfilingIndex = m_currentIndex;

    processPhiStack<LocalPhiStack>();
#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
    dataLog("Processing argument phis.\n");
#endif
    processPhiStack<ArgumentPhiStack>();

    fixVariableAccessPredictions();

    m_graph.m_preservedVars = m_preservedVars;
    m_graph.m_localVars = m_numLocals;
    m_graph.m_parameterSlots = m_parameterSlots;

    return true;
}
bool parse(Graph& graph)
{
#if DFG_DEBUG_LOCAL_DISBALE
    UNUSED_PARAM(graph);
    return false;
#else
    return ByteCodeParser(graph).parse();
#endif
}

} } // namespace JSC::DFG

#endif