/*
 * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "DFGByteCodeParser.h"

#include "ArrayConstructor.h"
#include "BasicBlockLocation.h"
#include "CallLinkStatus.h"
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "DFGArrayMode.h"
#include "DFGCapabilities.h"
#include "DFGJITCode.h"
#include "GetByIdStatus.h"
#include "JSLexicalEnvironment.h"
#include "JSCInlines.h"
#include "PreciseJumpTargets.h"
#include "PutByIdStatus.h"
#include "StackAlignment.h"
#include "StringConstructor.h"
#include <wtf/CommaPrinter.h>
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>
#include <wtf/StdLibExtras.h>
namespace JSC { namespace DFG {

static const bool verbose = false;
class ConstantBufferKey {
public:
    ConstantBufferKey()
        : m_codeBlock(0)
        , m_index(0)
    {
    }

    ConstantBufferKey(WTF::HashTableDeletedValueType)
        : m_codeBlock(0)
        , m_index(1)
    {
    }

    ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
        : m_codeBlock(codeBlock)
        , m_index(index)
    {
    }

    bool operator==(const ConstantBufferKey& other) const
    {
        return m_codeBlock == other.m_codeBlock
            && m_index == other.m_index;
    }

    unsigned hash() const
    {
        return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
    }

    bool isHashTableDeletedValue() const
    {
        return !m_codeBlock && m_index;
    }

    CodeBlock* codeBlock() const { return m_codeBlock; }
    unsigned index() const { return m_index; }

private:
    CodeBlock* m_codeBlock;
    unsigned m_index;
};
struct ConstantBufferKeyHash {
    static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
    static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
    {
        return a == b;
    }

    static const bool safeToCompareToEmptyOrDeleted = true;
};

} } // namespace JSC::DFG
namespace WTF {

template<typename T> struct DefaultHash;
template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
    typedef JSC::DFG::ConstantBufferKeyHash Hash;
};

template<typename T> struct HashTraits;
template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };

} // namespace WTF
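// Note: these WTF template specializations are what allow ConstantBufferKey to be used as a
// key type in WTF hash tables, such as the parser's m_constantBufferCache HashMap below.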
namespace JSC { namespace DFG {

// === ByteCodeParser ===
//
// This class is used to compile the dataflow graph from a CodeBlock.
class ByteCodeParser {
public:
    ByteCodeParser(Graph& graph)
        : m_vm(&graph.m_vm)
        , m_codeBlock(graph.m_codeBlock)
        , m_profiledBlock(graph.m_profiledBlock)
        , m_graph(graph)
        , m_currentBlock(0)
        , m_currentIndex(0)
        , m_constantUndefined(graph.freeze(jsUndefined()))
        , m_constantNull(graph.freeze(jsNull()))
        , m_constantNaN(graph.freeze(jsNumber(PNaN)))
        , m_constantOne(graph.freeze(jsNumber(1)))
        , m_numArguments(m_codeBlock->numParameters())
        , m_numLocals(m_codeBlock->m_numCalleeRegisters)
        , m_parameterSlots(0)
        , m_numPassedVarArgs(0)
        , m_inlineStackTop(0)
        , m_haveBuiltOperandMaps(false)
        , m_currentInstruction(0)
        , m_hasDebuggerEnabled(graph.hasDebuggerEnabled())
    {
        ASSERT(m_profiledBlock);
    }
    // Parse a full CodeBlock of bytecode.
    bool parse();

private:
    struct InlineStackEntry;

    // Just parse from m_currentIndex to the end of the current CodeBlock.
    void parseCodeBlock();
    void ensureLocals(unsigned newNumLocals)
    {
        if (newNumLocals <= m_numLocals)
            return;
        m_numLocals = newNumLocals;
        for (size_t i = 0; i < m_graph.numBlocks(); ++i)
            m_graph.block(i)->ensureLocals(newNumLocals);
    }
    // Helper for min and max.
    template<typename ChecksFunctor>
    bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks);
    // Handle calls. This resolves issues surrounding inlining and intrinsics.
    void handleCall(
        int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
        Node* callTarget, int argCount, int registerOffset, CallLinkStatus,
        SpeculatedType prediction);
    void handleCall(
        int result, NodeType op, InlineCallFrame::Kind, unsigned instructionSize,
        Node* callTarget, int argCount, int registerOffset, CallLinkStatus);
    void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
    void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind);
    void handleVarargsCall(Instruction* pc, NodeType op, CodeSpecializationKind);
    void emitFunctionChecks(CallVariant, Node* callTarget, VirtualRegister thisArgument);
    void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis);
    unsigned inliningCost(CallVariant, int argumentCountIncludingThis, CodeSpecializationKind); // Return UINT_MAX if it's not an inlining candidate. By convention, intrinsics have a cost of 1.
    // Handle inlining. Return true if it succeeded, false if we need to plant a call.
    bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, VirtualRegister thisArgument, VirtualRegister argumentsArgument, unsigned argumentsOffset, int argumentCountIncludingThis, unsigned nextOffset, NodeType callOp, InlineCallFrame::Kind, SpeculatedType prediction);
    enum CallerLinkability { CallerDoesNormalLinking, CallerLinksManually };
    template<typename ChecksFunctor>
    bool attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks);
    template<typename ChecksFunctor>
    void inlineCall(Node* callTargetNode, int resultOperand, CallVariant, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind, CallerLinkability, const ChecksFunctor& insertChecks);
    void cancelLinkingForBlock(InlineStackEntry*, BasicBlock*); // Only works when the given block is the last one to have been added for that inline stack entry.
    // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
    template<typename ChecksFunctor>
    bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks);
    template<typename ChecksFunctor>
    bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType, const ChecksFunctor& insertChecks);
    template<typename ChecksFunctor>
    bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind, const ChecksFunctor& insertChecks);
    Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
    Node* handleGetByOffset(SpeculatedType, Node* base, const StructureSet&, unsigned identifierNumber, PropertyOffset, NodeType op = GetByOffset);
    void handleGetById(
        int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
        const GetByIdStatus&);
    void emitPutById(
        Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
    void handlePutById(
        Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
        bool isDirect);
    void emitChecks(const ConstantStructureCheckVector&);
    void prepareToParseBlock();
    void clearCaches();

    // Parse a single basic block of bytecode instructions.
    bool parseBlock(unsigned limit);
    // Link block successors.
    void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
    void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
    VariableAccessData* newVariableAccessData(VirtualRegister operand)
    {
        ASSERT(!operand.isConstant());

        m_graph.m_variableAccessData.append(VariableAccessData(operand));
        return &m_graph.m_variableAccessData.last();
    }
    // Get/Set the operands/result of a bytecode instruction.
    Node* getDirect(VirtualRegister operand)
    {
        ASSERT(!operand.isConstant());

        // Is this an argument?
        if (operand.isArgument())
            return getArgument(operand);

        return getLocal(operand);
    }
    Node* get(VirtualRegister operand)
    {
        if (operand.isConstant()) {
            unsigned constantIndex = operand.toConstantIndex();
            unsigned oldSize = m_constants.size();
            if (constantIndex >= oldSize || !m_constants[constantIndex]) {
                const CodeBlock& codeBlock = *m_inlineStackTop->m_codeBlock;
                JSValue value = codeBlock.getConstant(operand.offset());
                SourceCodeRepresentation sourceCodeRepresentation = codeBlock.constantSourceCodeRepresentation(operand.offset());
                if (constantIndex >= oldSize) {
                    m_constants.grow(constantIndex + 1);
                    for (unsigned i = oldSize; i < m_constants.size(); ++i)
                        m_constants[i] = nullptr;
                }

                Node* constantNode = nullptr;
                if (sourceCodeRepresentation == SourceCodeRepresentation::Double)
                    constantNode = addToGraph(DoubleConstant, OpInfo(m_graph.freezeStrong(jsDoubleNumber(value.asNumber()))));
                else
                    constantNode = addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(value)));
                m_constants[constantIndex] = constantNode;
            }
            ASSERT(m_constants[constantIndex]);
            return m_constants[constantIndex];
        }

        if (inlineCallFrame()) {
            if (!inlineCallFrame()->isClosureCall) {
                JSFunction* callee = inlineCallFrame()->calleeConstant();
                if (operand.offset() == JSStack::Callee)
                    return weakJSConstant(callee);
            }
        } else if (operand.offset() == JSStack::Callee) {
            // We have to do some constant-folding here because this enables CreateThis folding. Note
            // that we don't have such watchpoint-based folding for inlined uses of Callee, since in that
            // case if the function is a singleton then we already know it.
            if (FunctionExecutable* executable = jsDynamicCast<FunctionExecutable*>(m_codeBlock->ownerExecutable())) {
                InferredValue* singleton = executable->singletonFunction();
                if (JSValue value = singleton->inferredValue()) {
                    m_graph.watchpoints().addLazily(singleton);
                    JSFunction* function = jsCast<JSFunction*>(value);
                    return weakJSConstant(function);
                }
            }
            return addToGraph(GetCallee);
        }

        return getDirect(m_inlineStackTop->remapOperand(operand));
    }
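    // Note on get(): m_constants is indexed by the operand's constant pool index and is grown
    // lazily, so each CodeBlock constant is materialized as at most one JSConstant/DoubleConstant
    // node per parse.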
    enum SetMode {
        // A normal set which follows a two-phase commit that spans code origins. During
        // the current code origin it issues a MovHint, and at the start of the next
        // code origin there will be a SetLocal. If the local needs flushing, the second
        // SetLocal will be preceded with a Flush.
        NormalSet,

        // A set where the SetLocal happens immediately and there is still a Flush. This
        // is relevant when assigning to a local in tricky situations for the delayed
        // SetLocal logic but where we know that we have not performed any side effects
        // within this code origin. This is a safe replacement for NormalSet anytime we
        // know that we have not yet performed side effects in this code origin.
        ImmediateSetWithFlush,

        // A set where the SetLocal happens immediately and we do not Flush it even if
        // this is a local that is marked as needing it. This is relevant when
        // initializing locals at the top of a function.
        ImmediateNakedSet
    };
    Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
    {
        addToGraph(MovHint, OpInfo(operand.offset()), value);

        DelayedSetLocal delayed(currentCodeOrigin(), operand, value);

        if (setMode == NormalSet) {
            m_setLocalQueue.append(delayed);
            return 0;
        }

        return delayed.execute(this, setMode);
    }
    void processSetLocalQueue()
    {
        for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
            m_setLocalQueue[i].execute(this);
        m_setLocalQueue.resize(0);
    }
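    // Note: processSetLocalQueue() completes the two-phase commit described for NormalSet above.
    // setDirect() emits the MovHint in the current code origin and queues the store; the queued
    // SetLocal (plus any Flush) is issued here once the parser has moved on to the next origin.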
    Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
    {
        return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
    }
    Node* injectLazyOperandSpeculation(Node* node)
    {
        ASSERT(node->op() == GetLocal);
        ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
        ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        LazyOperandValueProfileKey key(m_currentIndex, node->local());
        SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
        node->variableAccessData()->predict(prediction);
        return node;
    }
    // Used in implementing get/set, above, where the operand is a local variable.
    Node* getLocal(VirtualRegister operand)
    {
        unsigned local = operand.toLocal();

        Node* node = m_currentBlock->variablesAtTail.local(local);

        // This has two goals: 1) link together variable access datas, and 2)
        // try to avoid creating redundant GetLocals. (1) is required for
        // correctness - no other phase will ensure that block-local variable
        // access data unification is done correctly. (2) is purely opportunistic
        // and is meant as a compile-time optimization only.

        VariableAccessData* variable;

        if (node) {
            variable = node->variableAccessData();

            switch (node->op()) {
            case GetLocal:
                return node;
            case SetLocal:
                return node->child1().node();
            default:
                break;
            }
        } else
            variable = newVariableAccessData(operand);

        node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
        m_currentBlock->variablesAtTail.local(local) = node;
        return node;
    }
    Node* setLocal(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
    {
        CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
        m_currentSemanticOrigin = semanticOrigin;

        unsigned local = operand.toLocal();

        if (setMode != ImmediateNakedSet) {
            ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
            if (argumentPosition)
                flushDirect(operand, argumentPosition);
            else if (m_hasDebuggerEnabled && operand == m_codeBlock->scopeRegister())
                flush(operand);
        }

        VariableAccessData* variableAccessData = newVariableAccessData(operand);
        variableAccessData->mergeStructureCheckHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
        variableAccessData->mergeCheckArrayHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
        Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
        m_currentBlock->variablesAtTail.local(local) = node;

        m_currentSemanticOrigin = oldSemanticOrigin;
        return node;
    }
    // Used in implementing get/set, above, where the operand is an argument.
    Node* getArgument(VirtualRegister operand)
    {
        unsigned argument = operand.toArgument();
        ASSERT(argument < m_numArguments);

        Node* node = m_currentBlock->variablesAtTail.argument(argument);

        VariableAccessData* variable;

        if (node) {
            variable = node->variableAccessData();

            switch (node->op()) {
            case GetLocal:
                return node;
            case SetLocal:
                return node->child1().node();
            default:
                break;
            }
        } else
            variable = newVariableAccessData(operand);

        node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
        m_currentBlock->variablesAtTail.argument(argument) = node;
        return node;
    }
    Node* setArgument(const CodeOrigin& semanticOrigin, VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
    {
        CodeOrigin oldSemanticOrigin = m_currentSemanticOrigin;
        m_currentSemanticOrigin = semanticOrigin;

        unsigned argument = operand.toArgument();
        ASSERT(argument < m_numArguments);

        VariableAccessData* variableAccessData = newVariableAccessData(operand);

        // Always flush arguments, except for 'this'. If 'this' is created by us,
        // then make sure that it's never unboxed.
        if (argument) {
            if (setMode != ImmediateNakedSet)
                flushDirect(operand);
        } else if (m_codeBlock->specializationKind() == CodeForConstruct)
            variableAccessData->mergeShouldNeverUnbox(true);

        variableAccessData->mergeStructureCheckHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadCache));
        variableAccessData->mergeCheckArrayHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(semanticOrigin.bytecodeIndex, BadIndexingType));
        Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
        m_currentBlock->variablesAtTail.argument(argument) = node;

        m_currentSemanticOrigin = oldSemanticOrigin;
        return node;
    }
    ArgumentPosition* findArgumentPositionForArgument(int argument)
    {
        InlineStackEntry* stack = m_inlineStackTop;
        while (stack->m_inlineCallFrame)
            stack = stack->m_caller;
        return stack->m_argumentPositions[argument];
    }
    ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
    {
        for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
            InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
            if (!inlineCallFrame)
                break;
            if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize))
                continue;
            if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
                continue;
            if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size()))
                continue;

            int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
            return stack->m_argumentPositions[argument];
        }
        return 0;
    }
    ArgumentPosition* findArgumentPosition(VirtualRegister operand)
    {
        if (operand.isArgument())
            return findArgumentPositionForArgument(operand.toArgument());
        return findArgumentPositionForLocal(operand);
    }
    void flush(VirtualRegister operand)
    {
        flushDirect(m_inlineStackTop->remapOperand(operand));
    }

    void flushDirect(VirtualRegister operand)
    {
        flushDirect(operand, findArgumentPosition(operand));
    }

    void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
    {
        ASSERT(!operand.isConstant());

        Node* node = m_currentBlock->variablesAtTail.operand(operand);

        VariableAccessData* variable;

        if (node)
            variable = node->variableAccessData();
        else
            variable = newVariableAccessData(operand);

        node = addToGraph(Flush, OpInfo(variable));
        m_currentBlock->variablesAtTail.operand(operand) = node;
        if (argumentPosition)
            argumentPosition->addVariable(variable);
    }
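    // Note (descriptive, not from the original): roughly, a Flush keeps the operand's current
    // value live in its stack slot (e.g. for callers, OSR exit, or the debugger), and tying the
    // variable to its ArgumentPosition, when one exists, lets unboxing decisions be merged across
    // every inlined frame that shares that argument.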
    void flush(InlineStackEntry* inlineStackEntry)
    {
        int numArguments;
        if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
            ASSERT(!m_hasDebuggerEnabled);
            numArguments = inlineCallFrame->arguments.size();
            if (inlineCallFrame->isClosureCall)
                flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee)));
            if (inlineCallFrame->isVarargs())
                flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ArgumentCount)));
        } else
            numArguments = inlineStackEntry->m_codeBlock->numParameters();
        for (unsigned argument = numArguments; argument-- > 1;)
            flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
        if (m_hasDebuggerEnabled)
            flush(m_codeBlock->scopeRegister());
    }
    void flushForTerminal()
    {
        for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
            flush(inlineStackEntry);
    }

    void flushForReturn()
    {
        flush(m_inlineStackTop);
    }
    void flushIfTerminal(SwitchData& data)
    {
        if (data.fallThrough.bytecodeIndex() > m_currentIndex)
            return;

        for (unsigned i = data.cases.size(); i--;) {
            if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
                return;
        }

        flushForTerminal();
    }
    // Assumes that the constant should be strongly marked.
    Node* jsConstant(JSValue constantValue)
    {
        return addToGraph(JSConstant, OpInfo(m_graph.freezeStrong(constantValue)));
    }

    Node* weakJSConstant(JSValue constantValue)
    {
        return addToGraph(JSConstant, OpInfo(m_graph.freeze(constantValue)));
    }
    // Helper functions to get/set the this value.
    Node* getThis()
    {
        return get(m_inlineStackTop->m_codeBlock->thisRegister());
    }

    void setThis(Node* value)
    {
        set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
    }
    InlineCallFrame* inlineCallFrame()
    {
        return m_inlineStackTop->m_inlineCallFrame;
    }

    CodeOrigin currentCodeOrigin()
    {
        return CodeOrigin(m_currentIndex, inlineCallFrame());
    }

    NodeOrigin currentNodeOrigin()
    {
        // FIXME: We should set the forExit origin only on those nodes that can exit.
        // https://bugs.webkit.org/show_bug.cgi?id=145204
        if (m_currentSemanticOrigin.isSet())
            return NodeOrigin(m_currentSemanticOrigin, currentCodeOrigin());
        return NodeOrigin(currentCodeOrigin());
    }
    BranchData* branchData(unsigned taken, unsigned notTaken)
    {
        // We assume that branches originating from bytecode always have a fall-through. We
        // use this assumption to avoid checking for the creation of terminal blocks.
        ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
        BranchData* data = m_graph.m_branchData.add();
        *data = BranchData::withBytecodeIndices(taken, notTaken);
        return data;
    }
    Node* addToGraph(Node* node)
    {
        if (Options::verboseDFGByteCodeParsing())
            dataLog(" appended ", node, " ", Graph::opName(node->op()), "\n");
        m_currentBlock->append(node);
        return node;
    }
    Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
    {
        Node* result = m_graph.addNode(
            SpecNone, op, currentNodeOrigin(), Edge(child1), Edge(child2),
            Edge(child3));
        return addToGraph(result);
    }
    Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
    {
        Node* result = m_graph.addNode(
            SpecNone, op, currentNodeOrigin(), child1, child2, child3);
        return addToGraph(result);
    }
    Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
    {
        Node* result = m_graph.addNode(
            SpecNone, op, currentNodeOrigin(), info, Edge(child1), Edge(child2),
            Edge(child3));
        return addToGraph(result);
    }
    Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
    {
        Node* result = m_graph.addNode(
            SpecNone, op, currentNodeOrigin(), info1, info2,
            Edge(child1), Edge(child2), Edge(child3));
        return addToGraph(result);
    }
    Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
    {
        Node* result = m_graph.addNode(
            SpecNone, Node::VarArg, op, currentNodeOrigin(), info1, info2,
            m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
        addToGraph(result);

        m_numPassedVarArgs = 0;

        return result;
    }
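    // Note: var-arg nodes do not use the fixed child1/child2/child3 slots; instead they record a
    // (firstChild, numChildren) range into m_graph.m_varArgChildren. That is why the count of
    // children appended via addVarArgChild() is captured above and then reset for the next node.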
    void addVarArgChild(Node* child)
    {
        m_graph.m_varArgChildren.append(Edge(child));
        m_numPassedVarArgs++;
    }
    Node* addCallWithoutSettingResult(
        NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
        SpeculatedType prediction)
    {
        addVarArgChild(callee);
        size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount;
        if (parameterSlots > m_parameterSlots)
            m_parameterSlots = parameterSlots;

        for (int i = 0; i < argCount; ++i)
            addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));

        return addToGraph(Node::VarArg, op, opInfo, OpInfo(prediction));
    }
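    // Note: m_parameterSlots is kept as a high-water mark of outgoing-call stack usage - the
    // callee's CallFrame header (minus the CallerFrame/PC words the callee writes itself) plus one
    // slot per argument - so enough space can be preallocated for the largest call in this frame.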
    Node* addCall(
        int result, NodeType op, OpInfo opInfo, Node* callee, int argCount, int registerOffset,
        SpeculatedType prediction)
    {
        Node* call = addCallWithoutSettingResult(
            op, opInfo, callee, argCount, registerOffset, prediction);
        VirtualRegister resultReg(result);
        if (resultReg.isValid())
            set(resultReg, call);
        return call;
    }
    Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
    {
        Node* objectNode = weakJSConstant(object);
        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
        return objectNode;
    }
    SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
    {
        ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
    }
    SpeculatedType getPrediction(unsigned bytecodeIndex)
    {
        SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);

        if (prediction == SpecNone) {
            // We have no information about what values this node generates. Give up
            // on executing this code, since we're likely to do more damage than good.
            addToGraph(ForceOSRExit);
        }

        return prediction;
    }
    SpeculatedType getPredictionWithoutOSRExit()
    {
        return getPredictionWithoutOSRExit(m_currentIndex);
    }

    SpeculatedType getPrediction()
    {
        return getPrediction(m_currentIndex);
    }
    ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
    {
        ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
        bool makeSafe = profile->outOfBounds(locker);
        return ArrayMode::fromObserved(locker, profile, action, makeSafe);
    }

    ArrayMode getArrayMode(ArrayProfile* profile)
    {
        return getArrayMode(profile, Array::Read);
    }
    Node* makeSafe(Node* node)
    {
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
            node->mergeFlags(NodeMayOverflowInDFG);
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            node->mergeFlags(NodeMayNegZeroInDFG);

        if (!isX86() && node->op() == ArithMod)
            return node;

        if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
            return node;

        switch (node->op()) {
        case UInt32ToNumber:
        case ArithAdd:
        case ArithSub:
        case ValueAdd:
        case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
            node->mergeFlags(NodeMayOverflowInBaseline);
            break;

        case ArithNegate:
            // Currently we can't tell the difference between a negation overflowing
            // (i.e. -(1 << 31)) or generating negative zero (i.e. -0). If it took slow
            // path then we assume that it did both of those things.
            node->mergeFlags(NodeMayOverflowInBaseline);
            node->mergeFlags(NodeMayNegZeroInBaseline);
            break;

        case ArithMul:
            // FIXME: We should detect cases where we only overflowed but never created
            // negative zero.
            // https://bugs.webkit.org/show_bug.cgi?id=132470
            if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
                node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
            else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
                node->mergeFlags(NodeMayNegZeroInBaseline);
            break;

        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }

        return node;
    }
    Node* makeDivSafe(Node* node)
    {
        ASSERT(node->op() == ArithDiv);

        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
            node->mergeFlags(NodeMayOverflowInDFG);
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            node->mergeFlags(NodeMayNegZeroInDFG);

        // The main slow case counter for op_div in the old JIT counts only when
        // the operands are not numbers. We don't care about that since we already
        // have speculations in place that take care of that separately. We only
        // care about when the outcome of the division is not an integer, which
        // is what the special fast case counter tells us.

        if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
            return node;

        // FIXME: It might be possible to make this more granular.
        node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);

        return node;
    }
    void noticeArgumentsUse()
    {
        // All of the arguments in this function need to be formatted as JSValues because we will
        // load from them in a random-access fashion and we don't want to have to switch on
        // format.

        for (ArgumentPosition* argument : m_inlineStackTop->m_argumentPositions)
            argument->mergeShouldNeverUnbox(true);
    }

    void buildOperandMapsIfNecessary();
    VM* m_vm;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;

    Graph& m_graph;

    // The current block being generated.
    BasicBlock* m_currentBlock;
    // The bytecode index of the current instruction being generated.
    unsigned m_currentIndex;
    // The semantic origin of the current node if different from the current Index.
    CodeOrigin m_currentSemanticOrigin;

    FrozenValue* m_constantUndefined;
    FrozenValue* m_constantNull;
    FrozenValue* m_constantNaN;
    FrozenValue* m_constantOne;
    Vector<Node*, 16> m_constants;

    // The number of arguments passed to the function.
    unsigned m_numArguments;
    // The number of locals (vars + temporaries) used in the function.
    unsigned m_numLocals;
    // The number of slots (in units of sizeof(Register)) that we need to
    // preallocate for arguments to outgoing calls from this frame. This
    // number includes the CallFrame slots that we initialize for the callee
    // (but not the callee-initialized CallerFrame and ReturnPC slots).
    // This number is 0 if and only if this function is a leaf.
    unsigned m_parameterSlots;
    // The number of var args passed to the next var arg node.
    unsigned m_numPassedVarArgs;

    HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
    struct InlineStackEntry {
        ByteCodeParser* m_byteCodeParser;

        CodeBlock* m_codeBlock;
        CodeBlock* m_profiledBlock;
        InlineCallFrame* m_inlineCallFrame;

        ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }

        QueryableExitProfile m_exitProfile;

        // Remapping of identifier and constant numbers from the code block being
        // inlined (inline callee) to the code block that we're inlining into
        // (the machine code block, which is the transitive, though not necessarily
        // direct, caller).
        Vector<unsigned> m_identifierRemap;
        Vector<unsigned> m_constantBufferRemap;
        Vector<unsigned> m_switchRemap;

        // Blocks introduced by this code block, which need successor linking.
        // May include up to one basic block that includes the continuation after
        // the callsite in the caller. These must be appended in the order that they
        // are created, but their bytecodeBegin values need not be in order as they
        // are ignored.
        Vector<UnlinkedBlock> m_unlinkedBlocks;

        // Potential block linking targets. Must be sorted by bytecodeBegin, and
        // cannot have two blocks that have the same bytecodeBegin.
        Vector<BasicBlock*> m_blockLinkingTargets;

        // If the callsite's basic block was split into two, then this will be
        // the head of the callsite block. It needs its successors linked to the
        // m_unlinkedBlocks, but not the other way around: there's no way for
        // any blocks in m_unlinkedBlocks to jump back into this block.
        BasicBlock* m_callsiteBlockHead;

        // Does the callsite block head need linking? This is typically true
        // but will be false for the machine code block's inline stack entry
        // (since that one is not inlined) and for cases where an inline callee
        // did the linking for us.
        bool m_callsiteBlockHeadNeedsLinking;

        VirtualRegister m_returnValue;

        // Speculations about variable types collected from the profiled code block,
        // which are based on OSR exit profiles that past DFG compilations of this
        // code block had gathered.
        LazyOperandValueProfileParser m_lazyOperands;

        CallLinkInfoMap m_callLinkInfos;
        StubInfoMap m_stubInfos;

        // Did we see any returns? We need to handle the (uncommon but necessary)
        // case where a procedure that does not return was inlined.
        bool m_didReturn;

        // Did we have any early returns?
        bool m_didEarlyReturn;

        // Pointers to the argument position trackers for this slice of code.
        Vector<ArgumentPosition*> m_argumentPositions;

        InlineStackEntry* m_caller;
        InlineStackEntry(
            ByteCodeParser* byteCodeParser,
            CodeBlock* codeBlock,
            CodeBlock* profiledBlock,
            BasicBlock* callsiteBlockHead,
            JSFunction* callee, // Null if this is a closure call.
            VirtualRegister returnValueVR,
            VirtualRegister inlineCallFrameStart,
            int argumentCountIncludingThis,
            InlineCallFrame::Kind);

        ~InlineStackEntry()
        {
            m_byteCodeParser->m_inlineStackTop = m_caller;
        }
        VirtualRegister remapOperand(VirtualRegister operand) const
        {
            if (!m_inlineCallFrame)
                return operand;

            ASSERT(!operand.isConstant());

            return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
        }
    };

    InlineStackEntry* m_inlineStackTop;
    struct DelayedSetLocal {
        CodeOrigin m_origin;
        VirtualRegister m_operand;
        Node* m_value;

        DelayedSetLocal() { }
        DelayedSetLocal(const CodeOrigin& origin, VirtualRegister operand, Node* value)
            : m_origin(origin)
            , m_operand(operand)
            , m_value(value)
        {
        }

        Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
        {
            if (m_operand.isArgument())
                return parser->setArgument(m_origin, m_operand, m_value, setMode);
            return parser->setLocal(m_origin, m_operand, m_value, setMode);
        }
    };

    Vector<DelayedSetLocal, 2> m_setLocalQueue;
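    // Note: DelayedSetLocal is the record queued by setDirect() for NormalSet stores; entries sit
    // in m_setLocalQueue until processSetLocalQueue() executes them, at which point they become
    // real SetLocal nodes via setLocal()/setArgument() above.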
    // Have we built operand maps? We initialize them lazily, and only when doing
    // inlining.
    bool m_haveBuiltOperandMaps;
    // Mapping between identifier names and numbers.
    BorrowedIdentifierMap m_identifierMap;

    CodeBlock* m_dfgCodeBlock;
    CallLinkStatus::ContextMap m_callContextMap;
    StubInfoMap m_dfgStubInfos;

    Instruction* m_currentInstruction;
    bool m_hasDebuggerEnabled;
};
#define NEXT_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    continue

#define LAST_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    return shouldContinueParsing
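// Note: NEXT_OPCODE advances m_currentIndex past the current instruction and continues the
// per-instruction dispatch loop in parseBlock(); LAST_OPCODE does the same index bookkeeping but
// ends the block by returning shouldContinueParsing.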
void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
{
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
    handleCall(
        pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call),
        pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
}
void ByteCodeParser::handleCall(
    int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize,
    int callee, int argumentCountIncludingThis, int registerOffset)
{
    Node* callTarget = get(VirtualRegister(callee));

    CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
        m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
        m_inlineStackTop->m_callLinkInfos, m_callContextMap);

    handleCall(
        result, op, InlineCallFrame::kindFor(kind), instructionSize, callTarget,
        argumentCountIncludingThis, registerOffset, callLinkStatus);
}
void ByteCodeParser::handleCall(
    int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
    Node* callTarget, int argumentCountIncludingThis, int registerOffset,
    CallLinkStatus callLinkStatus)
{
    handleCall(
        result, op, kind, instructionSize, callTarget, argumentCountIncludingThis,
        registerOffset, callLinkStatus, getPrediction());
}
void ByteCodeParser::handleCall(
    int result, NodeType op, InlineCallFrame::Kind kind, unsigned instructionSize,
    Node* callTarget, int argumentCountIncludingThis, int registerOffset,
    CallLinkStatus callLinkStatus, SpeculatedType prediction)
{
    ASSERT(registerOffset <= 0);

    if (callTarget->isCellConstant())
        callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));

    if (Options::verboseDFGByteCodeParsing())
        dataLog(" Handling call at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");

    if (!callLinkStatus.canOptimize()) {
        // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
        // that we cannot optimize them.

        addCall(result, op, OpInfo(), callTarget, argumentCountIncludingThis, registerOffset, prediction);
        return;
    }

    unsigned nextOffset = m_currentIndex + instructionSize;

    OpInfo callOpInfo;

    if (handleInlining(callTarget, result, callLinkStatus, registerOffset, virtualRegisterForArgument(0, registerOffset), VirtualRegister(), 0, argumentCountIncludingThis, nextOffset, op, kind, prediction)) {
        if (m_graph.compilation())
            m_graph.compilation()->noticeInlinedCall();
        return;
    }

#if ENABLE(FTL_NATIVE_CALL_INLINING)
    if (isFTL(m_graph.m_plan.mode) && Options::optimizeNativeCalls() && callLinkStatus.size() == 1 && !callLinkStatus.couldTakeSlowPath()) {
        CallVariant callee = callLinkStatus[0];
        JSFunction* function = callee.function();
        CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);
        if (function && function->isHostFunction()) {
            emitFunctionChecks(callee, callTarget, virtualRegisterForArgument(0, registerOffset));
            callOpInfo = OpInfo(m_graph.freeze(function));

            if (op == Call)
                op = NativeCall;
            else {
                ASSERT(op == Construct);
                op = NativeConstruct;
            }
        }
    }
#endif

    addCall(result, op, callOpInfo, callTarget, argumentCountIncludingThis, registerOffset, prediction);
}
void ByteCodeParser::handleVarargsCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
{
    ASSERT(OPCODE_LENGTH(op_call_varargs) == OPCODE_LENGTH(op_construct_varargs));

    int result = pc[1].u.operand;
    int callee = pc[2].u.operand;
    int thisReg = pc[3].u.operand;
    int arguments = pc[4].u.operand;
    int firstFreeReg = pc[5].u.operand;
    int firstVarArgOffset = pc[6].u.operand;

    SpeculatedType prediction = getPrediction();

    Node* callTarget = get(VirtualRegister(callee));

    CallLinkStatus callLinkStatus = CallLinkStatus::computeFor(
        m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
        m_inlineStackTop->m_callLinkInfos, m_callContextMap);
    if (callTarget->isCellConstant())
        callLinkStatus.setProvenConstantCallee(CallVariant(callTarget->asCell()));

    if (Options::verboseDFGByteCodeParsing())
        dataLog(" Varargs call link status at ", currentCodeOrigin(), ": ", callLinkStatus, "\n");

    if (callLinkStatus.canOptimize()
        && handleInlining(callTarget, result, callLinkStatus, firstFreeReg, VirtualRegister(thisReg), VirtualRegister(arguments), firstVarArgOffset, 0, m_currentIndex + OPCODE_LENGTH(op_call_varargs), op, InlineCallFrame::varargsKindFor(kind), prediction)) {
        if (m_graph.compilation())
            m_graph.compilation()->noticeInlinedCall();
        return;
    }

    CallVarargsData* data = m_graph.m_callVarargsData.add();
    data->firstVarArgOffset = firstVarArgOffset;

    Node* thisChild = get(VirtualRegister(thisReg));

    Node* call = addToGraph(op, OpInfo(data), OpInfo(prediction), callTarget, get(VirtualRegister(arguments)), thisChild);
    VirtualRegister resultReg(result);
    if (resultReg.isValid())
        set(resultReg, call);
}
void ByteCodeParser::emitFunctionChecks(CallVariant callee, Node* callTarget, VirtualRegister thisArgumentReg)
{
    Node* thisArgument;
    if (thisArgumentReg.isValid())
        thisArgument = get(thisArgumentReg);
    else
        thisArgument = 0;

    JSCell* calleeCell;
    Node* callTargetForCheck;
    if (callee.isClosureCall()) {
        calleeCell = callee.executable();
        callTargetForCheck = addToGraph(GetExecutable, callTarget);
    } else {
        calleeCell = callee.nonExecutableCallee();
        callTargetForCheck = callTarget;
    }

    ASSERT(calleeCell);
    addToGraph(CheckCell, OpInfo(m_graph.freeze(calleeCell)), callTargetForCheck, thisArgument);
}
void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis)
{
    for (int i = 0; i < argumentCountIncludingThis; ++i)
        addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
}
unsigned ByteCodeParser::inliningCost(CallVariant callee, int argumentCountIncludingThis, CodeSpecializationKind kind)
{
    if (verbose)
        dataLog("Considering inlining ", callee, " into ", currentCodeOrigin(), "\n");

    if (m_hasDebuggerEnabled) {
        if (verbose)
            dataLog(" Failing because the debugger is in use.\n");
        return UINT_MAX;
    }

    FunctionExecutable* executable = callee.functionExecutable();
    if (!executable) {
        if (verbose)
            dataLog(" Failing because there is no function executable.\n");
        return UINT_MAX;
    }

    // Does the number of arguments we're passing match the arity of the target? We currently
    // inline only if the number of arguments passed is greater than or equal to the number of
    // arguments expected.
    if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) {
        if (verbose)
            dataLog(" Failing because of arity mismatch.\n");
        return UINT_MAX;
    }

    // Do we have a code block, and does the code block's size match the heuristics/requirements for
    // being an inline candidate? We might not have a code block (1) if code was thrown away,
    // (2) if we simply hadn't actually made this call yet or (3) code is a builtin function and
    // specialization kind is construct. In the former 2 cases, we could still theoretically attempt
    // to inline it if we had a static proof of what was being called; this might happen for example
    // if you call a global function, where watchpointing gives us static information. Overall,
    // it's a rare case because we expect that any hot callees would have already been compiled.
    CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
    if (!codeBlock) {
        if (verbose)
            dataLog(" Failing because no code block available.\n");
        return UINT_MAX;
    }
    CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
        codeBlock, kind, callee.isClosureCall());
    if (verbose) {
        dataLog(" Kind: ", kind, "\n");
        dataLog(" Is closure call: ", callee.isClosureCall(), "\n");
        dataLog(" Capability level: ", capabilityLevel, "\n");
        dataLog(" Might inline function: ", mightInlineFunctionFor(codeBlock, kind), "\n");
        dataLog(" Might compile function: ", mightCompileFunctionFor(codeBlock, kind), "\n");
        dataLog(" Is supported for inlining: ", isSupportedForInlining(codeBlock), "\n");
        dataLog(" Needs activation: ", codeBlock->ownerExecutable()->needsActivation(), "\n");
        dataLog(" Is inlining candidate: ", codeBlock->ownerExecutable()->isInliningCandidate(), "\n");
    }
    if (!canInline(capabilityLevel)) {
        if (verbose)
            dataLog(" Failing because the function is not inlineable.\n");
        return UINT_MAX;
    }

    // Check if the caller is already too large. We do this check here because that's just
    // where we happen to also have the callee's code block, and we want that for the
    // purpose of unsetting SABI.
    if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
        codeBlock->m_shouldAlwaysBeInlined = false;
        if (verbose)
            dataLog(" Failing because the caller is too large.\n");
        return UINT_MAX;
    }

    // FIXME: this should be better at predicting how much bloat we will introduce by inlining
    // this code path.
    // https://bugs.webkit.org/show_bug.cgi?id=127627

    // FIXME: We currently inline functions that have run in LLInt but not in Baseline. These
    // functions have very low fidelity profiling, and presumably they weren't very hot if they
    // haven't gotten to Baseline yet. Consider not inlining these functions.
    // https://bugs.webkit.org/show_bug.cgi?id=145503

    // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
    // too many levels? If either of these are detected, then don't inline. We adjust our
    // heuristics if we are dealing with a function that cannot otherwise be compiled.

    unsigned depth = 0;
    unsigned recursion = 0;

    for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
        ++depth;
        if (depth >= Options::maximumInliningDepth()) {
            if (verbose)
                dataLog(" Failing because depth exceeded.\n");
            return UINT_MAX;
        }

        if (entry->executable() == executable) {
            ++recursion;
            if (recursion >= Options::maximumInliningRecursion()) {
                if (verbose)
                    dataLog(" Failing because recursion detected.\n");
                return UINT_MAX;
            }
        }
    }

    if (verbose)
        dataLog(" Inlining should be possible.\n");

    // It might be possible to inline.
    return codeBlock->instructionCount();
}
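// Note: the instruction count returned above is what attemptToInlineCall() charges against its
// caller-supplied inliningBalance; a candidate whose cost exceeds the remaining balance is
// rejected, which is how the per-callsite inlining budget set up in handleInlining() is enforced.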
template<typename ChecksFunctor>
void ByteCodeParser::inlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, const ChecksFunctor& insertChecks)
{
    CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);

    ASSERT(inliningCost(callee, argumentCountIncludingThis, specializationKind) != UINT_MAX);

    CodeBlock* codeBlock = callee.functionExecutable()->baselineCodeBlockFor(specializationKind);
    insertChecks(codeBlock);

    // FIXME: Don't flush constants!

    int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;

    ensureLocals(
        VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
        JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters);

    size_t argumentPositionStart = m_graph.m_argumentPositions.size();

    VirtualRegister resultReg(resultOperand);
    if (resultReg.isValid())
        resultReg = m_inlineStackTop->remapOperand(resultReg);

    InlineStackEntry inlineStackEntry(
        this, codeBlock, codeBlock, m_graph.lastBlock(), callee.function(), resultReg,
        (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);

    // This is where the actual inlining really happens.
    unsigned oldIndex = m_currentIndex;
    m_currentIndex = 0;

    InlineVariableData inlineVariableData;
    inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
    inlineVariableData.argumentPositionStart = argumentPositionStart;
    inlineVariableData.calleeVariable = 0;

    RELEASE_ASSERT(
        m_inlineStackTop->m_inlineCallFrame->isClosureCall
        == callee.isClosureCall());
    if (callee.isClosureCall()) {
        VariableAccessData* calleeVariable =
            set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData();

        calleeVariable->mergeShouldNeverUnbox(true);

        inlineVariableData.calleeVariable = calleeVariable;
    }

    m_graph.m_inlineVariableData.append(inlineVariableData);

    parseCodeBlock();
    clearCaches(); // Reset our state now that we're back to the outer code.

    m_currentIndex = oldIndex;

    // If the inlined code created some new basic blocks, then we have linking to do.
    if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {

        ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
        if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
            linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets);
        else
            ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);

        if (callerLinkability == CallerDoesNormalLinking)
            cancelLinkingForBlock(inlineStackEntry.m_caller, inlineStackEntry.m_callsiteBlockHead);

        linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
    } else
        ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());

    BasicBlock* lastBlock = m_graph.lastBlock();
    // If there was a return, but no early returns, then we're done. We allow parsing of
    // the caller to continue in whatever basic block we're in right now.
    if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
        if (Options::verboseDFGByteCodeParsing())
            dataLog(" Allowing parsing to continue in last inlined block.\n");

        ASSERT(lastBlock->isEmpty() || !lastBlock->terminal());

        // If we created new blocks then the last block needs linking, but in the
        // caller. It doesn't need to be linked to, but it needs outgoing links.
        if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
            // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
            // for release builds because this block will never serve as a potential target
            // in the linker's binary search.
            if (Options::verboseDFGByteCodeParsing())
                dataLog(" Repurposing last block from ", lastBlock->bytecodeBegin, " to ", m_currentIndex, "\n");
            lastBlock->bytecodeBegin = m_currentIndex;
            if (callerLinkability == CallerDoesNormalLinking) {
                if (verbose)
                    dataLog("Adding unlinked block ", RawPointer(m_graph.lastBlock()), " (one return)\n");
                m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
            }
        }

        m_currentBlock = m_graph.lastBlock();
        return;
    }

    if (Options::verboseDFGByteCodeParsing())
        dataLog(" Creating new block after inlining.\n");

    // If we get to this point then all blocks must end in some sort of terminals.
    ASSERT(lastBlock->terminal());

    // Need to create a new basic block for the continuation at the caller.
    RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN));

    // Link the early returns to the basic block we're about to create.
    for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
        if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
            continue;
        BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
        ASSERT(!blockToLink->isLinked);
        Node* node = blockToLink->terminal();
        ASSERT(node->op() == Jump);
        ASSERT(!node->targetBlock());
        node->targetBlock() = block.get();
        inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
        if (verbose)
            dataLog("Marking ", RawPointer(blockToLink), " as linked (jumps to return)\n");
        blockToLink->didLink();
    }

    m_currentBlock = block.get();
    ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
    if (verbose)
        dataLog("Adding unlinked block ", RawPointer(block.get()), " (many returns)\n");
    if (callerLinkability == CallerDoesNormalLinking) {
        m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
        m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
    }
    m_graph.appendBlock(block);
    prepareToParseBlock();
}
void ByteCodeParser::cancelLinkingForBlock(InlineStackEntry* inlineStackEntry, BasicBlock* block)
{
    // It's possible that the callsite block head is not owned by the caller.
    if (!inlineStackEntry->m_unlinkedBlocks.isEmpty()) {
        // It's definitely owned by the caller, because the caller created new blocks.
        // Assert that this all adds up.
        ASSERT_UNUSED(block, inlineStackEntry->m_unlinkedBlocks.last().m_block == block);
        ASSERT(inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking);
        inlineStackEntry->m_unlinkedBlocks.last().m_needsNormalLinking = false;
    } else {
        // It's definitely not owned by the caller. Tell the caller that he does not
        // need to link his callsite block head, because we did it for him.
        ASSERT(inlineStackEntry->m_callsiteBlockHeadNeedsLinking);
        ASSERT_UNUSED(block, inlineStackEntry->m_callsiteBlockHead == block);
        inlineStackEntry->m_callsiteBlockHeadNeedsLinking = false;
    }
}
template<typename ChecksFunctor>
bool ByteCodeParser::attemptToInlineCall(Node* callTargetNode, int resultOperand, CallVariant callee, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, InlineCallFrame::Kind kind, CallerLinkability callerLinkability, SpeculatedType prediction, unsigned& inliningBalance, const ChecksFunctor& insertChecks)
{
    CodeSpecializationKind specializationKind = InlineCallFrame::specializationKindFor(kind);

    if (!inliningBalance)
        return false;

    bool didInsertChecks = false;
    auto insertChecksWithAccounting = [&] () {
        insertChecks(nullptr);
        didInsertChecks = true;
    };

    if (verbose)
        dataLog(" Considering callee ", callee, "\n");

    // Intrinsics and internal functions can only be inlined if we're not doing varargs. This is because
    // we currently don't have any way of getting profiling information for arguments to non-JS varargs
    // calls. The prediction propagator won't be of any help because LoadVarargs obscures the data flow,
    // and there are no callsite value profiles and native function won't have callee value profiles for
    // those arguments. Even worse, if the intrinsic decides to exit, it won't really have anywhere to
    // exit to: LoadVarargs is effectful and it's part of the op_call_varargs, so we can't exit without
    // calling LoadVarargs twice.
    if (!InlineCallFrame::isVarargs(kind)) {
        if (InternalFunction* function = callee.internalFunction()) {
            if (handleConstantInternalFunction(resultOperand, function, registerOffset, argumentCountIncludingThis, specializationKind, insertChecksWithAccounting)) {
                RELEASE_ASSERT(didInsertChecks);
                addToGraph(Phantom, callTargetNode);
                emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
                inliningBalance--;
                return true;
            }
            RELEASE_ASSERT(!didInsertChecks);
            return false;
        }

        Intrinsic intrinsic = callee.intrinsicFor(specializationKind);
        if (intrinsic != NoIntrinsic) {
            if (handleIntrinsic(resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction, insertChecksWithAccounting)) {
                RELEASE_ASSERT(didInsertChecks);
                addToGraph(Phantom, callTargetNode);
                emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);
                inliningBalance--;
                return true;
            }
            RELEASE_ASSERT(!didInsertChecks);
            return false;
        }
    }

    unsigned myInliningCost = inliningCost(callee, argumentCountIncludingThis, specializationKind);
    if (myInliningCost > inliningBalance)
        return false;

    Instruction* savedCurrentInstruction = m_currentInstruction;
    inlineCall(callTargetNode, resultOperand, callee, registerOffset, argumentCountIncludingThis, nextOffset, kind, callerLinkability, insertChecks);
    inliningBalance -= myInliningCost;
    m_currentInstruction = savedCurrentInstruction;
    return true;
}
1497 bool ByteCodeParser::handleInlining(
1498 Node
* callTargetNode
, int resultOperand
, const CallLinkStatus
& callLinkStatus
,
1499 int registerOffsetOrFirstFreeReg
, VirtualRegister thisArgument
,
1500 VirtualRegister argumentsArgument
, unsigned argumentsOffset
, int argumentCountIncludingThis
,
1501 unsigned nextOffset
, NodeType callOp
, InlineCallFrame::Kind kind
, SpeculatedType prediction
)
1504 dataLog("Handling inlining...\n");
1505 dataLog("Stack: ", currentCodeOrigin(), "\n");
1507 CodeSpecializationKind specializationKind
= InlineCallFrame::specializationKindFor(kind
);
1509 if (!callLinkStatus
.size()) {
1511 dataLog("Bailing inlining.\n");
1515 if (InlineCallFrame::isVarargs(kind
)
1516 && callLinkStatus
.maxNumArguments() > Options::maximumVarargsForInlining()) {
1518 dataLog("Bailing inlining because of varargs.\n");
1522 unsigned inliningBalance
= Options::maximumFunctionForCallInlineCandidateInstructionCount();
1523 if (specializationKind
== CodeForConstruct
)
1524 inliningBalance
= std::min(inliningBalance
, Options::maximumFunctionForConstructInlineCandidateInstructionCount());
1525 if (callLinkStatus
.isClosureCall())
1526 inliningBalance
= std::min(inliningBalance
, Options::maximumFunctionForClosureCallInlineCandidateInstructionCount());
    // First check if we can avoid creating control flow. Our inliner does some CFG
    // simplification on the fly and this helps reduce compile times, but we can only leverage
    // this in cases where we don't need control flow diamonds to check the callee.
    if (!callLinkStatus.couldTakeSlowPath() && callLinkStatus.size() == 1) {
        int registerOffset;

        // Only used for varargs calls.
        unsigned mandatoryMinimum = 0;
        unsigned maxNumArguments = 0;

        if (InlineCallFrame::isVarargs(kind)) {
            if (FunctionExecutable* functionExecutable = callLinkStatus[0].functionExecutable())
                mandatoryMinimum = functionExecutable->parameterCount();
            else
                mandatoryMinimum = 0;

            maxNumArguments = std::max(
                callLinkStatus.maxNumArguments(),
                mandatoryMinimum + 1);

            // We sort of pretend that this *is* the number of arguments that were passed.
            argumentCountIncludingThis = maxNumArguments;

            registerOffset = registerOffsetOrFirstFreeReg + 1;
            registerOffset -= maxNumArguments; // includes "this"
            registerOffset -= JSStack::CallFrameHeaderSize;
            registerOffset = -WTF::roundUpToMultipleOf(
                stackAlignmentRegisters(),
                -registerOffset);
        } else
            registerOffset = registerOffsetOrFirstFreeReg;
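        // Rough sketch of the math above: registerOffset ends up at
        // registerOffsetOrFirstFreeReg + 1 - maxNumArguments - JSStack::CallFrameHeaderSize, and
        // is then pushed further from zero so that -registerOffset is a multiple of
        // stackAlignmentRegisters(). That reserves aligned room for "this", the arguments, and
        // the call frame header of the inlined varargs frame.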
        bool result = attemptToInlineCall(
            callTargetNode, resultOperand, callLinkStatus[0], registerOffset,
            argumentCountIncludingThis, nextOffset, kind, CallerDoesNormalLinking, prediction,
            inliningBalance, [&] (CodeBlock* codeBlock) {
                emitFunctionChecks(callLinkStatus[0], callTargetNode, thisArgument);

                // If we have a varargs call, we want to extract the arguments right now.
                if (InlineCallFrame::isVarargs(kind)) {
                    int remappedRegisterOffset =
                        m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset();

                    ensureLocals(VirtualRegister(remappedRegisterOffset).toLocal());

                    int argumentStart = registerOffset + JSStack::CallFrameHeaderSize;
                    int remappedArgumentStart =
                        m_inlineStackTop->remapOperand(VirtualRegister(argumentStart)).offset();

                    LoadVarargsData* data = m_graph.m_loadVarargsData.add();
                    data->start = VirtualRegister(remappedArgumentStart + 1);
                    data->count = VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount);
                    data->offset = argumentsOffset;
                    data->limit = maxNumArguments;
                    data->mandatoryMinimum = mandatoryMinimum;

                    addToGraph(LoadVarargs, OpInfo(data), get(argumentsArgument));
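                    // Reading of the LoadVarargsData fields set up above (as used here, not a
                    // general contract): 'start' is the first remapped argument slot to fill,
                    // 'count' names the slot that receives the argument count, 'offset' skips
                    // leading arguments, 'limit' caps the number of slots (maxNumArguments), and
                    // 'mandatoryMinimum' is how many arguments the callee declares and must see.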
                    // LoadVarargs may OSR exit. Hence, we need to keep alive callTargetNode, thisArgument
                    // and argumentsArgument for the baseline JIT. However, we only need a Phantom for
                    // callTargetNode because the other 2 are still in use and alive at this point.
                    addToGraph(Phantom, callTargetNode);

                    // In DFG IR before SSA, we cannot insert control flow between after the
                    // LoadVarargs and the last SetArgument. This isn't a problem once we get to DFG
                    // SSA. Fortunately, we also have other reasons for not inserting control flow
                    // before SSA.

                    VariableAccessData* countVariable = newVariableAccessData(
                        VirtualRegister(remappedRegisterOffset + JSStack::ArgumentCount));
                    // This is pretty lame, but it will force the count to be flushed as an int. This doesn't
                    // matter very much, since our use of a SetArgument and Flushes for this local slot is
                    // mostly just a formality.
                    countVariable->predict(SpecInt32);
                    countVariable->mergeIsProfitableToUnbox(true);
                    Node* setArgumentCount = addToGraph(SetArgument, OpInfo(countVariable));
                    m_currentBlock->variablesAtTail.setOperand(countVariable->local(), setArgumentCount);

                    set(VirtualRegister(argumentStart), get(thisArgument), ImmediateNakedSet);
                    for (unsigned argument = 1; argument < maxNumArguments; ++argument) {
                        VariableAccessData* variable = newVariableAccessData(
                            VirtualRegister(remappedArgumentStart + argument));
                        variable->mergeShouldNeverUnbox(true); // We currently have nowhere to put the type check on the LoadVarargs. LoadVarargs is effectful, so after it finishes, we cannot exit.

                        // For a while it had been my intention to do things like this inside the
                        // prediction injection phase. But in this case it's really best to do it here,
                        // because it's here that we have access to the variable access datas for the
                        // inlining we're about to do.
                        //
                        // Something else that's interesting here is that we'd really love to get
                        // predictions from the arguments loaded at the callsite, rather than the
                        // arguments received inside the callee. But that probably won't matter for most
                        // calls.
                        if (codeBlock && argument < static_cast<unsigned>(codeBlock->numParameters())) {
                            ConcurrentJITLocker locker(codeBlock->m_lock);
                            if (ValueProfile* profile = codeBlock->valueProfileForArgument(argument))
                                variable->predict(profile->computeUpdatedPrediction(locker));
                        }

                        Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
                        m_currentBlock->variablesAtTail.setOperand(variable->local(), setArgument);
                    }
                }
            });
        if (verbose) {
            dataLog("Done inlining (simple).\n");
            dataLog("Stack: ", currentCodeOrigin(), "\n");
            dataLog("Result: ", result, "\n");
        }
        return result;
    }
    // We need to create some kind of switch over callee. For now we only do this if we believe that
    // we're in the top tier. We have two reasons for this: first, it provides us an opportunity to
    // do more detailed polyvariant/polymorphic profiling; and second, it reduces compile times in
    // the DFG. And by polyvariant profiling we mean polyvariant profiling of *this* call. Note that
    // we could improve that aspect of this by doing polymorphic inlining but having the profiling
    if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicCallInlining()
        || InlineCallFrame::isVarargs(kind)) {
        if (verbose) {
            dataLog("Bailing inlining (hard).\n");
            dataLog("Stack: ", currentCodeOrigin(), "\n");
        }
        return false;
    }

    unsigned oldOffset = m_currentIndex;

    bool allAreClosureCalls = true;
    bool allAreDirectCalls = true;
    for (unsigned i = callLinkStatus.size(); i--;) {
        if (callLinkStatus[i].isClosureCall())
            allAreDirectCalls = false;
        else
            allAreClosureCalls = false;
    }

    Node* thingToSwitchOn;
    if (allAreDirectCalls)
        thingToSwitchOn = callTargetNode;
    else if (allAreClosureCalls)
        thingToSwitchOn = addToGraph(GetExecutable, callTargetNode);
    else {
        // FIXME: We should be able to handle this case, but it's tricky and we don't know of cases
        // where it would be beneficial. It might be best to handle these cases as if all calls were
        // closure calls.
        // https://bugs.webkit.org/show_bug.cgi?id=136020
        if (verbose) {
            dataLog("Bailing inlining (mix).\n");
            dataLog("Stack: ", currentCodeOrigin(), "\n");
        }
        return false;
    }

    if (verbose) {
        dataLog("Doing hard inlining...\n");
        dataLog("Stack: ", currentCodeOrigin(), "\n");
    }
    int registerOffset = registerOffsetOrFirstFreeReg;

    // This makes me wish that we were in SSA all the time. We need to pick a variable into which to
    // store the callee so that it will be accessible to all of the blocks we're about to create. We
    // get away with doing an immediate-set here because we wouldn't have performed any side effects
    // yet.
    if (verbose)
        dataLog("Register offset: ", registerOffset);
    VirtualRegister calleeReg(registerOffset + JSStack::Callee);
    calleeReg = m_inlineStackTop->remapOperand(calleeReg);
    if (verbose)
        dataLog("Callee is going to be ", calleeReg, "\n");
    setDirect(calleeReg, callTargetNode, ImmediateSetWithFlush);

    SwitchData& data = *m_graph.m_switchData.add();
    data.kind = SwitchCell;
    addToGraph(Switch, OpInfo(&data), thingToSwitchOn);
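    // From here on we build the control-flow diamond: the origin block ends in the Switch above,
    // each successfully inlined callee gets its own block, a fall-through slow-path block either
    // performs a real call or proves the case impossible with CheckBadCell, and all of them jump
    // to a common continuation block that is created last.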
    BasicBlock* originBlock = m_currentBlock;
    if (verbose)
        dataLog("Marking ", RawPointer(originBlock), " as linked (origin of poly inline)\n");
    originBlock->didLink();
    cancelLinkingForBlock(m_inlineStackTop, originBlock);

    // Each inlined callee will have a landing block that it returns at. They should all have jumps
    // to the continuation block, which we create last.
    Vector<BasicBlock*> landingBlocks;

    // We may force this true if we give up on inlining any of the edges.
    bool couldTakeSlowPath = callLinkStatus.couldTakeSlowPath();

    if (verbose)
        dataLog("About to loop over functions at ", currentCodeOrigin(), ".\n");

    for (unsigned i = 0; i < callLinkStatus.size(); ++i) {
        m_currentIndex = oldOffset;
        RefPtr<BasicBlock> block = adoptRef(new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
        m_currentBlock = block.get();
        m_graph.appendBlock(block);
        prepareToParseBlock();

        Node* myCallTargetNode = getDirect(calleeReg);

        bool inliningResult = attemptToInlineCall(
            myCallTargetNode, resultOperand, callLinkStatus[i], registerOffset,
            argumentCountIncludingThis, nextOffset, kind, CallerLinksManually, prediction,
            inliningBalance, [&] (CodeBlock*) { });

        if (!inliningResult) {
            // That failed so we let the block die. Nothing interesting should have been added to
            // the block. We also give up on inlining any of the (less frequent) callees.
            ASSERT(m_currentBlock == block.get());
            ASSERT(m_graph.m_blocks.last() == block);
            m_graph.killBlockAndItsContents(block.get());
            m_graph.m_blocks.removeLast();

            // The fact that inlining failed means we need a slow path.
            couldTakeSlowPath = true;
            break;
        }

        JSCell* thingToCaseOn;
        if (allAreDirectCalls)
            thingToCaseOn = callLinkStatus[i].nonExecutableCallee();
        else {
            ASSERT(allAreClosureCalls);
            thingToCaseOn = callLinkStatus[i].executable();
        }
        data.cases.append(SwitchCase(m_graph.freeze(thingToCaseOn), block.get()));
        m_currentIndex = nextOffset;
        processSetLocalQueue(); // This only comes into play for intrinsics, since normal inlined code will leave an empty queue.
        addToGraph(Jump);
        if (verbose)
            dataLog("Marking ", RawPointer(m_currentBlock), " as linked (tail of poly inlinee)\n");
        m_currentBlock->didLink();
        landingBlocks.append(m_currentBlock);

        if (verbose)
            dataLog("Finished inlining ", callLinkStatus[i], " at ", currentCodeOrigin(), ".\n");
    }
    RefPtr<BasicBlock> slowPathBlock = adoptRef(
        new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
    m_currentIndex = oldOffset;
    data.fallThrough = BranchTarget(slowPathBlock.get());
    m_graph.appendBlock(slowPathBlock);
    if (verbose)
        dataLog("Marking ", RawPointer(slowPathBlock.get()), " as linked (slow path block)\n");
    slowPathBlock->didLink();
    prepareToParseBlock();
    m_currentBlock = slowPathBlock.get();
    Node* myCallTargetNode = getDirect(calleeReg);
    if (couldTakeSlowPath) {
        addCall(
            resultOperand, callOp, OpInfo(), myCallTargetNode, argumentCountIncludingThis,
            registerOffset, prediction);
    } else {
        addToGraph(CheckBadCell);
        addToGraph(Phantom, myCallTargetNode);
        emitArgumentPhantoms(registerOffset, argumentCountIncludingThis);

        set(VirtualRegister(resultOperand), addToGraph(BottomValue));
    }

    m_currentIndex = nextOffset;
    processSetLocalQueue();
    addToGraph(Jump);
    landingBlocks.append(m_currentBlock);

    RefPtr<BasicBlock> continuationBlock = adoptRef(
        new BasicBlock(UINT_MAX, m_numArguments, m_numLocals, PNaN));
    m_graph.appendBlock(continuationBlock);
    if (verbose)
        dataLog("Adding unlinked block ", RawPointer(continuationBlock.get()), " (continuation)\n");
    m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(continuationBlock.get()));
    prepareToParseBlock();
    m_currentBlock = continuationBlock.get();

    for (unsigned i = landingBlocks.size(); i--;)
        landingBlocks[i]->terminal()->targetBlock() = continuationBlock.get();

    m_currentIndex = oldOffset;

    if (verbose) {
        dataLog("Done inlining (hard).\n");
        dataLog("Stack: ", currentCodeOrigin(), "\n");
    }
    return true;
}
template<typename ChecksFunctor>
bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis, const ChecksFunctor& insertChecks)
{
    if (argumentCountIncludingThis == 1) { // Math.min()
        insertChecks();
        set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
        return true;
    }

    if (argumentCountIncludingThis == 2) { // Math.min(x)
        insertChecks();
        Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
        addToGraph(Phantom, Edge(result, NumberUse));
        set(VirtualRegister(resultOperand), result);
        return true;
    }

    if (argumentCountIncludingThis == 3) { // Math.min(x, y)
        insertChecks();
        set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
        return true;
    }

    // Don't handle >=3 arguments for now.
    return false;
}
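// For example, with op == ArithMin: Math.min() folds to NaN, Math.min(x) becomes a NumberUse
// check plus a pass-through of x, and Math.min(x, y) becomes ArithMin(x, y); anything with more
// arguments falls back to a regular call.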
template<typename ChecksFunctor>
bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, const ChecksFunctor& insertChecks)
{
    switch (intrinsic) {
    case AbsIntrinsic: {
        if (argumentCountIncludingThis == 1) { // Math.abs()
            insertChecks();
            set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
            return true;
        }

        if (!MacroAssembler::supportsFloatingPointAbs())
            return false;

        insertChecks();
        Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
            node->mergeFlags(NodeMayOverflowInDFG);
        set(VirtualRegister(resultOperand), node);
        return true;
    }

    case MinIntrinsic:
        return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis, insertChecks);

    case MaxIntrinsic:
        return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis, insertChecks);

    case SqrtIntrinsic:
    case CosIntrinsic:
    case SinIntrinsic:
    case LogIntrinsic: {
        if (argumentCountIncludingThis == 1) {
            insertChecks();
            set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
            return true;
        }

        switch (intrinsic) {
        case SqrtIntrinsic:
            insertChecks();
            set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset))));
            return true;

        case CosIntrinsic:
            insertChecks();
            set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset))));
            return true;

        case SinIntrinsic:
            insertChecks();
            set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset))));
            return true;

        case LogIntrinsic:
            insertChecks();
            set(VirtualRegister(resultOperand), addToGraph(ArithLog, get(virtualRegisterForArgument(1, registerOffset))));
            return true;

        default:
            RELEASE_ASSERT_NOT_REACHED();
            return false;
        }
    }

    case PowIntrinsic: {
        if (argumentCountIncludingThis < 3) {
            // Math.pow() and Math.pow(x) return NaN.
            insertChecks();
            set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
            return true;
        }
        insertChecks();
        VirtualRegister xOperand = virtualRegisterForArgument(1, registerOffset);
        VirtualRegister yOperand = virtualRegisterForArgument(2, registerOffset);
        set(VirtualRegister(resultOperand), addToGraph(ArithPow, get(xOperand), get(yOperand)));
        return true;
    }
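    // E.g. Math.pow(x, y) with both arguments present compiles to ArithPow(x, y); the zero- and
    // one-argument forms fold to NaN above, matching the library function's behavior.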
    case ArrayPushIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
        if (!arrayMode.isJSArray())
            return false;
        switch (arrayMode.type()) {
        case Array::Undecided:
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::ArrayStorage: {
            insertChecks();
            Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
            set(VirtualRegister(resultOperand), arrayPush);
            return true;
        }

        default:
            return false;
        }
    }

    case ArrayPopIntrinsic: {
        if (argumentCountIncludingThis != 1)
            return false;

        ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
        if (!arrayMode.isJSArray())
            return false;
        switch (arrayMode.type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::ArrayStorage: {
            insertChecks();
            Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
            set(VirtualRegister(resultOperand), arrayPop);
            return true;
        }

        default:
            return false;
        }
    }

    case CharCodeAtIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        insertChecks();
        VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
        VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
        Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));

        set(VirtualRegister(resultOperand), charCode);
        return true;
    }

    case CharAtIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        insertChecks();
        VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
        VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
        Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));

        set(VirtualRegister(resultOperand), charCode);
        return true;
    }
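    // Both CharCodeAtIntrinsic and CharAtIntrinsic speculate that 'this' is a string by tagging
    // the node with ArrayMode(Array::String); if that speculation fails at runtime, the usual
    // OSR exit machinery handles it.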
    case Clz32Intrinsic: {
        insertChecks();
        if (argumentCountIncludingThis == 1)
            set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_graph.freeze(jsNumber(32)))));
        else {
            Node* operand = get(virtualRegisterForArgument(1, registerOffset));
            set(VirtualRegister(resultOperand), addToGraph(ArithClz32, operand));
        }
        return true;
    }

    case FromCharCodeIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        insertChecks();
        VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
        Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));

        set(VirtualRegister(resultOperand), charCode);

        return true;
    }

    case RegExpExecIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        insertChecks();
        Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
        set(VirtualRegister(resultOperand), regExpExec);

        return true;
    }

    case RegExpTestIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        insertChecks();
        Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
        set(VirtualRegister(resultOperand), regExpExec);

        return true;
    }

    case RoundIntrinsic: {
        if (argumentCountIncludingThis == 1) {
            insertChecks();
            set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantNaN)));
            return true;
        }
        if (argumentCountIncludingThis == 2) {
            insertChecks();
            Node* operand = get(virtualRegisterForArgument(1, registerOffset));
            Node* roundNode = addToGraph(ArithRound, OpInfo(0), OpInfo(prediction), operand);
            set(VirtualRegister(resultOperand), roundNode);
            return true;
        }
        return false;
    }

    case IMulIntrinsic: {
        if (argumentCountIncludingThis != 3)
            return false;
        insertChecks();
        VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
        VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
        Node* left = get(leftOperand);
        Node* right = get(rightOperand);
        set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
        return true;
    }

    case FRoundIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;
        insertChecks();
        VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
        set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand)));
        return true;
    }

    case DFGTrueIntrinsic: {
        insertChecks();
        set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
        return true;
    }

    case OSRExitIntrinsic: {
        insertChecks();
        addToGraph(ForceOSRExit);
        set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
        return true;
    }

    case IsFinalTierIntrinsic: {
        insertChecks();
        set(VirtualRegister(resultOperand),
            jsConstant(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
        return true;
    }

    case SetInt32HeapPredictionIntrinsic: {
        insertChecks();
        for (int i = 1; i < argumentCountIncludingThis; ++i) {
            Node* node = get(virtualRegisterForArgument(i, registerOffset));
            if (node->hasHeapPrediction())
                node->setHeapPrediction(SpecInt32);
        }
        set(VirtualRegister(resultOperand), addToGraph(JSConstant, OpInfo(m_constantUndefined)));
        return true;
    }

    case CheckInt32Intrinsic: {
        insertChecks();
        for (int i = 1; i < argumentCountIncludingThis; ++i) {
            Node* node = get(virtualRegisterForArgument(i, registerOffset));
            addToGraph(Phantom, Edge(node, Int32Use));
        }
        set(VirtualRegister(resultOperand), jsConstant(jsBoolean(true)));
        return true;
    }

    case FiatInt52Intrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;
        insertChecks();
        VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
        if (enableInt52())
            set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
        else
            set(VirtualRegister(resultOperand), get(operand));
        return true;
    }

    default:
        return false;
    }
}
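// Any intrinsic not handled above reaches the default case and returns false, which makes the
// caller fall back to treating the callee like an ordinary call.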
template<typename ChecksFunctor>
bool ByteCodeParser::handleTypedArrayConstructor(
    int resultOperand, InternalFunction* function, int registerOffset,
    int argumentCountIncludingThis, TypedArrayType type, const ChecksFunctor& insertChecks)
{
    if (!isTypedView(type))
        return false;

    if (function->classInfo() != constructorClassInfoForType(type))
        return false;

    if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
        return false;

    // We only have an intrinsic for the case where you say:
    //
    // new FooArray(blah);
    //
    // Of course, 'blah' could be any of the following:
    //
    // - Integer, indicating that you want to allocate an array of that length.
    //   This is the thing we're hoping for, and what we can actually do meaningful
    //   optimizations for.
    //
    // - Array buffer, indicating that you want to create a view onto that _entire_
    //   buffer.
    //
    // - Non-buffer object, indicating that you want to create a copy of that
    //   object by pretending that it quacks like an array.
    //
    // - Anything else, indicating that you want to have an exception thrown at
    //   you.
    //
    // The intrinsic, NewTypedArray, will behave as if it could do any of these
    // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
    // predicted Int32, then we lock it in as a normal typed array allocation.
    // Otherwise, NewTypedArray turns into a totally opaque function call that
    // may clobber the world - by virtue of it accessing properties on what could
    // be anything.
    //
    // Note that although the generic form of NewTypedArray sounds sort of awful,
    // it is actually quite likely to be more efficient than a fully generic
    // Construct. So, we might want to think about making NewTypedArray variadic,
    // or else making Construct not super slow.

    if (argumentCountIncludingThis != 2)
        return false;

    insertChecks();
    set(VirtualRegister(resultOperand),
        addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
    return true;
}
template<typename ChecksFunctor>
bool ByteCodeParser::handleConstantInternalFunction(
    int resultOperand, InternalFunction* function, int registerOffset,
    int argumentCountIncludingThis, CodeSpecializationKind kind, const ChecksFunctor& insertChecks)
{
    if (verbose)
        dataLog(" Handling constant internal function ", JSValue(function), "\n");

    // If we ever find that we have a lot of internal functions that we specialize for,
    // then we should probably have some sort of hashtable dispatch, or maybe even
    // dispatch straight through the MethodTable of the InternalFunction. But for now,
    // it seems that this case is hit infrequently enough, and the number of functions
    // we know about is small enough, that having just a linear cascade of if statements
    // is good enough.

    if (function->classInfo() == ArrayConstructor::info()) {
        if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
            return false;

        insertChecks();
        if (argumentCountIncludingThis == 2) {
            set(VirtualRegister(resultOperand),
                addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
            return true;
        }

        // FIXME: Array constructor should use "this" as newTarget.
        for (int i = 1; i < argumentCountIncludingThis; ++i)
            addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
        set(VirtualRegister(resultOperand),
            addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
        return true;
    }

    if (function->classInfo() == StringConstructor::info()) {
        insertChecks();

        Node* result;

        if (argumentCountIncludingThis <= 1)
            result = jsConstant(m_vm->smallStrings.emptyString());
        else
            result = addToGraph(CallStringConstructor, get(virtualRegisterForArgument(1, registerOffset)));

        if (kind == CodeForConstruct)
            result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);

        set(VirtualRegister(resultOperand), result);
        return true;
    }

    for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
        bool result = handleTypedArrayConstructor(
            resultOperand, function, registerOffset, argumentCountIncludingThis,
            indexToTypedArrayType(typeIndex), insertChecks);
        if (result)
            return true;
    }

    return false;
}
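// handleGetByOffset() below either constant-folds the load (when the base is a constant whose
// property value can be inferred for the given structure set) or emits a GetButterfly (for
// out-of-line storage) followed by the requested GetByOffset-style node.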
Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, const StructureSet& structureSet, unsigned identifierNumber, PropertyOffset offset, NodeType op)
{
    if (base->hasConstant()) {
        if (JSValue constant = m_graph.tryGetConstantProperty(base->asJSValue(), structureSet, offset)) {
            addToGraph(Phantom, base);
            return weakJSConstant(constant);
        }
    }

    Node* propertyStorage;
    if (isInlineOffset(offset))
        propertyStorage = base;
    else
        propertyStorage = addToGraph(GetButterfly, base);

    StorageAccessData* data = m_graph.m_storageAccessData.add();
    data->offset = offset;
    data->identifierNumber = identifierNumber;

    Node* getByOffset = addToGraph(op, OpInfo(data), OpInfo(prediction), propertyStorage, base);

    return getByOffset;
}
Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
{
    Node* propertyStorage;
    if (isInlineOffset(offset))
        propertyStorage = base;
    else
        propertyStorage = addToGraph(GetButterfly, base);

    StorageAccessData* data = m_graph.m_storageAccessData.add();
    data->offset = offset;
    data->identifierNumber = identifier;

    Node* result = addToGraph(PutByOffset, OpInfo(data), propertyStorage, base, value);

    return result;
}
void ByteCodeParser::emitChecks(const ConstantStructureCheckVector& vector)
{
    for (unsigned i = 0; i < vector.size(); ++i)
        cellConstantWithStructureCheck(vector[i].constant(), vector[i].structure());
}
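// handleGetById() picks one of three strategies, in decreasing order of generality: a plain
// GetById/GetByIdFlush when the status isn't simple, a MultiGetByOffset for several simple
// variants (FTL only), and a fully inlined CheckStructure + GetByOffset for the single-variant
// case, optionally followed by an inlined getter call.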
void ByteCodeParser::handleGetById(
    int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
    const GetByIdStatus& getByIdStatus)
{
    NodeType getById = getByIdStatus.makesCalls() ? GetByIdFlush : GetById;

    if (!getByIdStatus.isSimple() || !getByIdStatus.numVariants() || !Options::enableAccessInlining()) {
        set(VirtualRegister(destinationOperand),
            addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
        return;
    }

    if (getByIdStatus.numVariants() > 1) {
        if (getByIdStatus.makesCalls() || !isFTL(m_graph.m_plan.mode)
            || !Options::enablePolymorphicAccessInlining()) {
            set(VirtualRegister(destinationOperand),
                addToGraph(getById, OpInfo(identifierNumber), OpInfo(prediction), base));
            return;
        }

        if (m_graph.compilation())
            m_graph.compilation()->noticeInlinedGetById();

        // 1) Emit prototype structure checks for all chains. This could sort of maybe not be
        //    optimal, if there is some rarely executed case in the chain that requires a lot
        //    of checks and those checks are not watchpointable.
        for (unsigned variantIndex = getByIdStatus.numVariants(); variantIndex--;)
            emitChecks(getByIdStatus[variantIndex].constantChecks());

        // 2) Emit a MultiGetByOffset
        MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
        data->variants = getByIdStatus.variants();
        data->identifierNumber = identifierNumber;
        set(VirtualRegister(destinationOperand),
            addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
        return;
    }

    ASSERT(getByIdStatus.numVariants() == 1);
    GetByIdVariant variant = getByIdStatus[0];

    if (m_graph.compilation())
        m_graph.compilation()->noticeInlinedGetById();

    Node* originalBase = base;

    addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);

    emitChecks(variant.constantChecks());

    if (variant.alternateBase())
        base = weakJSConstant(variant.alternateBase());

    // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
    // ensure that the base of the original get_by_id is kept alive until we're done with
    // all of the speculations. We only insert the Phantom if there had been a CheckStructure
    // on something other than the base following the CheckStructure on base.
    if (originalBase != base)
        addToGraph(Phantom, originalBase);

    Node* loadedValue = handleGetByOffset(
        variant.callLinkStatus() ? SpecCellOther : prediction,
        base, variant.baseStructure(), identifierNumber, variant.offset(),
        variant.callLinkStatus() ? GetGetterSetterByOffset : GetByOffset);

    if (!variant.callLinkStatus()) {
        set(VirtualRegister(destinationOperand), loadedValue);
        return;
    }

    Node* getter = addToGraph(GetGetter, loadedValue);

    // Make a call. We don't try to get fancy with using the smallest operand number because
    // the stack layout phase should compress the stack anyway.

    unsigned numberOfParameters = 0;
    numberOfParameters++; // The 'this' argument.
    numberOfParameters++; // True return PC.

    // Start with a register offset that corresponds to the last in-use register.
    int registerOffset = virtualRegisterForLocal(
        m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
    registerOffset -= numberOfParameters;
    registerOffset -= JSStack::CallFrameHeaderSize;

    // Get the alignment right.
    registerOffset = -WTF::roundUpToMultipleOf(
        stackAlignmentRegisters(),
        -registerOffset);

    ensureLocals(
        m_inlineStackTop->remapOperand(
            VirtualRegister(registerOffset)).toLocal());

    // Issue SetLocals. This has two effects:
    // 1) That's how handleCall() sees the arguments.
    // 2) If we inline then this ensures that the arguments are flushed so that if you use
    //    the dreaded arguments object on the getter, the right things happen. Well, sort of -
    //    since we only really care about 'this' in this case. But we're not going to take that
    //    shortcut.
    int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
    set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);

    handleCall(
        destinationOperand, Call, InlineCallFrame::GetterCall, OPCODE_LENGTH(op_get_by_id),
        getter, numberOfParameters - 1, registerOffset, *variant.callLinkStatus(), prediction);
}
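// The put-by-id path below mirrors the get-by-id path: emitPutById() is the generic fallback,
// while handlePutById() inlines simple Replace, Transition, and Setter variants and uses
// MultiPutByOffset for polymorphic-but-simple cases in the FTL.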
void ByteCodeParser::emitPutById(
    Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
{
    if (isDirect)
        addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
    else
        addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
}
void ByteCodeParser::handlePutById(
    Node* base, unsigned identifierNumber, Node* value,
    const PutByIdStatus& putByIdStatus, bool isDirect)
{
    if (!putByIdStatus.isSimple() || !putByIdStatus.numVariants() || !Options::enableAccessInlining()) {
        if (!putByIdStatus.isSet())
            addToGraph(ForceOSRExit);
        emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
        return;
    }

    if (putByIdStatus.numVariants() > 1) {
        if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
            || !Options::enablePolymorphicAccessInlining()) {
            emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
            return;
        }

        if (m_graph.compilation())
            m_graph.compilation()->noticeInlinedPutById();

        if (!isDirect) {
            for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
                if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
                    continue;
                emitChecks(putByIdStatus[variantIndex].constantChecks());
            }
        }

        MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
        data->variants = putByIdStatus.variants();
        data->identifierNumber = identifierNumber;
        addToGraph(MultiPutByOffset, OpInfo(data), base, value);
        return;
    }

    ASSERT(putByIdStatus.numVariants() == 1);
    const PutByIdVariant& variant = putByIdStatus[0];

    switch (variant.kind()) {
    case PutByIdVariant::Replace: {
        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
        handlePutByOffset(base, identifierNumber, variant.offset(), value);
        if (m_graph.compilation())
            m_graph.compilation()->noticeInlinedPutById();
        return;
    }

    case PutByIdVariant::Transition: {
        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
        emitChecks(variant.constantChecks());

        ASSERT(variant.oldStructureForTransition()->transitionWatchpointSetHasBeenInvalidated());

        Node* propertyStorage;
        Transition* transition = m_graph.m_transitions.add(
            variant.oldStructureForTransition(), variant.newStructure());

        if (variant.reallocatesStorage()) {

            // If we're growing the property storage then it must be because we're
            // storing into the out-of-line storage.
            ASSERT(!isInlineOffset(variant.offset()));

            if (!variant.oldStructureForTransition()->outOfLineCapacity()) {
                propertyStorage = addToGraph(
                    AllocatePropertyStorage, OpInfo(transition), base);
            } else {
                propertyStorage = addToGraph(
                    ReallocatePropertyStorage, OpInfo(transition),
                    base, addToGraph(GetButterfly, base));
            }
        } else {
            if (isInlineOffset(variant.offset()))
                propertyStorage = base;
            else
                propertyStorage = addToGraph(GetButterfly, base);
        }

        StorageAccessData* data = m_graph.m_storageAccessData.add();
        data->offset = variant.offset();
        data->identifierNumber = identifierNumber;

        addToGraph(
            PutByOffset,
            OpInfo(data),
            propertyStorage,
            base,
            value);

        // FIXME: PutStructure goes last until we fix either
        // https://bugs.webkit.org/show_bug.cgi?id=142921 or
        // https://bugs.webkit.org/show_bug.cgi?id=142924.
        addToGraph(PutStructure, OpInfo(transition), base);

        if (m_graph.compilation())
            m_graph.compilation()->noticeInlinedPutById();
        return;
    }

    case PutByIdVariant::Setter: {
        Node* originalBase = base;

        addToGraph(
            CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);

        emitChecks(variant.constantChecks());

        if (variant.alternateBase())
            base = weakJSConstant(variant.alternateBase());

        Node* loadedValue = handleGetByOffset(
            SpecCellOther, base, variant.baseStructure(), identifierNumber, variant.offset(),
            GetGetterSetterByOffset);

        Node* setter = addToGraph(GetSetter, loadedValue);

        // Make a call. We don't try to get fancy with using the smallest operand number because
        // the stack layout phase should compress the stack anyway.

        unsigned numberOfParameters = 0;
        numberOfParameters++; // The 'this' argument.
        numberOfParameters++; // The new value.
        numberOfParameters++; // True return PC.

        // Start with a register offset that corresponds to the last in-use register.
        int registerOffset = virtualRegisterForLocal(
            m_inlineStackTop->m_profiledBlock->m_numCalleeRegisters - 1).offset();
        registerOffset -= numberOfParameters;
        registerOffset -= JSStack::CallFrameHeaderSize;

        // Get the alignment right.
        registerOffset = -WTF::roundUpToMultipleOf(
            stackAlignmentRegisters(),
            -registerOffset);

        ensureLocals(
            m_inlineStackTop->remapOperand(
                VirtualRegister(registerOffset)).toLocal());

        int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
        set(VirtualRegister(nextRegister++), originalBase, ImmediateNakedSet);
        set(VirtualRegister(nextRegister++), value, ImmediateNakedSet);

        handleCall(
            VirtualRegister().offset(), Call, InlineCallFrame::SetterCall,
            OPCODE_LENGTH(op_put_by_id), setter, numberOfParameters - 1, registerOffset,
            *variant.callLinkStatus(), SpecOther);
        return;
    }

    default:
        emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
        return;
    }
}
void ByteCodeParser::prepareToParseBlock()
{
    clearCaches();
    ASSERT(m_setLocalQueue.isEmpty());
}

void ByteCodeParser::clearCaches()
{
    m_constants.resize(0);
}
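// parseBlock() is the main bytecode-walking loop: it appends DFG nodes for each instruction
// starting at m_currentIndex until it reaches 'limit' (a jump target) or a block terminator.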
bool ByteCodeParser::parseBlock(unsigned limit)
{
    bool shouldContinueParsing = true;

    Interpreter* interpreter = m_vm->interpreter;
    Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
    unsigned blockBegin = m_currentIndex;

    // If we are the first basic block, introduce markers for arguments. This allows
    // us to track if a use of an argument may use the actual argument passed, as
    // opposed to using a value we set explicitly.
    if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
        m_graph.m_arguments.resize(m_numArguments);
        for (unsigned argument = 0; argument < m_numArguments; ++argument) {
            VariableAccessData* variable = newVariableAccessData(
                virtualRegisterForArgument(argument));
            variable->mergeStructureCheckHoistingFailed(
                m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
            variable->mergeCheckArrayHoistingFailed(
                m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));

            Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
            m_graph.m_arguments[argument] = setArgument;
            m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
        }
    }

    while (true) {
        processSetLocalQueue();

        // Don't extend over jump destinations.
        if (m_currentIndex == limit) {
            // Ordinarily we want to plant a jump. But refuse to do this if the block is
            // empty. This is a special case for inlining, which might otherwise create
            // some empty blocks in some cases. When parseBlock() returns with an empty
            // block, it will get repurposed instead of creating a new one. Note that this
            // logic relies on every bytecode resulting in one or more nodes, which would
            // be true anyway except for op_loop_hint, which emits a Phantom to force this
            // to be true.
            if (!m_currentBlock->isEmpty())
                addToGraph(Jump, OpInfo(m_currentIndex));
            return shouldContinueParsing;
        }
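        // Each opcode case below ends in NEXT_OPCODE(...) or LAST_OPCODE(...); these macros
        // (defined earlier in this file) advance m_currentIndex past the instruction, with
        // NEXT_OPCODE continuing this loop and LAST_OPCODE ending the block at a terminator.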
        // Switch on the current bytecode opcode.
        Instruction* currentInstruction = instructionsBegin + m_currentIndex;
        m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
        OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (Options::verboseDFGByteCodeParsing())
            dataLog(" parsing ", currentCodeOrigin(), "\n");

        if (m_graph.compilation()) {
            addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
                Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
        }

        switch (opcodeID) {

        // === Function entry opcodes ===

        case op_enter: {
            Node* undefined = addToGraph(JSConstant, OpInfo(m_constantUndefined));
            // Initialize all locals to undefined.
            for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
                set(virtualRegisterForLocal(i), undefined, ImmediateNakedSet);
            NEXT_OPCODE(op_enter);
        }

        case op_to_this: {
            Node* op1 = getThis();
            if (op1->op() != ToThis) {
                Structure* cachedStructure = currentInstruction[2].u.structure.get();
                if (currentInstruction[2].u.toThisStatus != ToThisOK
                    || !cachedStructure
                    || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
                    || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
                    || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
                    || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
                    setThis(addToGraph(ToThis, op1));
                } else {
                    addToGraph(
                        CheckStructure,
                        OpInfo(m_graph.addStructureSet(cachedStructure)),
                        op1);
                }
            }
            NEXT_OPCODE(op_to_this);
        }

        case op_create_this: {
            int calleeOperand = currentInstruction[2].u.operand;
            Node* callee = get(VirtualRegister(calleeOperand));

            JSFunction* function = callee->dynamicCastConstant<JSFunction*>();
            if (!function) {
                JSCell* cachedFunction = currentInstruction[4].u.jsCell.unvalidatedGet();
                if (cachedFunction
                    && cachedFunction != JSCell::seenMultipleCalleeObjects()
                    && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCell)) {
                    ASSERT(cachedFunction->inherits(JSFunction::info()));

                    FrozenValue* frozen = m_graph.freeze(cachedFunction);
                    addToGraph(CheckCell, OpInfo(frozen), callee);
                    set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(JSConstant, OpInfo(frozen)));

                    function = static_cast<JSFunction*>(cachedFunction);
                }
            }

            bool alreadyEmitted = false;
            if (function) {
                if (FunctionRareData* rareData = function->rareData()) {
                    if (Structure* structure = rareData->allocationStructure()) {
                        m_graph.freeze(rareData);
                        m_graph.watchpoints().addLazily(rareData->allocationProfileWatchpointSet());
                        // The callee is still live up to this point.
                        addToGraph(Phantom, callee);
                        set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure)));
                        alreadyEmitted = true;
                    }
                }
            }
            if (!alreadyEmitted) {
                set(VirtualRegister(currentInstruction[1].u.operand),
                    addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee));
            }
            NEXT_OPCODE(op_create_this);
        }

        case op_new_object: {
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(NewObject,
                    OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
            NEXT_OPCODE(op_new_object);
        }

        case op_new_array: {
            int startOperand = currentInstruction[2].u.operand;
            int numOperands = currentInstruction[3].u.operand;
            ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
            for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
                addVarArgChild(get(VirtualRegister(operandIdx)));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
            NEXT_OPCODE(op_new_array);
        }

        case op_new_array_with_size: {
            int lengthOperand = currentInstruction[2].u.operand;
            ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand))));
            NEXT_OPCODE(op_new_array_with_size);
        }

        case op_new_array_buffer: {
            int startConstant = currentInstruction[2].u.operand;
            int numConstants = currentInstruction[3].u.operand;
            ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
            NewArrayBufferData data;
            data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
            data.numConstants = numConstants;
            data.indexingType = profile->selectIndexingType();

            // If this statement has never executed, we'll have the wrong indexing type in the profile.
            for (int i = 0; i < numConstants; ++i) {
                data.indexingType =
                    leastUpperBoundOfIndexingTypeAndValue(
                        data.indexingType,
                        m_codeBlock->constantBuffer(data.startConstant)[i]);
            }

            m_graph.m_newArrayBufferData.append(data);
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
            NEXT_OPCODE(op_new_array_buffer);
        }

        case op_new_regexp: {
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
            NEXT_OPCODE(op_new_regexp);
        }

        // === Bitwise operations ===

        case op_bitand: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2));
            NEXT_OPCODE(op_bitand);
        }

        case op_bitor: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2));
            NEXT_OPCODE(op_bitor);
        }

        case op_bitxor: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2));
            NEXT_OPCODE(op_bitxor);
        }

        case op_rshift: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(BitRShift, op1, op2));
            NEXT_OPCODE(op_rshift);
        }

        case op_lshift: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(BitLShift, op1, op2));
            NEXT_OPCODE(op_lshift);
        }

        case op_urshift: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(BitURShift, op1, op2));
            NEXT_OPCODE(op_urshift);
        }

        case op_unsigned: {
            set(VirtualRegister(currentInstruction[1].u.operand),
                makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand)))));
            NEXT_OPCODE(op_unsigned);
        }

        // === Increment/Decrement opcodes ===

        case op_inc: {
            int srcDst = currentInstruction[1].u.operand;
            VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
            Node* op = get(srcDstVirtualRegister);
            set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
            NEXT_OPCODE(op_inc);
        }

        case op_dec: {
            int srcDst = currentInstruction[1].u.operand;
            VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
            Node* op = get(srcDstVirtualRegister);
            set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, addToGraph(JSConstant, OpInfo(m_constantOne)))));
            NEXT_OPCODE(op_dec);
        }

        // === Arithmetic operations ===

        case op_add: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            if (op1->hasNumberResult() && op2->hasNumberResult())
                set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2)));
            else
                set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2)));
            NEXT_OPCODE(op_add);
        }
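        // The split above matters because ArithAdd can assume numeric operands, while ValueAdd
        // must also cover string concatenation and other non-numeric ToPrimitive behavior.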
        case op_sub: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2)));
            NEXT_OPCODE(op_sub);
        }

        case op_negate: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1)));
            NEXT_OPCODE(op_negate);
        }

        case op_mul: {
            // Multiply requires that the inputs are not truncated, unfortunately.
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2)));
            NEXT_OPCODE(op_mul);
        }

        case op_mod: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2)));
            NEXT_OPCODE(op_mod);
        }

        case op_div: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2)));
            NEXT_OPCODE(op_div);
        }

        // === Misc operations ===

        case op_debug:
            addToGraph(Breakpoint);
            NEXT_OPCODE(op_debug);

        case op_profile_will_call: {
            addToGraph(ProfileWillCall);
            NEXT_OPCODE(op_profile_will_call);
        }

        case op_profile_did_call: {
            addToGraph(ProfileDidCall);
            NEXT_OPCODE(op_profile_did_call);
        }

        case op_mov: {
            Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), op);
            NEXT_OPCODE(op_mov);
        }

        case op_check_tdz: {
            Node* op = get(VirtualRegister(currentInstruction[1].u.operand));
            addToGraph(CheckNotEmpty, op);
            NEXT_OPCODE(op_check_tdz);
        }

        case op_check_has_instance:
            addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand)));
            NEXT_OPCODE(op_check_has_instance);

        case op_instanceof: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* prototype = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype));
            NEXT_OPCODE(op_instanceof);
        }

        case op_is_undefined: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value));
            NEXT_OPCODE(op_is_undefined);
        }

        case op_is_boolean: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value));
            NEXT_OPCODE(op_is_boolean);
        }

        case op_is_number: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value));
            NEXT_OPCODE(op_is_number);
        }

        case op_is_string: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsString, value));
            NEXT_OPCODE(op_is_string);
        }

        case op_is_object: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value));
            NEXT_OPCODE(op_is_object);
        }

        case op_is_object_or_null: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObjectOrNull, value));
            NEXT_OPCODE(op_is_object_or_null);
        }

        case op_is_function: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value));
            NEXT_OPCODE(op_is_function);
        }

        case op_not: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value));
            NEXT_OPCODE(op_not);
        }

        case op_to_primitive: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value));
            NEXT_OPCODE(op_to_primitive);
        }

        case op_strcat: {
            int startOperand = currentInstruction[2].u.operand;
            int numOperands = currentInstruction[3].u.operand;
#if CPU(X86)
            // X86 doesn't have enough registers to compile MakeRope with three arguments.
            // Rather than try to be clever, we just make MakeRope dumber on this processor.
            const unsigned maxRopeArguments = 2;
#else
            const unsigned maxRopeArguments = 3;
#endif
            auto toStringNodes = std::make_unique<Node*[]>(numOperands);
            for (int i = 0; i < numOperands; i++)
                toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i)));

            for (int i = 0; i < numOperands; i++)
                addToGraph(Phantom, toStringNodes[i]);

            Node* operands[AdjacencyList::Size];
            unsigned indexInOperands = 0;
            for (unsigned i = 0; i < AdjacencyList::Size; ++i)
                operands[i] = 0;
            for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
                if (indexInOperands == maxRopeArguments) {
                    operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]);
                    for (unsigned i = 1; i < AdjacencyList::Size; ++i)
                        operands[i] = 0;
                    indexInOperands = 1;
                }

                ASSERT(indexInOperands < AdjacencyList::Size);
                ASSERT(indexInOperands < maxRopeArguments);
                operands[indexInOperands++] = toStringNodes[operandIdx];
            }
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(MakeRope, operands[0], operands[1], operands[2]));
            NEXT_OPCODE(op_strcat);
        }
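        // Worked example of the rope chunking above with maxRopeArguments == 3: for op_strcat of
        // five strings s0..s4, the loop first emits MakeRope(s0, s1, s2), reuses that rope as
        // operands[0], appends s3 and s4, and the final set() emits MakeRope(rope, s3, s4).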
3015 Node
* op1
= get(VirtualRegister(currentInstruction
[2].u
.operand
));
3016 Node
* op2
= get(VirtualRegister(currentInstruction
[3].u
.operand
));
3017 set(VirtualRegister(currentInstruction
[1].u
.operand
), addToGraph(CompareLess
, op1
, op2
));
3018 NEXT_OPCODE(op_less
);
3022 Node
* op1
= get(VirtualRegister(currentInstruction
[2].u
.operand
));
3023 Node
* op2
= get(VirtualRegister(currentInstruction
[3].u
.operand
));
3024 set(VirtualRegister(currentInstruction
[1].u
.operand
), addToGraph(CompareLessEq
, op1
, op2
));
3025 NEXT_OPCODE(op_lesseq
);
3029 Node
* op1
= get(VirtualRegister(currentInstruction
[2].u
.operand
));
3030 Node
* op2
= get(VirtualRegister(currentInstruction
[3].u
.operand
));
3031 set(VirtualRegister(currentInstruction
[1].u
.operand
), addToGraph(CompareGreater
, op1
, op2
));
3032 NEXT_OPCODE(op_greater
);
3035 case op_greatereq
: {
3036 Node
* op1
= get(VirtualRegister(currentInstruction
[2].u
.operand
));
3037 Node
* op2
= get(VirtualRegister(currentInstruction
[3].u
.operand
));
3038 set(VirtualRegister(currentInstruction
[1].u
.operand
), addToGraph(CompareGreaterEq
, op1
, op2
));
3039 NEXT_OPCODE(op_greatereq
);
3043 Node
* op1
= get(VirtualRegister(currentInstruction
[2].u
.operand
));
3044 Node
* op2
= get(VirtualRegister(currentInstruction
[3].u
.operand
));
3045 set(VirtualRegister(currentInstruction
[1].u
.operand
), addToGraph(CompareEq
, op1
, op2
));
3050 Node
* value
= get(VirtualRegister(currentInstruction
[2].u
.operand
));
3051 set(VirtualRegister(currentInstruction
[1].u
.operand
), addToGraph(CompareEqConstant
, value
, addToGraph(JSConstant
, OpInfo(m_constantNull
))));
3052 NEXT_OPCODE(op_eq_null
);
3056 Node
* op1
= get(VirtualRegister(currentInstruction
[2].u
.operand
));
3057 Node
* op2
= get(VirtualRegister(currentInstruction
[3].u
.operand
));
3058 set(VirtualRegister(currentInstruction
[1].u
.operand
), addToGraph(CompareStrictEq
, op1
, op2
));
3059 NEXT_OPCODE(op_stricteq
);
3063 Node
* op1
= get(VirtualRegister(currentInstruction
[2].u
.operand
));
3064 Node
* op2
= get(VirtualRegister(currentInstruction
[3].u
.operand
));
3065 set(VirtualRegister(currentInstruction
[1].u
.operand
), addToGraph(LogicalNot
, addToGraph(CompareEq
, op1
, op2
)));
3066 NEXT_OPCODE(op_neq
);
3070 Node
* value
= get(VirtualRegister(currentInstruction
[2].u
.operand
));
3071 set(VirtualRegister(currentInstruction
[1].u
.operand
), addToGraph(LogicalNot
, addToGraph(CompareEqConstant
, value
, addToGraph(JSConstant
, OpInfo(m_constantNull
)))));
3072 NEXT_OPCODE(op_neq_null
);
3075 case op_nstricteq
: {
3076 Node
* op1
= get(VirtualRegister(currentInstruction
[2].u
.operand
));
3077 Node
* op2
= get(VirtualRegister(currentInstruction
[3].u
.operand
));
3078 Node
* invertedResult
;
3079 invertedResult
= addToGraph(CompareStrictEq
, op1
, op2
);
3080 set(VirtualRegister(currentInstruction
[1].u
.operand
), addToGraph(LogicalNot
, invertedResult
));
3081 NEXT_OPCODE(op_nstricteq
);
3084 // === Property access operations ===
3086 case op_get_by_val
: {
3087 SpeculatedType prediction
= getPredictionWithoutOSRExit();
3089 Node
* base
= get(VirtualRegister(currentInstruction
[2].u
.operand
));
3090 ArrayMode arrayMode
= getArrayMode(currentInstruction
[4].u
.arrayProfile
, Array::Read
);
3091 Node
* property
= get(VirtualRegister(currentInstruction
[3].u
.operand
));
3092 Node
* getByVal
= addToGraph(GetByVal
, OpInfo(arrayMode
.asWord()), OpInfo(prediction
), base
, property
);
3093 set(VirtualRegister(currentInstruction
[1].u
.operand
), getByVal
);
3095 NEXT_OPCODE(op_get_by_val
);
3098 case op_put_by_val_direct
:
3099 case op_put_by_val
: {
3100 Node
* base
= get(VirtualRegister(currentInstruction
[1].u
.operand
));
3102 ArrayMode arrayMode
= getArrayMode(currentInstruction
[4].u
.arrayProfile
, Array::Write
);
3104 Node
* property
= get(VirtualRegister(currentInstruction
[2].u
.operand
));
3105 Node
* value
= get(VirtualRegister(currentInstruction
[3].u
.operand
));
3107 addVarArgChild(base
);
3108 addVarArgChild(property
);
3109 addVarArgChild(value
);
3110 addVarArgChild(0); // Leave room for property storage.
3111 addVarArgChild(0); // Leave room for length.
3112 addToGraph(Node::VarArg
, opcodeID
== op_put_by_val_direct
? PutByValDirect
: PutByVal
, OpInfo(arrayMode
.asWord()), OpInfo(0));
3114 NEXT_OPCODE(op_put_by_val
);
case op_get_by_id:
case op_get_by_id_out_of_line:
case op_get_array_length: {
    SpeculatedType prediction = getPrediction();

    Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
    unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];

    UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
    GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
        m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
        m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
        currentCodeOrigin(), uid);

    handleGetById(
        currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);

    NEXT_OPCODE(op_get_by_id);
}
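// The GetByIdStatus computed above summarizes what the baseline inline caches observed for
// this access. A plausible mental model, mirroring the GlobalProperty path of
// op_get_from_scope below: a simple, single-structure status can become a structure check
// plus a direct offset load, while anything polymorphic or unprofiled falls back to a
// generic, slower get-by-id node. handleGetById() (defined earlier in this file) is where
// that decision is made.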
case op_put_by_id:
case op_put_by_id_out_of_line:
case op_put_by_id_transition_direct:
case op_put_by_id_transition_normal:
case op_put_by_id_transition_direct_out_of_line:
case op_put_by_id_transition_normal_out_of_line: {
    Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
    Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
    unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
    bool direct = currentInstruction[8].u.operand;

    PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
        m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
        m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
        currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);

    handlePutById(base, identifierNumber, value, putByIdStatus, direct);
    NEXT_OPCODE(op_put_by_id);
}

case op_init_global_const_nop: {
    NEXT_OPCODE(op_init_global_const_nop);
}
case op_init_global_const: {
    Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
    JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
    addToGraph(
        PutGlobalVar,
        OpInfo(globalObject->assertVariableIsInThisObject(currentInstruction[1].u.variablePointer)),
        weakJSConstant(globalObject), value);
    NEXT_OPCODE(op_init_global_const);
}

case op_profile_type: {
    Node* valueToProfile = get(VirtualRegister(currentInstruction[1].u.operand));
    addToGraph(ProfileType, OpInfo(currentInstruction[2].u.location), valueToProfile);
    NEXT_OPCODE(op_profile_type);
}

case op_profile_control_flow: {
    BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation;
    addToGraph(ProfileControlFlow, OpInfo(basicBlockLocation));
    NEXT_OPCODE(op_profile_control_flow);
}
// === Block terminators. ===

case op_jmp: {
    int relativeOffset = currentInstruction[1].u.operand;
    addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
    if (relativeOffset <= 0)
        flushForTerminal();
    LAST_OPCODE(op_jmp);
}

case op_jtrue: {
    unsigned relativeOffset = currentInstruction[2].u.operand;
    Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition);
    LAST_OPCODE(op_jtrue);
}

case op_jfalse: {
    unsigned relativeOffset = currentInstruction[2].u.operand;
    Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
    addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition);
    LAST_OPCODE(op_jfalse);
}

case op_jeq_null: {
    unsigned relativeOffset = currentInstruction[2].u.operand;
    Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
    Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition);
    LAST_OPCODE(op_jeq_null);
}

case op_jneq_null: {
    unsigned relativeOffset = currentInstruction[2].u.operand;
    Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
    Node* condition = addToGraph(CompareEqConstant, value, addToGraph(JSConstant, OpInfo(m_constantNull)));
    addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition);
    LAST_OPCODE(op_jneq_null);
}
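// Note the argument order to branchData(taken, notTaken): for op_jtrue and op_jeq_null the
// bytecode's jump offset is the taken edge, while for op_jfalse and op_jneq_null the jump
// offset becomes the notTaken edge and fall-through (m_currentIndex + OPCODE_LENGTH(...))
// is taken. The condition node is built the same way in both directions; only the edge
// ordering flips, which is also how the fused compare-and-jump forms below encode their
// inverted ("n") variants.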
case op_jless: {
    unsigned relativeOffset = currentInstruction[3].u.operand;
    Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
    Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
    Node* condition = addToGraph(CompareLess, op1, op2);
    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition);
    LAST_OPCODE(op_jless);
}

case op_jlesseq: {
    unsigned relativeOffset = currentInstruction[3].u.operand;
    Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
    Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
    Node* condition = addToGraph(CompareLessEq, op1, op2);
    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition);
    LAST_OPCODE(op_jlesseq);
}

case op_jgreater: {
    unsigned relativeOffset = currentInstruction[3].u.operand;
    Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
    Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
    Node* condition = addToGraph(CompareGreater, op1, op2);
    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition);
    LAST_OPCODE(op_jgreater);
}

case op_jgreatereq: {
    unsigned relativeOffset = currentInstruction[3].u.operand;
    Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
    Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
    Node* condition = addToGraph(CompareGreaterEq, op1, op2);
    addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition);
    LAST_OPCODE(op_jgreatereq);
}

case op_jnless: {
    unsigned relativeOffset = currentInstruction[3].u.operand;
    Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
    Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
    Node* condition = addToGraph(CompareLess, op1, op2);
    addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition);
    LAST_OPCODE(op_jnless);
}

case op_jnlesseq: {
    unsigned relativeOffset = currentInstruction[3].u.operand;
    Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
    Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
    Node* condition = addToGraph(CompareLessEq, op1, op2);
    addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition);
    LAST_OPCODE(op_jnlesseq);
}

case op_jngreater: {
    unsigned relativeOffset = currentInstruction[3].u.operand;
    Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
    Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
    Node* condition = addToGraph(CompareGreater, op1, op2);
    addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition);
    LAST_OPCODE(op_jngreater);
}

case op_jngreatereq: {
    unsigned relativeOffset = currentInstruction[3].u.operand;
    Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
    Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
    Node* condition = addToGraph(CompareGreaterEq, op1, op2);
    addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition);
    LAST_OPCODE(op_jngreatereq);
}
case op_switch_imm: {
    SwitchData& data = *m_graph.m_switchData.add();
    data.kind = SwitchImm;
    data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
    data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
    SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
    for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
        if (!table.branchOffsets[i])
            continue;
        unsigned target = m_currentIndex + table.branchOffsets[i];
        if (target == data.fallThrough.bytecodeIndex())
            continue;
        data.cases.append(SwitchCase::withBytecodeIndex(m_graph.freeze(jsNumber(static_cast<int32_t>(table.min + i))), target));
    }
    addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
    flushIfTerminal(data);
    LAST_OPCODE(op_switch_imm);
}

case op_switch_char: {
    SwitchData& data = *m_graph.m_switchData.add();
    data.kind = SwitchChar;
    data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
    data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
    SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
    for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
        if (!table.branchOffsets[i])
            continue;
        unsigned target = m_currentIndex + table.branchOffsets[i];
        if (target == data.fallThrough.bytecodeIndex())
            continue;
        data.cases.append(
            SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
    }
    addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
    flushIfTerminal(data);
    LAST_OPCODE(op_switch_char);
}
case op_switch_string: {
    SwitchData& data = *m_graph.m_switchData.add();
    data.kind = SwitchString;
    data.switchTableIndex = currentInstruction[1].u.operand;
    data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
    StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
    StringJumpTable::StringOffsetTable::iterator iter;
    StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
    for (iter = table.offsetTable.begin(); iter != end; ++iter) {
        unsigned target = m_currentIndex + iter->value.branchOffset;
        if (target == data.fallThrough.bytecodeIndex())
            continue;
        data.cases.append(
            SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
    }
    addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
    flushIfTerminal(data);
    LAST_OPCODE(op_switch_string);
}
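// All three switch forms funnel into the same shape: a SwitchData carrying the fall-through
// bytecode index plus one SwitchCase per reachable, non-fall-through jump-table entry, then
// a single Switch node whose child is the scrutinee. A rough sketch of the shared pattern,
// using the same names as the cases above:
//
//     SwitchData& data = *m_graph.m_switchData.add();
//     data.kind = ...;                        // SwitchImm / SwitchChar / SwitchString
//     data.fallThrough.setBytecodeIndex(...);
//     for (each table entry whose target differs from the fall-through)
//         data.cases.append(SwitchCase::withBytecodeIndex(caseValue, target));
//     addToGraph(Switch, OpInfo(&data), scrutinee);
//
// Skipping zero offsets and targets equal to the fall-through keeps the case list minimal;
// the actual block pointers are filled in later by linkBlock().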
case op_ret:
    if (inlineCallFrame()) {
        if (m_inlineStackTop->m_returnValue.isValid())
            setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
        m_inlineStackTop->m_didReturn = true;
        if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
            // If we're returning from the first block, then we're done parsing.
            ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.lastBlock());
            shouldContinueParsing = false;
            LAST_OPCODE(op_ret);
        } else {
            // If inlining created blocks, and we're doing a return, then we need some
            // special linking.
            ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
            m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
        }
        if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
            ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
            addToGraph(Jump, OpInfo(0));
            m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
            m_inlineStackTop->m_didEarlyReturn = true;
        }
        LAST_OPCODE(op_ret);
    }
    addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
    LAST_OPCODE(op_ret);

case op_end:
    ASSERT(!inlineCallFrame());
    addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
    LAST_OPCODE(op_end);
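// When parsing an inlined callee, op_ret cannot emit a Return node: the returned value is
// instead written into the caller-provided m_returnValue register, and if control has to
// leave early the block is terminated with a Jump whose target is left as 0 for now. The
// m_needsEarlyReturnLinking / m_didEarlyReturn flags mark such blocks so the jump can be
// pointed at the continuation after the call site once the inlined blocks are linked.
// Only the non-inlined path above (and op_end) emits a real Return.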
case op_throw:
    addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand)));
    flushForTerminal();
    addToGraph(Unreachable);
    LAST_OPCODE(op_throw);

case op_throw_static_error:
    addToGraph(ThrowReferenceError);
    flushForTerminal();
    addToGraph(Unreachable);
    LAST_OPCODE(op_throw_static_error);

case op_call:
    handleCall(currentInstruction, Call, CodeForCall);
    // Verify that handleCall(), which could have inlined the callee, didn't trash m_currentInstruction.
    ASSERT(m_currentInstruction == currentInstruction);
    NEXT_OPCODE(op_call);

case op_construct:
    handleCall(currentInstruction, Construct, CodeForConstruct);
    NEXT_OPCODE(op_construct);

case op_call_varargs: {
    handleVarargsCall(currentInstruction, CallVarargs, CodeForCall);
    NEXT_OPCODE(op_call_varargs);
}

case op_construct_varargs: {
    handleVarargsCall(currentInstruction, ConstructVarargs, CodeForConstruct);
    NEXT_OPCODE(op_construct_varargs);
}
case op_jneq_ptr:
    // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
    // support simmer for a while before making it more general, since it's
    // already gnarly enough as it is.
    ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
    addToGraph(
        CheckCell,
        OpInfo(m_graph.freeze(static_cast<JSCell*>(actualPointerFor(
            m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)))),
        get(VirtualRegister(currentInstruction[1].u.operand)));
    addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
    LAST_OPCODE(op_jneq_ptr);
case op_resolve_scope: {
    int dst = currentInstruction[1].u.operand;
    ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
    unsigned depth = currentInstruction[5].u.operand;

    // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
    if (needsVarInjectionChecks(resolveType))
        addToGraph(VarInjectionWatchpoint);

    switch (resolveType) {
    case GlobalProperty:
    case GlobalVar:
    case GlobalPropertyWithVarInjectionChecks:
    case GlobalVarWithVarInjectionChecks:
        set(VirtualRegister(dst), weakJSConstant(m_inlineStackTop->m_codeBlock->globalObject()));
        if (resolveType == GlobalPropertyWithVarInjectionChecks || resolveType == GlobalVarWithVarInjectionChecks)
            addToGraph(Phantom, getDirect(m_inlineStackTop->remapOperand(VirtualRegister(currentInstruction[2].u.operand))));
        break;
    case LocalClosureVar:
    case ClosureVar:
    case ClosureVarWithVarInjectionChecks: {
        Node* localBase = get(VirtualRegister(currentInstruction[2].u.operand));
        addToGraph(Phantom, localBase); // OSR exit cannot handle resolve_scope on a DCE'd scope.

        // We have various forms of constant folding here. This is necessary to avoid
        // spurious recompiles in dead-but-foldable code.
        if (SymbolTable* symbolTable = currentInstruction[6].u.symbolTable.get()) {
            InferredValue* singleton = symbolTable->singletonScope();
            if (JSValue value = singleton->inferredValue()) {
                m_graph.watchpoints().addLazily(singleton);
                set(VirtualRegister(dst), weakJSConstant(value));
                break;
            }
        }
        if (JSScope* scope = localBase->dynamicCastConstant<JSScope*>()) {
            for (unsigned n = depth; n--;)
                scope = scope->next();
            set(VirtualRegister(dst), weakJSConstant(scope));
            break;
        }
        for (unsigned n = depth; n--;)
            localBase = addToGraph(SkipScope, localBase);
        set(VirtualRegister(dst), localBase);
        break;
    }
    case Dynamic:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    NEXT_OPCODE(op_resolve_scope);
}
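// The op_resolve_scope lowering above tries three strengths of folding before giving up:
// (1) a symbol table whose scope is an inferred singleton becomes a weak constant guarded
// by a lazily-added watchpoint; (2) a base that is already a compile-time JSScope* constant
// is walked ->next() "depth" times right here in the parser; (3) otherwise the walk is
// emitted as a chain of SkipScope nodes. For depth == 2 with a non-constant base the
// emitted chain looks roughly like:
//
//     scope1 = SkipScope(localBase)
//     scope2 = SkipScope(scope1)
//     set(dst, scope2)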
case op_get_from_scope: {
    int dst = currentInstruction[1].u.operand;
    int scope = currentInstruction[2].u.operand;
    unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
    UniquedStringImpl* uid = m_graph.identifiers()[identifierNumber];
    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();

    Structure* structure = 0;
    WatchpointSet* watchpoints = 0;
    uintptr_t operand;
    {
        ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
            watchpoints = currentInstruction[5].u.watchpointSet;
        else
            structure = currentInstruction[5].u.structure.get();
        operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
    }

    UNUSED_PARAM(watchpoints); // We will use this in the future. For now we set it as a way of documenting the fact that that's what index 5 is in GlobalVar mode.

    JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();

    switch (resolveType) {
    case GlobalProperty:
    case GlobalPropertyWithVarInjectionChecks: {
        SpeculatedType prediction = getPrediction();
        GetByIdStatus status = GetByIdStatus::computeFor(structure, uid);
        if (status.state() != GetByIdStatus::Simple
            || status.numVariants() != 1
            || status[0].structureSet().size() != 1) {
            set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope))));
            break;
        }
        Node* base = cellConstantWithStructureCheck(globalObject, status[0].structureSet().onlyStructure());
        addToGraph(Phantom, get(VirtualRegister(scope)));
        set(VirtualRegister(dst), handleGetByOffset(prediction, base, status[0].structureSet(), identifierNumber, operand));
        break;
    }
    case GlobalVar:
    case GlobalVarWithVarInjectionChecks: {
        addToGraph(Phantom, get(VirtualRegister(scope)));
        WatchpointSet* watchpointSet;
        ScopeOffset offset;
        {
            ConcurrentJITLocker locker(globalObject->symbolTable()->m_lock);
            SymbolTableEntry entry = globalObject->symbolTable()->get(locker, uid);
            watchpointSet = entry.watchpointSet();
            offset = entry.scopeOffset();
        }
        if (watchpointSet && watchpointSet->state() == IsWatched) {
            // This has a fun concurrency story. There is the possibility of a race in two
            // directions:
            //
            // We see that the set IsWatched, but in the meantime it gets invalidated: this is
            // fine because if we saw that it IsWatched then we add a watchpoint. If it gets
            // invalidated, then this compilation is invalidated. Note that in the meantime we
            // may load an absurd value from the global object. It's fine to load an absurd
            // value if the compilation is invalidated anyway.
            //
            // We see that the set IsWatched, but the value isn't yet initialized: this isn't
            // possible because of the ordering of operations.
            //
            // Here's how we order operations:
            //
            // Main thread stores to the global object: always store a value first, and only
            // after that do we touch the watchpoint set. There is a fence in the touch, that
            // ensures that the store to the global object always happens before the touch on the
            // watchpoint set.
            //
            // Compilation thread: always first load the state of the watchpoint set, and then
            // load the value. The WatchpointSet::state() method does fences for us to ensure
            // that the load of the state happens before our load of the value.
            //
            // Finalizing compilation: this happens on the main thread and synchronously checks
            // validity of all watchpoint sets.
            //
            // We will only perform optimizations if the load of the state yields IsWatched. That
            // means that at least one store would have happened to initialize the original value
            // of the variable (that is, the value we'd like to constant fold to). There may be
            // other stores that happen after that, but those stores will invalidate the
            // watchpoint set and also the compilation.
            //
            // Note that we need to use the operand, which is a direct pointer at the global,
            // rather than looking up the global by doing variableAt(offset). That's because the
            // internal data structures of JSSegmentedVariableObject are not thread-safe even
            // though accessing the global itself is. The segmentation involves a vector spine
            // that resizes with malloc/free, so if new globals unrelated to the one we are
            // reading are added, we might access freed memory if we do variableAt().
            WriteBarrier<Unknown>* pointer = bitwise_cast<WriteBarrier<Unknown>*>(operand);

            ASSERT(globalObject->findVariableIndex(pointer) == offset);

            JSValue value = pointer->get();
            if (value) {
                m_graph.watchpoints().addLazily(watchpointSet);
                set(VirtualRegister(dst), weakJSConstant(value));
                break;
            }
        }

        SpeculatedType prediction = getPrediction();
        set(VirtualRegister(dst), addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction)));
        break;
    }
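    // A condensed sketch of the ordering contract described in the comment above
    // (illustrative pseudocode, not literal VM code):
    //
    //     Main thread, store path:
    //         1. store the new value into the global
    //         2. only then touch the watchpoint set (the touch fences)
    //
    //     Compilation thread, this code:
    //         1. load watchpointSet->state() (which fences)
    //         2. only then load the value through the direct pointer
    //
    // Because the state load is ordered before the value load, observing IsWatched implies the
    // value read here was actually published by some store; any later store invalidates the set
    // and, through the lazily-added watchpoint, this compilation as well.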
    case LocalClosureVar:
    case ClosureVar:
    case ClosureVarWithVarInjectionChecks: {
        Node* scopeNode = get(VirtualRegister(scope));

        // Ideally we wouldn't have to do this Phantom. But:
        //
        // For the constant case: we must do it because otherwise we would have no way of knowing
        // that the scope is live at OSR here.
        //
        // For the non-constant case: GetClosureVar could be DCE'd, but baseline's implementation
        // won't be able to handle an Undefined scope.
        addToGraph(Phantom, scopeNode);

        // Constant folding in the bytecode parser is important for performance. This may not
        // have executed yet. If it hasn't, then we won't have a prediction. Lacking a
        // prediction, we'd otherwise think that it has to exit. Then when it did execute, we
        // would recompile. But if we can fold it here, we avoid the exit.
        if (JSValue value = m_graph.tryGetConstantClosureVar(scopeNode, ScopeOffset(operand))) {
            set(VirtualRegister(dst), weakJSConstant(value));
            break;
        }

        SpeculatedType prediction = getPrediction();
        set(VirtualRegister(dst),
            addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), scopeNode));
        break;
    }
    case Dynamic:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    NEXT_OPCODE(op_get_from_scope);
}
case op_put_to_scope: {
    unsigned scope = currentInstruction[1].u.operand;
    unsigned identifierNumber = currentInstruction[2].u.operand;
    if (identifierNumber != UINT_MAX)
        identifierNumber = m_inlineStackTop->m_identifierRemap[identifierNumber];
    unsigned value = currentInstruction[3].u.operand;
    ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
    UniquedStringImpl* uid;
    if (identifierNumber != UINT_MAX)
        uid = m_graph.identifiers()[identifierNumber];
    else
        uid = nullptr;

    Structure* structure = nullptr;
    WatchpointSet* watchpoints = nullptr;
    uintptr_t operand;
    {
        ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar)
            watchpoints = currentInstruction[5].u.watchpointSet;
        else
            structure = currentInstruction[5].u.structure.get();
        operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
    }

    JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();

    switch (resolveType) {
    case GlobalProperty:
    case GlobalPropertyWithVarInjectionChecks: {
        PutByIdStatus status;
        if (uid)
            status = PutByIdStatus::computeFor(globalObject, structure, uid, false);
        else
            status = PutByIdStatus(PutByIdStatus::TakesSlowPath);
        if (status.numVariants() != 1
            || status[0].kind() != PutByIdVariant::Replace
            || status[0].structure().size() != 1) {
            addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value)));
            break;
        }
        ASSERT(status[0].structure().onlyStructure() == structure);
        Node* base = cellConstantWithStructureCheck(globalObject, structure);
        addToGraph(Phantom, get(VirtualRegister(scope)));
        handlePutByOffset(base, identifierNumber, static_cast<PropertyOffset>(operand), get(VirtualRegister(value)));
        // Keep scope alive until after put.
        addToGraph(Phantom, get(VirtualRegister(scope)));
        break;
    }
    case GlobalVar:
    case GlobalVarWithVarInjectionChecks: {
        SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
        ASSERT_UNUSED(entry, watchpoints == entry.watchpointSet());
        Node* valueNode = get(VirtualRegister(value));
        addToGraph(PutGlobalVar, OpInfo(operand), weakJSConstant(globalObject), valueNode);
        if (watchpoints && watchpoints->state() != IsInvalidated) {
            // Must happen after the store. See comment for GetGlobalVar.
            addToGraph(NotifyWrite, OpInfo(watchpoints));
        }
        // Keep scope alive until after put.
        addToGraph(Phantom, get(VirtualRegister(scope)));
        break;
    }
    case LocalClosureVar:
    case ClosureVar:
    case ClosureVarWithVarInjectionChecks: {
        Node* scopeNode = get(VirtualRegister(scope));
        Node* valueNode = get(VirtualRegister(value));

        addToGraph(PutClosureVar, OpInfo(operand), scopeNode, valueNode);

        if (watchpoints && watchpoints->state() != IsInvalidated) {
            // Must happen after the store. See comment for GetGlobalVar.
            addToGraph(NotifyWrite, OpInfo(watchpoints));
        }
        break;
    }
    case Dynamic:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    NEXT_OPCODE(op_put_to_scope);
}
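// The write side mirrors the read side: the store node (PutGlobalVar / PutClosureVar) is
// emitted first, and only then is NotifyWrite added for a still-valid watchpoint set, which
// matches the "store the value first, then touch the set" ordering that the GetGlobalVar
// comment above relies on. If the set is already IsInvalidated there is nothing left to
// notify, so the NotifyWrite node is simply skipped.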
case op_loop_hint: {
    // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
    // OSR can only happen at basic block boundaries. Assert that these two statements
    // are compatible.
    RELEASE_ASSERT(m_currentIndex == blockBegin);

    // We never do OSR into an inlined code block. That could not happen, since OSR
    // looks up the code block that is the replacement for the baseline JIT code
    // block. Hence, machine code block = true code block = not inline code block.
    if (!m_inlineStackTop->m_caller)
        m_currentBlock->isOSRTarget = true;

    addToGraph(LoopHint);

    if (m_vm->watchdog && m_vm->watchdog->isEnabled())
        addToGraph(CheckWatchdogTimer);

    NEXT_OPCODE(op_loop_hint);
}
case op_create_lexical_environment: {
    FrozenValue* symbolTable = m_graph.freezeStrong(m_graph.symbolTableFor(currentNodeOrigin().semantic));
    Node* lexicalEnvironment = addToGraph(CreateActivation, OpInfo(symbolTable), get(VirtualRegister(currentInstruction[2].u.operand)));
    set(VirtualRegister(currentInstruction[1].u.operand), lexicalEnvironment);
    set(VirtualRegister(currentInstruction[2].u.operand), lexicalEnvironment);
    NEXT_OPCODE(op_create_lexical_environment);
}
case op_get_scope: {
    // Help the later stages a bit by doing some small constant folding here. Note that this
    // only helps for the first basic block. It's extremely important not to constant fold
    // loads from the scope register later, as that would prevent the DFG from tracking the
    // bytecode-level liveness of the scope register.
    Node* callee = get(VirtualRegister(JSStack::Callee));
    Node* result;
    if (JSFunction* function = callee->dynamicCastConstant<JSFunction*>())
        result = weakJSConstant(function->scope());
    else
        result = addToGraph(GetScope, callee);
    set(VirtualRegister(currentInstruction[1].u.operand), result);
    NEXT_OPCODE(op_get_scope);
}
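// A small example of what the fold above buys: if the callee is a known JSFunction constant
// (say, a hypothetical foo that inlining or profiling pinned down), then
// dynamicCastConstant<JSFunction*>() succeeds and op_get_scope becomes a weak constant for
// foo's scope, which in turn lets the op_resolve_scope / op_get_from_scope lowerings above
// fold through a constant scope chain. Only a non-constant callee pays for a GetScope node.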
case op_create_direct_arguments: {
    noticeArgumentsUse();
    Node* createArguments = addToGraph(CreateDirectArguments);
    set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
    NEXT_OPCODE(op_create_direct_arguments);
}

case op_create_scoped_arguments: {
    noticeArgumentsUse();
    Node* createArguments = addToGraph(CreateScopedArguments, get(VirtualRegister(currentInstruction[2].u.operand)));
    set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
    NEXT_OPCODE(op_create_scoped_arguments);
}

case op_create_out_of_band_arguments: {
    noticeArgumentsUse();
    Node* createArguments = addToGraph(CreateClonedArguments);
    set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
    NEXT_OPCODE(op_create_out_of_band_arguments);
}

case op_get_from_arguments: {
    set(VirtualRegister(currentInstruction[1].u.operand),
        addToGraph(
            GetFromArguments,
            OpInfo(currentInstruction[3].u.operand),
            OpInfo(getPrediction()),
            get(VirtualRegister(currentInstruction[2].u.operand))));
    NEXT_OPCODE(op_get_from_arguments);
}

case op_put_to_arguments: {
    addToGraph(
        PutToArguments,
        OpInfo(currentInstruction[2].u.operand),
        get(VirtualRegister(currentInstruction[1].u.operand)),
        get(VirtualRegister(currentInstruction[3].u.operand)));
    NEXT_OPCODE(op_put_to_arguments);
}
case op_new_func: {
    FunctionExecutable* decl = m_inlineStackTop->m_profiledBlock->functionDecl(currentInstruction[3].u.operand);
    FrozenValue* frozen = m_graph.freezeStrong(decl);
    set(VirtualRegister(currentInstruction[1].u.operand),
        addToGraph(NewFunction, OpInfo(frozen), get(VirtualRegister(currentInstruction[2].u.operand))));
    NEXT_OPCODE(op_new_func);
}

case op_new_func_exp: {
    FunctionExecutable* expr = m_inlineStackTop->m_profiledBlock->functionExpr(currentInstruction[3].u.operand);
    FrozenValue* frozen = m_graph.freezeStrong(expr);
    set(VirtualRegister(currentInstruction[1].u.operand),
        addToGraph(NewFunction, OpInfo(frozen), get(VirtualRegister(currentInstruction[2].u.operand))));
    NEXT_OPCODE(op_new_func_exp);
}

case op_typeof: {
    set(VirtualRegister(currentInstruction[1].u.operand),
        addToGraph(TypeOf, get(VirtualRegister(currentInstruction[2].u.operand))));
    NEXT_OPCODE(op_typeof);
}
case op_to_number: {
    Node* node = get(VirtualRegister(currentInstruction[2].u.operand));
    addToGraph(Phantom, Edge(node, NumberUse));
    set(VirtualRegister(currentInstruction[1].u.operand), node);
    NEXT_OPCODE(op_to_number);
}

case op_to_string: {
    Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
    set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToString, value));
    NEXT_OPCODE(op_to_string);
}

case op_in: {
    set(VirtualRegister(currentInstruction[1].u.operand),
        addToGraph(In, get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand))));
    NEXT_OPCODE(op_in);
}
case op_get_enumerable_length: {
    set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumerableLength,
        get(VirtualRegister(currentInstruction[2].u.operand))));
    NEXT_OPCODE(op_get_enumerable_length);
}

case op_has_generic_property: {
    set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasGenericProperty,
        get(VirtualRegister(currentInstruction[2].u.operand)),
        get(VirtualRegister(currentInstruction[3].u.operand))));
    NEXT_OPCODE(op_has_generic_property);
}

case op_has_structure_property: {
    set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(HasStructureProperty,
        get(VirtualRegister(currentInstruction[2].u.operand)),
        get(VirtualRegister(currentInstruction[3].u.operand)),
        get(VirtualRegister(currentInstruction[4].u.operand))));
    NEXT_OPCODE(op_has_structure_property);
}

case op_has_indexed_property: {
    Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
    ArrayMode arrayMode = getArrayMode(currentInstruction[4].u.arrayProfile, Array::Read);
    Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
    Node* hasIterableProperty = addToGraph(HasIndexedProperty, OpInfo(arrayMode.asWord()), base, property);
    set(VirtualRegister(currentInstruction[1].u.operand), hasIterableProperty);
    NEXT_OPCODE(op_has_indexed_property);
}
case op_get_direct_pname: {
    SpeculatedType prediction = getPredictionWithoutOSRExit();

    Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
    Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
    Node* index = get(VirtualRegister(currentInstruction[4].u.operand));
    Node* enumerator = get(VirtualRegister(currentInstruction[5].u.operand));

    addVarArgChild(base);
    addVarArgChild(property);
    addVarArgChild(index);
    addVarArgChild(enumerator);
    set(VirtualRegister(currentInstruction[1].u.operand),
        addToGraph(Node::VarArg, GetDirectPname, OpInfo(0), OpInfo(prediction)));

    NEXT_OPCODE(op_get_direct_pname);
}

case op_get_property_enumerator: {
    set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetPropertyEnumerator,
        get(VirtualRegister(currentInstruction[2].u.operand))));
    NEXT_OPCODE(op_get_property_enumerator);
}

case op_enumerator_structure_pname: {
    set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorStructurePname,
        get(VirtualRegister(currentInstruction[2].u.operand)),
        get(VirtualRegister(currentInstruction[3].u.operand))));
    NEXT_OPCODE(op_enumerator_structure_pname);
}

case op_enumerator_generic_pname: {
    set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetEnumeratorGenericPname,
        get(VirtualRegister(currentInstruction[2].u.operand)),
        get(VirtualRegister(currentInstruction[3].u.operand))));
    NEXT_OPCODE(op_enumerator_generic_pname);
}

case op_to_index_string: {
    set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToIndexString,
        get(VirtualRegister(currentInstruction[2].u.operand))));
    NEXT_OPCODE(op_to_index_string);
}

default:
    // Parse failed! This should not happen because the capabilities checker
    // should have caught it.
    RELEASE_ASSERT_NOT_REACHED();
}
void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleTargets)
{
    ASSERT(!block->isLinked);
    ASSERT(!block->isEmpty());
    Node* node = block->terminal();
    ASSERT(node->isTerminal());

    switch (node->op()) {
    case Jump:
        node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing());
        break;

    case Branch: {
        BranchData* data = node->branchData();
        data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex());
        data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex());
        break;
    }

    case Switch: {
        SwitchData* data = node->switchData();
        for (unsigned i = node->switchData()->cases.size(); i--;)
            data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex());
        data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex());
        break;
    }

    default:
        break;
    }

    if (verbose)
        dataLog("Marking ", RawPointer(block), " as linked (actually did linking)\n");
    block->didLink();
}

void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets)
{
    for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
        if (verbose)
            dataLog("Attempting to link ", RawPointer(unlinkedBlocks[i].m_block), "\n");
        if (unlinkedBlocks[i].m_needsNormalLinking) {
            if (verbose)
                dataLog(" Does need normal linking.\n");
            linkBlock(unlinkedBlocks[i].m_block, possibleTargets);
            unlinkedBlocks[i].m_needsNormalLinking = false;
        }
    }
}
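// Parsing and linking are deliberately separate phases: while parsing, a Jump/Branch/Switch
// terminal only records bytecode indices (see branchData() and SwitchData above), because
// the destination BasicBlocks may not exist yet. Once every block for a code block (or an
// inlined callee) has been created, linkBlocks() walks the UnlinkedBlock list and
// linkBlock() patches those bytecode indices into real block pointers via
// blockForBytecodeOffset().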
void ByteCodeParser::buildOperandMapsIfNecessary()
{
    if (m_haveBuiltOperandMaps)
        return;

    for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
        m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);

    m_haveBuiltOperandMaps = true;
}
ByteCodeParser::InlineStackEntry::InlineStackEntry(
    ByteCodeParser* byteCodeParser,
    CodeBlock* codeBlock,
    CodeBlock* profiledBlock,
    BasicBlock* callsiteBlockHead,
    JSFunction* callee, // Null if this is a closure call.
    VirtualRegister returnValueVR,
    VirtualRegister inlineCallFrameStart,
    int argumentCountIncludingThis,
    InlineCallFrame::Kind kind)
    : m_byteCodeParser(byteCodeParser)
    , m_codeBlock(codeBlock)
    , m_profiledBlock(profiledBlock)
    , m_callsiteBlockHead(callsiteBlockHead)
    , m_returnValue(returnValueVR)
    , m_didReturn(false)
    , m_didEarlyReturn(false)
    , m_caller(byteCodeParser->m_inlineStackTop)
{
    {
        ConcurrentJITLocker locker(m_profiledBlock->m_lock);
        m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles());
        m_exitProfile.initialize(locker, profiledBlock->exitProfile());

        // We do this while holding the lock because we want to encourage StructureStubInfo's
        // to be potentially added to operations and because the profiled block could be in the
        // middle of LLInt->JIT tier-up in which case we would be adding the info's right now.
        if (m_profiledBlock->hasBaselineJITProfiling()) {
            m_profiledBlock->getStubInfoMap(locker, m_stubInfos);
            m_profiledBlock->getCallLinkInfoMap(locker, m_callLinkInfos);
        }
    }

    m_argumentPositions.resize(argumentCountIncludingThis);
    for (int i = 0; i < argumentCountIncludingThis; ++i) {
        byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition());
        ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last();
        m_argumentPositions[i] = argumentPosition;
    }

    if (m_caller) {
        // Inline case.
        ASSERT(codeBlock != byteCodeParser->m_codeBlock);
        ASSERT(inlineCallFrameStart.isValid());
        ASSERT(callsiteBlockHead);

        m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames->add();
        byteCodeParser->m_graph.freeze(codeBlock->ownerExecutable());
        initializeLazyWriteBarrierForInlineCallFrameExecutable(
            byteCodeParser->m_graph.m_plan.writeBarriers,
            m_inlineCallFrame->executable,
            byteCodeParser->m_codeBlock,
            m_inlineCallFrame,
            byteCodeParser->m_codeBlock->ownerExecutable(),
            codeBlock->ownerExecutable());
        m_inlineCallFrame->setStackOffset(inlineCallFrameStart.offset() - JSStack::CallFrameHeaderSize);
        if (callee) {
            m_inlineCallFrame->calleeRecovery = ValueRecovery::constant(callee);
            m_inlineCallFrame->isClosureCall = false;
        } else
            m_inlineCallFrame->isClosureCall = true;
        m_inlineCallFrame->caller = byteCodeParser->currentCodeOrigin();
        m_inlineCallFrame->arguments.resizeToFit(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet.
        m_inlineCallFrame->kind = kind;

        byteCodeParser->buildOperandMapsIfNecessary();

        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
        m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
        m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());

        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
            UniquedStringImpl* rep = codeBlock->identifier(i).impl();
            BorrowedIdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_graph.identifiers().numberOfIdentifiers());
            if (result.isNewEntry)
                byteCodeParser->m_graph.identifiers().addLazily(rep);
            m_identifierRemap[i] = result.iterator->value;
        }
        for (unsigned i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) {
            // If we inline the same code block multiple times, we don't want to needlessly
            // duplicate its constant buffers.
            HashMap<ConstantBufferKey, unsigned>::iterator iter =
                byteCodeParser->m_constantBufferCache.find(ConstantBufferKey(codeBlock, i));
            if (iter != byteCodeParser->m_constantBufferCache.end()) {
                m_constantBufferRemap[i] = iter->value;
                continue;
            }
            Vector<JSValue>& buffer = codeBlock->constantBufferAsVector(i);
            unsigned newIndex = byteCodeParser->m_codeBlock->addConstantBuffer(buffer);
            m_constantBufferRemap[i] = newIndex;
            byteCodeParser->m_constantBufferCache.add(ConstantBufferKey(codeBlock, i), newIndex);
        }
        for (unsigned i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i) {
            m_switchRemap[i] = byteCodeParser->m_codeBlock->numberOfSwitchJumpTables();
            byteCodeParser->m_codeBlock->addSwitchJumpTable() = codeBlock->switchJumpTable(i);
        }
        m_callsiteBlockHeadNeedsLinking = true;
    } else {
        // Machine code block case.
        ASSERT(codeBlock == byteCodeParser->m_codeBlock);
        ASSERT(!returnValueVR.isValid());
        ASSERT(!inlineCallFrameStart.isValid());
        ASSERT(!callsiteBlockHead);

        m_inlineCallFrame = 0;

        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
        m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
        m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
            m_identifierRemap[i] = i;
        for (size_t i = 0; i < codeBlock->numberOfConstantBuffers(); ++i)
            m_constantBufferRemap[i] = i;
        for (size_t i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i)
            m_switchRemap[i] = i;
        m_callsiteBlockHeadNeedsLinking = false;
    }

    byteCodeParser->m_inlineStackTop = this;
}
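// For the inlined case, the three remap tables above translate callee-local indices into the
// root (machine) code block's tables: identifiers are deduplicated through m_identifierMap,
// constant buffers through m_constantBufferCache (keyed by ConstantBufferKey, so the same
// callee inlined several times shares its buffers), and switch jump tables are appended to
// the root code block and renumbered. The machine-code-block case is the identity mapping,
// which is why it simply fills each remap entry with its own index.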
void ByteCodeParser::parseCodeBlock()
{
    CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;

    if (m_graph.compilation()) {
        m_graph.compilation()->addProfiledBytecodes(
            *m_vm->m_perBytecodeProfiler, m_inlineStackTop->m_profiledBlock);
    }

    if (UNLIKELY(Options::dumpSourceAtDFGTime())) {
        Vector<DeferredSourceDump>& deferredSourceDump = m_graph.m_plan.callback->ensureDeferredSourceDump();
        if (inlineCallFrame()) {
            DeferredSourceDump dump(codeBlock->baselineVersion(), m_codeBlock, JITCode::DFGJIT, inlineCallFrame()->caller);
            deferredSourceDump.append(dump);
        } else
            deferredSourceDump.append(DeferredSourceDump(codeBlock->baselineVersion()));
    }

    if (Options::dumpBytecodeAtDFGTime()) {
        dataLog("Parsing ", *codeBlock);
        if (inlineCallFrame()) {
            dataLog(
                " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT),
                " ", inlineCallFrame()->caller);
        }
        dataLog(
            ": needsActivation = ", codeBlock->needsActivation(),
            ", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n");
        codeBlock->baselineVersion()->dumpBytecode();
    }

    Vector<unsigned, 32> jumpTargets;
    computePreciseJumpTargets(codeBlock, jumpTargets);
    if (Options::dumpBytecodeAtDFGTime()) {
        dataLog("Jump targets: ");
        CommaPrinter comma;
        for (unsigned i = 0; i < jumpTargets.size(); ++i)
            dataLog(comma, jumpTargets[i]);
    }

    for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= jumpTargets.size(); ++jumpTargetIndex) {
        // The maximum bytecode offset to go into the current basic block is either the next jump target, or the end of the instructions.
        unsigned limit = jumpTargetIndex < jumpTargets.size() ? jumpTargets[jumpTargetIndex] : codeBlock->instructions().size();
        ASSERT(m_currentIndex < limit);

        // Loop until we reach the current limit (i.e. next jump target).
        do {
            if (!m_currentBlock) {
                // Check if we can use the last block.
                if (m_graph.numBlocks() && m_graph.lastBlock()->isEmpty()) {
                    // This must be a block belonging to us.
                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
                    // Either the block is linkable or it isn't. If it's linkable then it's the last
                    // block in the blockLinkingTargets list. If it's not then the last block will
                    // have a lower bytecode index than the one we're about to give to this block.
                    if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin != m_currentIndex) {
                        // Make the block linkable.
                        ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < m_currentIndex);
                        m_inlineStackTop->m_blockLinkingTargets.append(m_graph.lastBlock());
                    }
                    // Change its bytecode begin and continue.
                    m_currentBlock = m_graph.lastBlock();
                    m_currentBlock->bytecodeBegin = m_currentIndex;
                } else {
                    RefPtr<BasicBlock> block = adoptRef(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals, PNaN));
                    m_currentBlock = block.get();
                    // This assertion checks two things:
                    // 1) If the bytecodeBegin is greater than currentIndex, then something has gone
                    //    horribly wrong. So, we're probably generating incorrect code.
                    // 2) If the bytecodeBegin is equal to the currentIndex, then we failed to do
                    //    a peephole coalescing of this block in the if statement above. So, we're
                    //    generating suboptimal code and leaving more work for the CFG simplifier.
                    if (!m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
                        unsigned lastBegin =
                            m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin;
                        ASSERT_UNUSED(
                            lastBegin, lastBegin == UINT_MAX || lastBegin < m_currentIndex);
                    }
                    m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
                    m_inlineStackTop->m_blockLinkingTargets.append(block.get());
                    // The first block is definitely an OSR target.
                    if (!m_graph.numBlocks())
                        block->isOSRTarget = true;
                    m_graph.appendBlock(block);
                    prepareToParseBlock();
                }
            }

            bool shouldContinueParsing = parseBlock(limit);

            // We should not have gone beyond the limit.
            ASSERT(m_currentIndex <= limit);

            // We should have planted a terminal, or we just gave up because
            // we realized that the jump target information is imprecise, or we
            // are at the end of an inline function, or we realized that we
            // should stop parsing because there was a return in the first
            // basic block.
            ASSERT(m_currentBlock->isEmpty() || m_currentBlock->terminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()) || !shouldContinueParsing);

            if (!shouldContinueParsing) {
                if (Options::verboseDFGByteCodeParsing())
                    dataLog("Done parsing ", *codeBlock, "\n");
                return;
            }

            m_currentBlock = 0;
        } while (m_currentIndex < limit);
    }

    // Should have reached the end of the instructions.
    ASSERT(m_currentIndex == codeBlock->instructions().size());

    if (Options::verboseDFGByteCodeParsing())
        dataLog("Done parsing ", *codeBlock, " (fell off end)\n");
}
bool ByteCodeParser::parse()
{
    // Set during construction.
    ASSERT(!m_currentIndex);

    if (Options::verboseDFGByteCodeParsing())
        dataLog("Parsing ", *m_codeBlock, "\n");

    m_dfgCodeBlock = m_graph.m_plan.profiledDFGCodeBlock.get();
    if (isFTL(m_graph.m_plan.mode) && m_dfgCodeBlock
        && Options::enablePolyvariantDevirtualization()) {
        if (Options::enablePolyvariantCallInlining())
            CallLinkStatus::computeDFGStatuses(m_dfgCodeBlock, m_callContextMap);
        if (Options::enablePolyvariantByIdInlining())
            m_dfgCodeBlock->getStubInfoMap(m_dfgStubInfos);
    }

    InlineStackEntry inlineStackEntry(
        this, m_codeBlock, m_profiledBlock, 0, 0, VirtualRegister(), VirtualRegister(),
        m_codeBlock->numParameters(), InlineCallFrame::Call);

    parseCodeBlock();

    linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
    m_graph.determineReachability();
    m_graph.killUnreachableBlocks();

    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        ASSERT(block->variablesAtHead.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals());
        ASSERT(block->variablesAtHead.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
        ASSERT(block->variablesAtTail.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals());
        ASSERT(block->variablesAtTail.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
    }

    m_graph.m_localVars = m_numLocals;
    m_graph.m_parameterSlots = m_parameterSlots;

    return true;
}

bool parse(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Parsing");
    return ByteCodeParser(graph).parse();
}

} } // namespace JSC::DFG