/*
 * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
27 #include "DFGByteCodeParser.h"
31 #include "ArrayConstructor.h"
32 #include "CallLinkStatus.h"
33 #include "CodeBlock.h"
34 #include "CodeBlockWithJITType.h"
35 #include "DFGArrayMode.h"
36 #include "DFGCapabilities.h"
37 #include "DFGJITCode.h"
38 #include "GetByIdStatus.h"
40 #include "JSActivation.h"
41 #include "JSCInlines.h"
42 #include "PreciseJumpTargets.h"
43 #include "PutByIdStatus.h"
44 #include "StackAlignment.h"
45 #include "StringConstructor.h"
46 #include <wtf/CommaPrinter.h>
47 #include <wtf/HashMap.h>
48 #include <wtf/MathExtras.h>
49 #include <wtf/StdLibExtras.h>
namespace JSC { namespace DFG {
class ConstantBufferKey {
public:
    ConstantBufferKey()
        : m_codeBlock(0)
        , m_index(0)
    {
    }
    
    ConstantBufferKey(WTF::HashTableDeletedValueType)
        : m_codeBlock(0)
        , m_index(1)
    {
    }
    
    ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
        : m_codeBlock(codeBlock)
        , m_index(index)
    {
    }
    
    bool operator==(const ConstantBufferKey& other) const
    {
        return m_codeBlock == other.m_codeBlock
            && m_index == other.m_index;
    }
    
    unsigned hash() const
    {
        return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
    }
    
    bool isHashTableDeletedValue() const
    {
        return !m_codeBlock && m_index;
    }
    
    CodeBlock* codeBlock() const { return m_codeBlock; }
    unsigned index() const { return m_index; }
    
private:
    CodeBlock* m_codeBlock;
    unsigned m_index;
};
struct ConstantBufferKeyHash {
    static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
    static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
    {
        return a == b;
    }
    
    static const bool safeToCompareToEmptyOrDeleted = true;
};
} } // namespace JSC::DFG
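// Teach WTF's HashMap how to use ConstantBufferKey as a key: the DefaultHash and
// HashTraits specializations below plug in ConstantBufferKeyHash and the usual
// simple-class traits, which is what lets m_constantBufferCache (declared further
// down) be a HashMap<ConstantBufferKey, unsigned>.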
namespace WTF {

template<typename T> struct DefaultHash;
template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
    typedef JSC::DFG::ConstantBufferKeyHash Hash;
};

template<typename T> struct HashTraits;
template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };

} // namespace WTF
namespace JSC { namespace DFG {
// === ByteCodeParser ===
//
// This class is used to compile the dataflow graph from a CodeBlock.
class ByteCodeParser {
public:
    ByteCodeParser(Graph& graph)
        : m_vm(&graph.m_vm)
        , m_codeBlock(graph.m_codeBlock)
        , m_profiledBlock(graph.m_profiledBlock)
        , m_graph(graph)
        , m_currentBlock(0)
        , m_currentIndex(0)
        , m_constantUndefined(UINT_MAX)
        , m_constantNull(UINT_MAX)
        , m_constantNaN(UINT_MAX)
        , m_constant1(UINT_MAX)
        , m_constants(m_codeBlock->numberOfConstantRegisters())
        , m_numArguments(m_codeBlock->numParameters())
        , m_numLocals(m_codeBlock->m_numCalleeRegisters)
        , m_parameterSlots(0)
        , m_numPassedVarArgs(0)
        , m_inlineStackTop(0)
        , m_haveBuiltOperandMaps(false)
        , m_emptyJSValueIndex(UINT_MAX)
        , m_currentInstruction(0)
    {
        ASSERT(m_profiledBlock);
    }
    
    // Parse a full CodeBlock of bytecode.
    bool parse();
    
private:
    struct InlineStackEntry;
    
    // Just parse from m_currentIndex to the end of the current CodeBlock.
    void parseCodeBlock();
    void ensureLocals(unsigned newNumLocals)
    {
        if (newNumLocals <= m_numLocals)
            return;
        m_numLocals = newNumLocals;
        for (size_t i = 0; i < m_graph.numBlocks(); ++i)
            m_graph.block(i)->ensureLocals(newNumLocals);
    }
    
    // Helper for min and max.
    bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
    
    // Handle calls. This resolves issues surrounding inlining and intrinsics.
    void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset);
    void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind);
    void emitFunctionChecks(const CallLinkStatus&, Node* callTarget, int registerOffset, CodeSpecializationKind);
    void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
    // Handle inlining. Return true if it succeeded, false if we need to plant a call.
    bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
    // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
    bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
    bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType);
    bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
    Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value);
    Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset);
    void handleGetByOffset(
        int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
        PropertyOffset);
    void handleGetById(
        int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
        const GetByIdStatus&);
    void emitPutById(
        Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect);
    void handlePutById(
        Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&,
        bool isDirect);
    Node* emitPrototypeChecks(Structure*, IntendedStructureChain*);
    
    Node* getScope(bool skipTop, unsigned skipCount);
    
    // Prepare to parse a block.
    void prepareToParseBlock();
    // Parse a single basic block of bytecode instructions.
    bool parseBlock(unsigned limit);
    // Link block successors.
    void linkBlock(BasicBlock*, Vector<BasicBlock*>& possibleTargets);
    void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets);
    
    VariableAccessData* newVariableAccessData(VirtualRegister operand, bool isCaptured)
    {
        ASSERT(!operand.isConstant());
        
        m_graph.m_variableAccessData.append(VariableAccessData(operand, isCaptured));
        return &m_graph.m_variableAccessData.last();
    }
    
    // Get/Set the operands/result of a bytecode instruction.
    Node* getDirect(VirtualRegister operand)
    {
        // Is this a constant?
        if (operand.isConstant()) {
            unsigned constant = operand.toConstantIndex();
            ASSERT(constant < m_constants.size());
            return getJSConstant(constant);
        }
        
        // Is this an argument?
        if (operand.isArgument())
            return getArgument(operand);
        
        // Must be a local.
        return getLocal(operand);
    }
    
    Node* get(VirtualRegister operand)
    {
        if (inlineCallFrame()) {
            if (!inlineCallFrame()->isClosureCall) {
                JSFunction* callee = inlineCallFrame()->calleeConstant();
                if (operand.offset() == JSStack::Callee)
                    return cellConstant(callee);
                if (operand.offset() == JSStack::ScopeChain)
                    return cellConstant(callee->scope());
            } else if (operand.offset() == JSStack::Callee)
                return addToGraph(GetCallee);
            else if (operand.offset() == JSStack::ScopeChain)
                return addToGraph(GetMyScope);
        }
        
        return getDirect(m_inlineStackTop->remapOperand(operand));
    }
    
    enum SetMode {
        // A normal set which follows a two-phase commit that spans code origins. During
        // the current code origin it issues a MovHint, and at the start of the next
        // code origin there will be a SetLocal. If the local needs flushing, the second
        // SetLocal will be preceded with a Flush.
        NormalSet,
        
        // A set where the SetLocal happens immediately and there is still a Flush. This
        // is relevant when assigning to a local in tricky situations for the delayed
        // SetLocal logic but where we know that we have not performed any side effects
        // within this code origin. This is a safe replacement for NormalSet anytime we
        // know that we have not yet performed side effects in this code origin.
        ImmediateSetWithFlush,
        
        // A set where the SetLocal happens immediately and we do not Flush it even if
        // this is a local that is marked as needing it. This is relevant when
        // initializing locals at the top of a function.
        ImmediateNakedSet
    };
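    // setDirect() below implements the NormalSet two-phase commit: it always records a
    // MovHint for OSR, but in NormalSet mode the actual SetLocal is queued as a
    // DelayedSetLocal (m_setLocalQueue) and only materialized at the start of the next
    // code origin. The Immediate modes execute the SetLocal right away.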
    Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
    {
        addToGraph(MovHint, OpInfo(operand.offset()), value);
        
        DelayedSetLocal delayed = DelayedSetLocal(operand, value);
        
        if (setMode == NormalSet) {
            m_setLocalQueue.append(delayed);
            return 0;
        }
        
        return delayed.execute(this, setMode);
    }
    
    Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
    {
        return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
    }
    
    Node* injectLazyOperandSpeculation(Node* node)
    {
        ASSERT(node->op() == GetLocal);
        ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex);
        ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        LazyOperandValueProfileKey key(m_currentIndex, node->local());
        SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key);
        node->variableAccessData()->predict(prediction);
        return node;
    }
    
    // Used in implementing get/set, above, where the operand is a local variable.
    Node* getLocal(VirtualRegister operand)
    {
        unsigned local = operand.toLocal();
        
        if (local < m_localWatchpoints.size()) {
            if (VariableWatchpointSet* set = m_localWatchpoints[local]) {
                if (JSValue value = set->inferredValue()) {
                    addToGraph(FunctionReentryWatchpoint, OpInfo(m_codeBlock->symbolTable()));
                    addToGraph(VariableWatchpoint, OpInfo(set));
                    // Note: this is very special from an OSR exit standpoint. We wouldn't be
                    // able to do this for most locals, but it works here because we're dealing
                    // with a flushed local. For most locals we would need to issue a GetLocal
                    // here and ensure that we have uses in DFG IR wherever there would have
                    // been uses in bytecode. Clearly this optimization does not do this. But
                    // that's fine, because we don't need to track liveness for captured
                    // locals, and this optimization only kicks in for captured locals.
                    return inferredConstant(value);
                }
            }
        }
        
        Node* node = m_currentBlock->variablesAtTail.local(local);
        bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
        
        // This has two goals: 1) link together variable access datas, and 2)
        // try to avoid creating redundant GetLocals. (1) is required for
        // correctness - no other phase will ensure that block-local variable
        // access data unification is done correctly. (2) is purely opportunistic
        // and is meant as a compile-time optimization only.
        
        VariableAccessData* variable;
        
        if (node) {
            variable = node->variableAccessData();
            variable->mergeIsCaptured(isCaptured);
            
            if (!isCaptured) {
                switch (node->op()) {
                case GetLocal:
                    return node;
                case SetLocal:
                    return node->child1().node();
                default:
                    break;
                }
            }
        } else
            variable = newVariableAccessData(operand, isCaptured);
        
        node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
        m_currentBlock->variablesAtTail.local(local) = node;
        return node;
    }
    
    Node* setLocal(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
    {
        unsigned local = operand.toLocal();
        bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
        
        if (setMode != ImmediateNakedSet) {
            ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
            if (isCaptured || argumentPosition)
                flushDirect(operand, argumentPosition);
        }
        
        VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
        variableAccessData->mergeStructureCheckHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
            || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint));
        variableAccessData->mergeCheckArrayHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
        Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
        m_currentBlock->variablesAtTail.local(local) = node;
        return node;
    }
    
    // Used in implementing get/set, above, where the operand is an argument.
    Node* getArgument(VirtualRegister operand)
    {
        unsigned argument = operand.toArgument();
        ASSERT(argument < m_numArguments);
        
        Node* node = m_currentBlock->variablesAtTail.argument(argument);
        bool isCaptured = m_codeBlock->isCaptured(operand);
        
        VariableAccessData* variable;
        
        if (node) {
            variable = node->variableAccessData();
            variable->mergeIsCaptured(isCaptured);
            
            switch (node->op()) {
            case GetLocal:
                return node;
            case SetLocal:
                return node->child1().node();
            default:
                break;
            }
        } else
            variable = newVariableAccessData(operand, isCaptured);
        
        node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
        m_currentBlock->variablesAtTail.argument(argument) = node;
        return node;
    }
    
    Node* setArgument(VirtualRegister operand, Node* value, SetMode setMode = NormalSet)
    {
        unsigned argument = operand.toArgument();
        ASSERT(argument < m_numArguments);
        
        bool isCaptured = m_codeBlock->isCaptured(operand);
        
        VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
        
        // Always flush arguments, except for 'this'. If 'this' is created by us,
        // then make sure that it's never unboxed.
        if (argument) {
            if (setMode != ImmediateNakedSet)
                flushDirect(operand);
        } else if (m_codeBlock->specializationKind() == CodeForConstruct)
            variableAccessData->mergeShouldNeverUnbox(true);
        
        variableAccessData->mergeStructureCheckHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
            || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint));
        variableAccessData->mergeCheckArrayHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
        Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
        m_currentBlock->variablesAtTail.argument(argument) = node;
        return node;
    }
    
    ArgumentPosition* findArgumentPositionForArgument(int argument)
    {
        InlineStackEntry* stack = m_inlineStackTop;
        while (stack->m_inlineCallFrame)
            stack = stack->m_caller;
        return stack->m_argumentPositions[argument];
    }
    
    ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand)
    {
        for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
            InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
            if (!inlineCallFrame)
                break;
            if (operand.offset() < static_cast<int>(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize))
                continue;
            if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
                continue;
            if (operand.offset() >= static_cast<int>(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size()))
                continue;
            int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument();
            return stack->m_argumentPositions[argument];
        }
        return 0;
    }
    
    ArgumentPosition* findArgumentPosition(VirtualRegister operand)
    {
        if (operand.isArgument())
            return findArgumentPositionForArgument(operand.toArgument());
        return findArgumentPositionForLocal(operand);
    }
    
    void addConstant(JSValue value)
    {
        unsigned constantIndex = m_codeBlock->addConstantLazily();
        initializeLazyWriteBarrierForConstant(
            m_graph.m_plan.writeBarriers,
            m_codeBlock->constants()[constantIndex],
            m_codeBlock,
            constantIndex,
            m_codeBlock->ownerExecutable(),
            value);
    }
    
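    // Flushing keeps the value of an operand alive in its stack slot by emitting a Flush
    // of that operand's variable access data. We do this (roughly) for captured variables
    // and for arguments, which can be observed from outside the current code origin, for
    // example by an inlined callee or by OSR exit.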
    void flush(VirtualRegister operand)
    {
        flushDirect(m_inlineStackTop->remapOperand(operand));
    }
    
    void flushDirect(VirtualRegister operand)
    {
        flushDirect(operand, findArgumentPosition(operand));
    }
    
    void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition)
    {
        bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
        
        ASSERT(!operand.isConstant());
        
        Node* node = m_currentBlock->variablesAtTail.operand(operand);
        
        VariableAccessData* variable;
        
        if (node) {
            variable = node->variableAccessData();
            variable->mergeIsCaptured(isCaptured);
        } else
            variable = newVariableAccessData(operand, isCaptured);
        
        node = addToGraph(Flush, OpInfo(variable));
        m_currentBlock->variablesAtTail.operand(operand) = node;
        if (argumentPosition)
            argumentPosition->addVariable(variable);
    }
    
    void flush(InlineStackEntry* inlineStackEntry)
    {
        int numArguments;
        if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) {
            numArguments = inlineCallFrame->arguments.size();
            if (inlineCallFrame->isClosureCall) {
                flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee)));
                flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ScopeChain)));
            }
        } else
            numArguments = inlineStackEntry->m_codeBlock->numParameters();
        for (unsigned argument = numArguments; argument-- > 1;)
            flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument)));
        for (int local = 0; local < inlineStackEntry->m_codeBlock->m_numVars; ++local) {
            if (!inlineStackEntry->m_codeBlock->isCaptured(virtualRegisterForLocal(local)))
                continue;
            flushDirect(inlineStackEntry->remapOperand(virtualRegisterForLocal(local)));
        }
    }
    
    void flushForTerminal()
    {
        for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
            flush(inlineStackEntry);
    }
    
    void flushForReturn()
    {
        flush(m_inlineStackTop);
    }
    
    void flushIfTerminal(SwitchData& data)
    {
        if (data.fallThrough.bytecodeIndex() > m_currentIndex)
            return;
        
        for (unsigned i = data.cases.size(); i--;) {
            if (data.cases[i].target.bytecodeIndex() > m_currentIndex)
                return;
        }
        
        flushForTerminal();
    }
    
    // NOTE: Only use this to construct constants that arise from non-speculative
    // constant folding. I.e. creating constants using this if we had constant
    // field inference would be a bad idea, since the bytecode parser's folding
    // doesn't handle liveness preservation.
    Node* getJSConstantForValue(JSValue constantValue)
    {
        unsigned constantIndex;
        if (!m_codeBlock->findConstant(constantValue, constantIndex)) {
            addConstant(constantValue);
            m_constants.append(ConstantRecord());
        }
        
        ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        
        return getJSConstant(constantIndex);
    }
    
    Node* getJSConstant(unsigned constant)
    {
        Node* node = m_constants[constant].asJSValue;
        if (node)
            return node;
        
        Node* result = addToGraph(JSConstant, OpInfo(constant));
        m_constants[constant].asJSValue = result;
        return result;
    }
    
    // Helper functions to get/set the this value.
    Node* getThis()
    {
        return get(m_inlineStackTop->m_codeBlock->thisRegister());
    }
    
    void setThis(Node* value)
    {
        set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
    }
    
    // Convenience methods for checking nodes for constants.
    bool isJSConstant(Node* node)
    {
        return node->op() == JSConstant;
    }
    bool isInt32Constant(Node* node)
    {
        return isJSConstant(node) && valueOfJSConstant(node).isInt32();
    }
    // Convenience methods for getting constant values.
    JSValue valueOfJSConstant(Node* node)
    {
        ASSERT(isJSConstant(node));
        return m_codeBlock->getConstant(FirstConstantRegisterIndex + node->constantNumber());
    }
    int32_t valueOfInt32Constant(Node* node)
    {
        ASSERT(isInt32Constant(node));
        return valueOfJSConstant(node).asInt32();
    }
    
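    // The helpers below (constantUndefined, constantNull, one, constantNaN) lazily make
    // sure the given value exists in the CodeBlock's constant pool, remembering its index
    // in the corresponding m_constant* field so that later uses can simply emit a
    // JSConstant node for that index.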
    // This method returns a JSConstant with the value 'undefined'.
    Node* constantUndefined()
    {
        // Has m_constantUndefined been set up yet?
        if (m_constantUndefined == UINT_MAX) {
            // Search the constant pool for undefined, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
                if (testMe.isUndefined())
                    return getJSConstant(m_constantUndefined);
            }
            
            // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            addConstant(jsUndefined());
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }
        
        // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
        return getJSConstant(m_constantUndefined);
    }
    
    // This method returns a JSConstant with the value 'null'.
    Node* constantNull()
    {
        // Has m_constantNull been set up yet?
        if (m_constantNull == UINT_MAX) {
            // Search the constant pool for null, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
                if (testMe.isNull())
                    return getJSConstant(m_constantNull);
            }
            
            // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            addConstant(jsNull());
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }
        
        // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
        return getJSConstant(m_constantNull);
    }
    
    // This method returns a DoubleConstant with the value 1.
    Node* one()
    {
        // Has m_constant1 been set up yet?
        if (m_constant1 == UINT_MAX) {
            // Search the constant pool for the value 1, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
                if (testMe.isInt32() && testMe.asInt32() == 1)
                    return getJSConstant(m_constant1);
            }
            
            // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            addConstant(jsNumber(1));
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }
        
        // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
        return getJSConstant(m_constant1);
    }
    
    // This method returns a DoubleConstant with the value NaN.
    Node* constantNaN()
    {
        JSValue nan = jsNaN();
        
        // Has m_constantNaN been set up yet?
        if (m_constantNaN == UINT_MAX) {
            // Search the constant pool for the value NaN, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
                if (JSValue::encode(testMe) == JSValue::encode(nan))
                    return getJSConstant(m_constantNaN);
            }
            
            // Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            addConstant(nan);
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }
        
        // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
        ASSERT(std::isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
        return getJSConstant(m_constantNaN);
    }
    
    Node* cellConstant(JSCell* cell)
    {
        HashMap<JSCell*, Node*>::AddResult result = m_cellConstantNodes.add(cell, nullptr);
        if (result.isNewEntry) {
            ASSERT(!Heap::isZombified(cell));
            result.iterator->value = addToGraph(WeakJSConstant, OpInfo(cell));
        }
        
        return result.iterator->value;
    }
    
    Node* inferredConstant(JSValue value)
    {
        if (value.isCell())
            return cellConstant(value.asCell());
        return getJSConstantForValue(value);
    }
    
    InlineCallFrame* inlineCallFrame()
    {
        return m_inlineStackTop->m_inlineCallFrame;
    }
    
    CodeOrigin currentCodeOrigin()
    {
        return CodeOrigin(m_currentIndex, inlineCallFrame());
    }
    
    BranchData* branchData(unsigned taken, unsigned notTaken)
    {
        // We assume that branches originating from bytecode always have a fall-through. We
        // use this assumption to avoid checking for the creation of terminal blocks.
        ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex));
        BranchData* data = m_graph.m_branchData.add();
        *data = BranchData::withBytecodeIndices(taken, notTaken);
        return data;
    }
    
* addToGraph(NodeType op
, Node
* child1
= 0, Node
* child2
= 0, Node
* child3
= 0)
761 Node
* result
= m_graph
.addNode(
762 SpecNone
, op
, NodeOrigin(currentCodeOrigin()), Edge(child1
), Edge(child2
),
765 m_currentBlock
->append(result
);
768 Node
* addToGraph(NodeType op
, Edge child1
, Edge child2
= Edge(), Edge child3
= Edge())
770 Node
* result
= m_graph
.addNode(
771 SpecNone
, op
, NodeOrigin(currentCodeOrigin()), child1
, child2
, child3
);
773 m_currentBlock
->append(result
);
776 Node
* addToGraph(NodeType op
, OpInfo info
, Node
* child1
= 0, Node
* child2
= 0, Node
* child3
= 0)
778 Node
* result
= m_graph
.addNode(
779 SpecNone
, op
, NodeOrigin(currentCodeOrigin()), info
, Edge(child1
), Edge(child2
),
782 m_currentBlock
->append(result
);
785 Node
* addToGraph(NodeType op
, OpInfo info1
, OpInfo info2
, Node
* child1
= 0, Node
* child2
= 0, Node
* child3
= 0)
787 Node
* result
= m_graph
.addNode(
788 SpecNone
, op
, NodeOrigin(currentCodeOrigin()), info1
, info2
,
789 Edge(child1
), Edge(child2
), Edge(child3
));
791 m_currentBlock
->append(result
);
795 Node
* addToGraph(Node::VarArgTag
, NodeType op
, OpInfo info1
, OpInfo info2
)
797 Node
* result
= m_graph
.addNode(
798 SpecNone
, Node::VarArg
, op
, NodeOrigin(currentCodeOrigin()), info1
, info2
,
799 m_graph
.m_varArgChildren
.size() - m_numPassedVarArgs
, m_numPassedVarArgs
);
801 m_currentBlock
->append(result
);
803 m_numPassedVarArgs
= 0;
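    
    // Var-arg nodes take their children from m_graph.m_varArgChildren rather than from
    // fixed edge slots: addVarArgChild() appends an edge and bumps m_numPassedVarArgs,
    // and the Node::VarArg overload of addToGraph() above consumes that count to mark
    // which slice of m_varArgChildren belongs to the new node.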
    void addVarArgChild(Node* child)
    {
        m_graph.m_varArgChildren.append(Edge(child));
        m_numPassedVarArgs++;
    }
    
    Node* addCall(int result, NodeType op, int callee, int argCount, int registerOffset)
    {
        SpeculatedType prediction = getPrediction();
        
        addVarArgChild(get(VirtualRegister(callee)));
        size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount;
        if (parameterSlots > m_parameterSlots)
            m_parameterSlots = parameterSlots;
        
        int dummyThisArgument = op == Call ? 0 : 1;
        for (int i = 0 + dummyThisArgument; i < argCount; ++i)
            addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
        
        Node* call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
        set(VirtualRegister(result), call);
        return call;
    }
    
    Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure)
    {
        Node* objectNode = cellConstant(object);
        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
        return objectNode;
    }
    
    Node* cellConstantWithStructureCheck(JSCell* object)
    {
        return cellConstantWithStructureCheck(object, object->structure());
    }
    
    SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex)
    {
        ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex);
    }
    
    SpeculatedType getPrediction(unsigned bytecodeIndex)
    {
        SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex);
        
        if (prediction == SpecNone) {
            // We have no information about what values this node generates. Give up
            // on executing this code, since we're likely to do more damage than good.
            addToGraph(ForceOSRExit);
        }
        
        return prediction;
    }
    
    SpeculatedType getPredictionWithoutOSRExit()
    {
        return getPredictionWithoutOSRExit(m_currentIndex);
    }
    
    SpeculatedType getPrediction()
    {
        return getPrediction(m_currentIndex);
    }
    
    ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
    {
        ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
        return ArrayMode::fromObserved(locker, profile, action, false);
    }
    
    ArrayMode getArrayMode(ArrayProfile* profile)
    {
        return getArrayMode(profile, Array::Read);
    }
    
    ArrayMode getArrayModeConsideringSlowPath(ArrayProfile* profile, Array::Action action)
    {
        ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
        
        profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock);
        
        bool makeSafe =
            m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
            || profile->outOfBounds(locker);
        
        ArrayMode result = ArrayMode::fromObserved(locker, profile, action, makeSafe);
        
        return result;
    }
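    
    // makeSafe() and makeDivSafe() fold profiling data into freshly created arithmetic
    // nodes: OSR exit sites recorded against this bytecode index and the baseline JIT's
    // slow-case counters are merged into the node's flags (NodeMayOverflow* /
    // NodeMayNegZero*), so later phases know which speculations have already failed.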
    Node* makeSafe(Node* node)
    {
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
            node->mergeFlags(NodeMayOverflowInDFG);
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            node->mergeFlags(NodeMayNegZeroInDFG);
        
        if (!isX86() && node->op() == ArithMod)
            return node;
        
        if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex))
            return node;
        
        switch (node->op()) {
        case UInt32ToNumber:
        case ArithAdd:
        case ArithSub:
        case ValueAdd:
        case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double.
            node->mergeFlags(NodeMayOverflowInBaseline);
            break;
            
        case ArithNegate:
            // Currently we can't tell the difference between a negation overflowing
            // (i.e. -(1 << 31)) or generating negative zero (i.e. -0). If it took slow
            // path then we assume that it did both of those things.
            node->mergeFlags(NodeMayOverflowInBaseline);
            node->mergeFlags(NodeMayNegZeroInBaseline);
            break;
            
        case ArithMul:
            // FIXME: We should detect cases where we only overflowed but never created
            // negative zero.
            // https://bugs.webkit.org/show_bug.cgi?id=132470
            if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex)
                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
                node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
            else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)
                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
                node->mergeFlags(NodeMayNegZeroInBaseline);
            break;
            
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        
        return node;
    }
    
    Node* makeDivSafe(Node* node)
    {
        ASSERT(node->op() == ArithDiv);
        
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
            node->mergeFlags(NodeMayOverflowInDFG);
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            node->mergeFlags(NodeMayNegZeroInDFG);
        
        // The main slow case counter for op_div in the old JIT counts only when
        // the operands are not numbers. We don't care about that since we already
        // have speculations in place that take care of that separately. We only
        // care about when the outcome of the division is not an integer, which
        // is what the special fast case counter tells us.
        
        if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex))
            return node;
        
        // FIXME: It might be possible to make this more granular.
        node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline);
        
        return node;
    }
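    
    // Checks that a cached prototype chain still describes reality: each structure's
    // stored prototype must still have the structure recorded at the next position in
    // the chain. Direct puts don't consult the prototype chain, so they trivially pass.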
    bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
    {
        if (direct)
            return true;
        
        if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
            return false;
        
        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
            if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
                return false;
        }
        
        return true;
    }
    
    void buildOperandMapsIfNecessary();
    
    VM* m_vm;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;
    Graph& m_graph;
    
    // The current block being generated.
    BasicBlock* m_currentBlock;
    // The bytecode index of the current instruction being generated.
    unsigned m_currentIndex;
    
    // We use these values during code generation, and to avoid the need for
    // special handling we make sure they are available as constants in the
    // CodeBlock's constant pool. These variables are initialized to
    // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
    // constant pool, as necessary.
    unsigned m_constantUndefined;
    unsigned m_constantNull;
    unsigned m_constantNaN;
    unsigned m_constant1;
    HashMap<JSCell*, unsigned> m_cellConstants;
    HashMap<JSCell*, Node*> m_cellConstantNodes;
    
    // A constant in the constant pool may be represented by more than one
    // node in the graph, depending on the context in which it is being used.
    struct ConstantRecord {
        ConstantRecord()
            : asInt32(0)
            , asNumeric(0)
            , asJSValue(0)
        {
        }
        
        Node* asInt32;
        Node* asNumeric;
        Node* asJSValue;
    };
    
    // Track the index of the node whose result is the current value for every
    // register value in the bytecode - argument, local, and temporary.
    Vector<ConstantRecord, 16> m_constants;
    
    // The number of arguments passed to the function.
    unsigned m_numArguments;
    // The number of locals (vars + temporaries) used in the function.
    unsigned m_numLocals;
    // The number of slots (in units of sizeof(Register)) that we need to
    // preallocate for arguments to outgoing calls from this frame. This
    // number includes the CallFrame slots that we initialize for the callee
    // (but not the callee-initialized CallerFrame and ReturnPC slots).
    // This number is 0 if and only if this function is a leaf.
    unsigned m_parameterSlots;
    // The number of var args passed to the next var arg node.
    unsigned m_numPassedVarArgs;
    
    HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
    
    Vector<VariableWatchpointSet*, 16> m_localWatchpoints;
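    
    // One InlineStackEntry exists per level of the inline stack while parsing: the
    // machine code block gets the outermost entry, and each inlined call site pushes a
    // new entry. The destructor pops the entry by restoring m_inlineStackTop to the
    // caller (see ~InlineStackEntry below).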
    struct InlineStackEntry {
        ByteCodeParser* m_byteCodeParser;
        
        CodeBlock* m_codeBlock;
        CodeBlock* m_profiledBlock;
        InlineCallFrame* m_inlineCallFrame;
        
        ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
        
        QueryableExitProfile m_exitProfile;
        
        // Remapping of identifier and constant numbers from the code block being
        // inlined (inline callee) to the code block that we're inlining into
        // (the machine code block, which is the transitive, though not necessarily
        // direct, caller).
        Vector<unsigned> m_identifierRemap;
        Vector<unsigned> m_constantRemap;
        Vector<unsigned> m_constantBufferRemap;
        Vector<unsigned> m_switchRemap;
        
        // Blocks introduced by this code block, which need successor linking.
        // May include up to one basic block that includes the continuation after
        // the callsite in the caller. These must be appended in the order that they
        // are created, but their bytecodeBegin values need not be in order as they
        // are ignored.
        Vector<UnlinkedBlock> m_unlinkedBlocks;
        
        // Potential block linking targets. Must be sorted by bytecodeBegin, and
        // cannot have two blocks that have the same bytecodeBegin. For this very
        // reason, this is not equivalent to
        Vector<BasicBlock*> m_blockLinkingTargets;
        
        // If the callsite's basic block was split into two, then this will be
        // the head of the callsite block. It needs its successors linked to the
        // m_unlinkedBlocks, but not the other way around: there's no way for
        // any blocks in m_unlinkedBlocks to jump back into this block.
        BasicBlock* m_callsiteBlockHead;
        
        // Does the callsite block head need linking? This is typically true
        // but will be false for the machine code block's inline stack entry
        // (since that one is not inlined) and for cases where an inline callee
        // did the linking for us.
        bool m_callsiteBlockHeadNeedsLinking;
        
        VirtualRegister m_returnValue;
        
        // Speculations about variable types collected from the profiled code block,
        // which are based on OSR exit profiles that past DFG compilations of this
        // code block had gathered.
        LazyOperandValueProfileParser m_lazyOperands;
        
        CallLinkInfoMap m_callLinkInfos;
        StubInfoMap m_stubInfos;
        
        // Did we see any returns? We need to handle the (uncommon but necessary)
        // case where a procedure that does not return was inlined.
        bool m_didReturn;
        
        // Did we have any early returns?
        bool m_didEarlyReturn;
        
        // Pointers to the argument position trackers for this slice of code.
        Vector<ArgumentPosition*> m_argumentPositions;
        
        InlineStackEntry* m_caller;
        
        InlineStackEntry(
            ByteCodeParser* byteCodeParser,
            CodeBlock* codeBlock,
            CodeBlock* profiledBlock,
            BasicBlock* callsiteBlockHead,
            JSFunction* callee, // Null if this is a closure call.
            VirtualRegister returnValueVR,
            VirtualRegister inlineCallFrameStart,
            int argumentCountIncludingThis,
            CodeSpecializationKind);
        
        ~InlineStackEntry()
        {
            m_byteCodeParser->m_inlineStackTop = m_caller;
        }
        
        VirtualRegister remapOperand(VirtualRegister operand) const
        {
            if (!m_inlineCallFrame)
                return operand;
            
            if (operand.isConstant()) {
                VirtualRegister result = VirtualRegister(m_constantRemap[operand.toConstantIndex()]);
                ASSERT(result.isConstant());
                return result;
            }
            
            return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset);
        }
    };
    
    InlineStackEntry* m_inlineStackTop;
    
    struct DelayedSetLocal {
        VirtualRegister m_operand;
        Node* m_value;
        
        DelayedSetLocal() { }
        DelayedSetLocal(VirtualRegister operand, Node* value)
            : m_operand(operand)
            , m_value(value)
        {
        }
        
        Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet)
        {
            if (m_operand.isArgument())
                return parser->setArgument(m_operand, m_value, setMode);
            return parser->setLocal(m_operand, m_value, setMode);
        }
    };
    
    Vector<DelayedSetLocal, 2> m_setLocalQueue;
    
    // Have we built operand maps? We initialize them lazily, and only when doing
    // inlining.
    bool m_haveBuiltOperandMaps;
    // Mapping between identifier names and numbers.
    BorrowedIdentifierMap m_identifierMap;
    // Mapping between values and constant numbers.
    JSValueMap m_jsValueMap;
    // Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible
    // work-around for the fact that JSValueMap can't handle "empty" values.
    unsigned m_emptyJSValueIndex;
    
    CodeBlock* m_dfgCodeBlock;
    CallLinkStatus::ContextMap m_callContextMap;
    StubInfoMap m_dfgStubInfos;
    
    Instruction* m_currentInstruction;
};
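
// NEXT_OPCODE is used for opcodes that fall through to the next instruction: it advances
// m_currentIndex and continues the parse loop in parseBlock(). LAST_OPCODE is used for
// opcodes that end a basic block: it advances the index and returns from parseBlock()
// with shouldContinueParsing.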
#define NEXT_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    continue

#define LAST_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    return shouldContinueParsing

void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind)
{
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
    handleCall(
        pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call),
        pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand);
}

void ByteCodeParser::handleCall(
    int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize,
    int callee, int argumentCountIncludingThis, int registerOffset)
{
    ASSERT(registerOffset <= 0);
    
    Node* callTarget = get(VirtualRegister(callee));
    
    CallLinkStatus callLinkStatus;
    
    if (m_graph.isConstant(callTarget)) {
        callLinkStatus = CallLinkStatus(
            m_graph.valueOfJSConstant(callTarget)).setIsProved(true);
    } else {
        callLinkStatus = CallLinkStatus::computeFor(
            m_inlineStackTop->m_profiledBlock, currentCodeOrigin(),
            m_inlineStackTop->m_callLinkInfos, m_callContextMap);
    }
    
    if (!callLinkStatus.canOptimize()) {
        // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
        // that we cannot optimize them.
        
        addCall(result, op, callee, argumentCountIncludingThis, registerOffset);
        return;
    }
    
    unsigned nextOffset = m_currentIndex + instructionSize;
    SpeculatedType prediction = getPrediction();
    
    if (InternalFunction* function = callLinkStatus.internalFunction()) {
        if (handleConstantInternalFunction(result, function, registerOffset, argumentCountIncludingThis, prediction, kind)) {
            // This phantoming has to be *after* the code for the intrinsic, to signify that
            // the inputs must be kept alive whatever exits the intrinsic may do.
            addToGraph(Phantom, callTarget);
            emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind);
            return;
        }
        
        // Can only handle this using the generic call handler.
        addCall(result, op, callee, argumentCountIncludingThis, registerOffset);
        return;
    }
    
    Intrinsic intrinsic = callLinkStatus.intrinsicFor(kind);
    if (intrinsic != NoIntrinsic) {
        emitFunctionChecks(callLinkStatus, callTarget, registerOffset, kind);
        
        if (handleIntrinsic(result, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
            // This phantoming has to be *after* the code for the intrinsic, to signify that
            // the inputs must be kept alive whatever exits the intrinsic may do.
            addToGraph(Phantom, callTarget);
            emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind);
            if (m_graph.compilation())
                m_graph.compilation()->noticeInlinedCall();
            return;
        }
    } else if (handleInlining(callTarget, result, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, kind)) {
        if (m_graph.compilation())
            m_graph.compilation()->noticeInlinedCall();
        return;
    }
    
    addCall(result, op, callee, argumentCountIncludingThis, registerOffset);
}

void ByteCodeParser::emitFunctionChecks(const CallLinkStatus& callLinkStatus, Node* callTarget, int registerOffset, CodeSpecializationKind kind)
{
    Node* thisArgument;
    if (kind == CodeForCall)
        thisArgument = get(virtualRegisterForArgument(0, registerOffset));
    else
        thisArgument = 0;
    
    if (callLinkStatus.isProved()) {
        addToGraph(Phantom, callTarget, thisArgument);
        return;
    }
    
    ASSERT(callLinkStatus.canOptimize());
    
    if (JSFunction* function = callLinkStatus.function())
        addToGraph(CheckFunction, OpInfo(function), callTarget, thisArgument);
    else {
        ASSERT(callLinkStatus.structure());
        ASSERT(callLinkStatus.executable());
        
        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(callLinkStatus.structure())), callTarget);
        addToGraph(CheckExecutable, OpInfo(callLinkStatus.executable()), callTarget, thisArgument);
    }
}

void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind)
{
    for (int i = kind == CodeForCall ? 0 : 1; i < argumentCountIncludingThis; ++i)
        addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset)));
}
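
// handleInlining() proceeds in two parts: a series of early-out checks (do we have a JS
// function, does arity match, is there a baseline code block of inlineable size, are we
// within the inlining depth/recursion limits), and then the inlining itself, which pushes
// an InlineStackEntry, re-runs parseCodeBlock() over the callee, and finally stitches the
// callee's unlinked blocks back into the caller's control flow.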
bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind)
{
    static const bool verbose = false;
    
    if (verbose)
        dataLog("Considering inlining ", callLinkStatus, " into ", currentCodeOrigin(), "\n");
    
    // First, the really simple checks: do we have an actual JS function?
    if (!callLinkStatus.executable()) {
        if (verbose)
            dataLog("    Failing because there is no executable.\n");
        return false;
    }
    if (callLinkStatus.executable()->isHostFunction()) {
        if (verbose)
            dataLog("    Failing because it's a host function.\n");
        return false;
    }
    
    FunctionExecutable* executable = jsCast<FunctionExecutable*>(callLinkStatus.executable());
    
    // Does the number of arguments we're passing match the arity of the target? We currently
    // inline only if the number of arguments passed is greater than or equal to the number
    // of arguments expected.
    if (static_cast<int>(executable->parameterCount()) + 1 > argumentCountIncludingThis) {
        if (verbose)
            dataLog("    Failing because of arity mismatch.\n");
        return false;
    }
    
    // Do we have a code block, and does the code block's size match the heuristics/requirements for
    // being an inline candidate? We might not have a code block if code was thrown away or if we
    // simply hadn't actually made this call yet. We could still theoretically attempt to inline it
    // if we had a static proof of what was being called; this might happen for example if you call a
    // global function, where watchpointing gives us static information. Overall, it's a rare case
    // because we expect that any hot callees would have already been compiled.
    CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind);
    if (!codeBlock) {
        if (verbose)
            dataLog("    Failing because no code block available.\n");
        return false;
    }
    CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel(
        codeBlock, kind, callLinkStatus.isClosureCall());
    if (!canInline(capabilityLevel)) {
        if (verbose)
            dataLog("    Failing because the function is not inlineable.\n");
        return false;
    }
    
    // Check if the caller is already too large. We do this check here because that's just
    // where we happen to also have the callee's code block, and we want that for the
    // purpose of unsetting SABI.
    if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) {
        codeBlock->m_shouldAlwaysBeInlined = false;
        if (verbose)
            dataLog("    Failing because the caller is too large.\n");
        return false;
    }
    
    // FIXME: this should be better at predicting how much bloat we will introduce by inlining
    // this function.
    // https://bugs.webkit.org/show_bug.cgi?id=127627
    
    // Have we exceeded inline stack depth, or are we trying to inline a recursive call to
    // too many levels? If either of these are detected, then don't inline. We adjust our
    // heuristics if we are dealing with a function that cannot otherwise be compiled.
    
    unsigned depth = 0;
    unsigned recursion = 0;
    
    for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) {
        ++depth;
        if (depth >= Options::maximumInliningDepth()) {
            if (verbose)
                dataLog("    Failing because depth exceeded.\n");
            return false;
        }
        
        if (entry->executable() == executable) {
            ++recursion;
            if (recursion >= Options::maximumInliningRecursion()) {
                if (verbose)
                    dataLog("    Failing because recursion detected.\n");
                return false;
            }
        }
    }
    
    if (verbose)
        dataLog("    Committing to inlining.\n");
    
    // Now we know without a doubt that we are committed to inlining. So begin the process
    // by checking the callee (if necessary) and making sure that arguments and the callee
    // are flushed.
    emitFunctionChecks(callLinkStatus, callTargetNode, registerOffset, kind);
    
    // FIXME: Don't flush constants!
    
    int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize;
    
    ensureLocals(
        VirtualRegister(inlineCallFrameStart).toLocal() + 1 +
        JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters);
    
    size_t argumentPositionStart = m_graph.m_argumentPositions.size();
    
    InlineStackEntry inlineStackEntry(
        this, codeBlock, codeBlock, m_graph.lastBlock(), callLinkStatus.function(),
        m_inlineStackTop->remapOperand(VirtualRegister(resultOperand)),
        (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind);
    
    // This is where the actual inlining really happens.
    unsigned oldIndex = m_currentIndex;
    m_currentIndex = 0;
    
    InlineVariableData inlineVariableData;
    inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame;
    inlineVariableData.argumentPositionStart = argumentPositionStart;
    inlineVariableData.calleeVariable = 0;
    
    RELEASE_ASSERT(
        m_inlineStackTop->m_inlineCallFrame->isClosureCall
        == callLinkStatus.isClosureCall());
    if (callLinkStatus.isClosureCall()) {
        VariableAccessData* calleeVariable =
            set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData();
        VariableAccessData* scopeVariable =
            set(VirtualRegister(JSStack::ScopeChain), addToGraph(GetScope, callTargetNode), ImmediateNakedSet)->variableAccessData();
        
        calleeVariable->mergeShouldNeverUnbox(true);
        scopeVariable->mergeShouldNeverUnbox(true);
        
        inlineVariableData.calleeVariable = calleeVariable;
    }
    
    m_graph.m_inlineVariableData.append(inlineVariableData);
    
    parseCodeBlock();
    
    m_currentIndex = oldIndex;
    
    // If the inlined code created some new basic blocks, then we have linking to do.
    if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) {
        
        ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty());
        if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking)
            linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets);
        else
            ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked);
        
        // It's possible that the callsite block head is not owned by the caller.
        if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) {
            // It's definitely owned by the caller, because the caller created new blocks.
            // Assert that this all adds up.
            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_block == inlineStackEntry.m_callsiteBlockHead);
            ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking);
            inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false;
        } else {
            // It's definitely not owned by the caller. Tell the caller that he does not
            // need to link his callsite block head, because we did it for him.
            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking);
            ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead);
            inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false;
        }
        
        linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
    } else
        ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty());
    
    BasicBlock* lastBlock = m_graph.lastBlock();
    // If there was a return, but no early returns, then we're done. We allow parsing of
    // the caller to continue in whatever basic block we're in right now.
    if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
        ASSERT(lastBlock->isEmpty() || !lastBlock->last()->isTerminal());
        
        // If we created new blocks then the last block needs linking, but in the
        // caller. It doesn't need to be linked to, but it needs outgoing links.
        if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
            // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
            // for release builds because this block will never serve as a potential target
            // in the linker's binary search.
            lastBlock->bytecodeBegin = m_currentIndex;
            m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock()));
        }
        
        m_currentBlock = m_graph.lastBlock();
        return true;
    }
    
    // If we get to this point then all blocks must end in some sort of terminals.
    ASSERT(lastBlock->last()->isTerminal());
    
    // Need to create a new basic block for the continuation at the caller.
    RefPtr<BasicBlock> block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN));
    
    // Link the early returns to the basic block we're about to create.
    for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
        if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
            continue;
        BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block;
        ASSERT(!blockToLink->isLinked);
        Node* node = blockToLink->last();
        ASSERT(node->op() == Jump);
        ASSERT(!node->targetBlock());
        node->targetBlock() = block.get();
        inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
#if !ASSERT_DISABLED
        blockToLink->isLinked = true;
#endif
    }
    
    m_currentBlock = block.get();
    ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset);
    m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
    m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get());
    m_graph.appendBlock(block);
    prepareToParseBlock();
    
    // At this point we return and continue to generate code for the caller, but
    // in the new basic block.
    return true;
}

bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
{
    if (argumentCountIncludingThis == 1) { // Math.min()
        set(VirtualRegister(resultOperand), constantNaN());
        return true;
    }
    
    if (argumentCountIncludingThis == 2) { // Math.min(x)
        Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset)));
        addToGraph(Phantom, Edge(result, NumberUse));
        set(VirtualRegister(resultOperand), result);
        return true;
    }
    
    if (argumentCountIncludingThis == 3) { // Math.min(x, y)
        set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset))));
        return true;
    }
    
    // Don't handle >=3 arguments for now.
    return false;
}

bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
{
    switch (intrinsic) {
    case AbsIntrinsic: {
        if (argumentCountIncludingThis == 1) { // Math.abs()
            set(VirtualRegister(resultOperand), constantNaN());
            return true;
        }

        if (!MacroAssembler::supportsFloatingPointAbs())
            return false;

        Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset)));
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
            node->mergeFlags(NodeMayOverflowInDFG);
        set(VirtualRegister(resultOperand), node);
        return true;
    }

    case MinIntrinsic:
        return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);
        
    case MaxIntrinsic:
        return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);
        
    case SqrtIntrinsic:
    case CosIntrinsic:
    case SinIntrinsic: {
        if (argumentCountIncludingThis == 1) {
            set(VirtualRegister(resultOperand), constantNaN());
            return true;
        }
        
        switch (intrinsic) {
        case SqrtIntrinsic:
            if (!MacroAssembler::supportsFloatingPointSqrt())
                return false;
            
            set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset))));
            return true;
            
        case CosIntrinsic:
            set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset))));
            return true;
            
        case SinIntrinsic:
            set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset))));
            return true;
            
        default:
            RELEASE_ASSERT_NOT_REACHED();
            return false;
        }
    }
        
    case ArrayPushIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;
        
        ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
        if (!arrayMode.isJSArray())
            return false;
        switch (arrayMode.type()) {
        case Array::Undecided:
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::ArrayStorage: {
            Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
            set(VirtualRegister(resultOperand), arrayPush);
            
            return true;
        }
            
        default:
            return false;
        }
    }
        
    case ArrayPopIntrinsic: {
        if (argumentCountIncludingThis != 1)
            return false;
        
        ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile);
        if (!arrayMode.isJSArray())
            return false;
        switch (arrayMode.type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::ArrayStorage: {
            Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)));
            set(VirtualRegister(resultOperand), arrayPop);
            return true;
        }
            
        default:
            return false;
        }
    }

    case CharCodeAtIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
        VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
        Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));

        set(VirtualRegister(resultOperand), charCode);
        return true;
    }

    case CharAtIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset);
        VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
        Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand));

        set(VirtualRegister(resultOperand), charCode);
        return true;
    }

    case FromCharCodeIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset);
        Node* charCode = addToGraph(StringFromCharCode, get(indexOperand));

        set(VirtualRegister(resultOperand), charCode);

        return true;
    }

    case RegExpExecIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;
        
        Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
        set(VirtualRegister(resultOperand), regExpExec);
        
        return true;
    }
        
    case RegExpTestIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;
        
        Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset)));
        set(VirtualRegister(resultOperand), regExpExec);
        
        return true;
    }

    case IMulIntrinsic: {
        if (argumentCountIncludingThis != 3)
            return false;
        VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset);
        VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset);
        Node* left = get(leftOperand);
        Node* right = get(rightOperand);
        set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right));
        return true;
    }
        
    case FRoundIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;
        VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
        set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand)));
        return true;
    }
        
    case DFGTrueIntrinsic: {
        set(VirtualRegister(resultOperand), getJSConstantForValue(jsBoolean(true)));
        return true;
    }
        
    case OSRExitIntrinsic: {
        addToGraph(ForceOSRExit);
        set(VirtualRegister(resultOperand), constantUndefined());
        return true;
    }
        
    case IsFinalTierIntrinsic: {
        set(VirtualRegister(resultOperand),
            getJSConstantForValue(jsBoolean(Options::useFTLJIT() ? isFTL(m_graph.m_plan.mode) : true)));
        return true;
    }
        
    case SetInt32HeapPredictionIntrinsic: {
        for (int i = 1; i < argumentCountIncludingThis; ++i) {
            Node* node = get(virtualRegisterForArgument(i, registerOffset));
            if (node->hasHeapPrediction())
                node->setHeapPrediction(SpecInt32);
        }
        set(VirtualRegister(resultOperand), constantUndefined());
        return true;
    }
        
    case FiatInt52Intrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;
        VirtualRegister operand = virtualRegisterForArgument(1, registerOffset);
        if (enableInt52())
            set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand)));
        else
            set(VirtualRegister(resultOperand), get(operand));
        return true;
    }
        
    default:
        return false;
    }
}
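
// Try to compile `new Int8Array(n)` and friends down to a NewTypedArray node when the callee
// is the matching typed array constructor from the current global object and it was passed a
// single argument.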
bool ByteCodeParser::handleTypedArrayConstructor(
    int resultOperand, InternalFunction* function, int registerOffset,
    int argumentCountIncludingThis, TypedArrayType type)
{
    if (!isTypedView(type))
        return false;
    
    if (function->classInfo() != constructorClassInfoForType(type))
        return false;
    
    if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
        return false;
    
    // We only have an intrinsic for the case where you say:
    //
    // new FooArray(blah);
    //
    // Of course, 'blah' could be any of the following:
    //
    // - Integer, indicating that you want to allocate an array of that length.
    //   This is the thing we're hoping for, and what we can actually do meaningful
    //   optimizations for.
    //
    // - Array buffer, indicating that you want to create a view onto that _entire_
    //   buffer.
    //
    // - Non-buffer object, indicating that you want to create a copy of that
    //   object by pretending that it quacks like an array.
    //
    // - Anything else, indicating that you want to have an exception thrown at
    //   you.
    //
    // The intrinsic, NewTypedArray, will behave as if it could do any of these
    // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is
    // predicted Int32, then we lock it in as a normal typed array allocation.
    // Otherwise, NewTypedArray turns into a totally opaque function call that
    // may clobber the world - by virtue of it accessing properties on what could
    // be any object.
    //
    // Note that although the generic form of NewTypedArray sounds sort of awful,
    // it is actually quite likely to be more efficient than a fully generic
    // Construct. So, we might want to think about making NewTypedArray variadic,
    // or else making Construct not super slow.
    
    if (argumentCountIncludingThis != 2)
        return false;
    
    set(VirtualRegister(resultOperand),
        addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset))));
    return true;
}
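
// Handle calls and constructs whose callee is a compile-time-known InternalFunction. Currently
// special-cases the Array constructor, the String constructor, and the typed array
// constructors; everything else falls back to a generic call.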
bool ByteCodeParser::handleConstantInternalFunction(
    int resultOperand, InternalFunction* function, int registerOffset,
    int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind)
{
    // If we ever find that we have a lot of internal functions that we specialize for,
    // then we should probably have some sort of hashtable dispatch, or maybe even
    // dispatch straight through the MethodTable of the InternalFunction. But for now,
    // it seems that this case is hit infrequently enough, and the number of functions
    // we know about is small enough, that having just a linear cascade of if statements
    // is good enough.
    
    UNUSED_PARAM(prediction); // Remove this once we do more things.
    
    if (function->classInfo() == ArrayConstructor::info()) {
        if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject())
            return false;
        
        if (argumentCountIncludingThis == 2) {
            set(VirtualRegister(resultOperand),
                addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset))));
            return true;
        }
        
        for (int i = 1; i < argumentCountIncludingThis; ++i)
            addVarArgChild(get(virtualRegisterForArgument(i, registerOffset)));
        set(VirtualRegister(resultOperand),
            addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
        return true;
    }
    
    if (function->classInfo() == StringConstructor::info()) {
        Node* result;
        
        if (argumentCountIncludingThis <= 1)
            result = cellConstant(m_vm->smallStrings.emptyString());
        else
            result = addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset)));
        
        if (kind == CodeForConstruct)
            result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);
        
        set(VirtualRegister(resultOperand), result);
        return true;
    }
    
    for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) {
        bool result = handleTypedArrayConstructor(
            resultOperand, function, registerOffset, argumentCountIncludingThis,
            indexToTypedArrayType(typeIndex));
        if (result)
            return true;
    }
    
    return false;
}
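
// Emit a load from a property at a known offset: read the butterfly when the property is
// out-of-line, then add a GetByOffset node with a StorageAccessData entry recording the offset
// and identifier.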
Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset)
{
    Node* propertyStorage;
    if (isInlineOffset(offset))
        propertyStorage = base;
    else
        propertyStorage = addToGraph(GetButterfly, base);
    Node* getByOffset = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage, base);

    StorageAccessData storageAccessData;
    storageAccessData.offset = offset;
    storageAccessData.identifierNumber = identifierNumber;
    m_graph.m_storageAccessData.append(storageAccessData);

    return getByOffset;
}

void ByteCodeParser::handleGetByOffset(
    int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
    PropertyOffset offset)
{
    set(VirtualRegister(destinationOperand), handleGetByOffset(prediction, base, identifierNumber, offset));
}
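
// Counterpart of handleGetByOffset for stores: fetch the butterfly if needed and emit a
// PutByOffset against the known offset.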
Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value)
{
    Node* propertyStorage;
    if (isInlineOffset(offset))
        propertyStorage = base;
    else
        propertyStorage = addToGraph(GetButterfly, base);
    Node* result = addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);
    
    StorageAccessData storageAccessData;
    storageAccessData.offset = offset;
    storageAccessData.identifierNumber = identifier;
    m_graph.m_storageAccessData.append(storageAccessData);
    
    return result;
}
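
// Walk an IntendedStructureChain, emitting a structure check against each prototype in turn,
// and return the last prototype as a constant node so callers can load directly off of it.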
Node* ByteCodeParser::emitPrototypeChecks(
    Structure* structure, IntendedStructureChain* chain)
{
    Node* base = 0;
    m_graph.chains().addLazily(chain);
    Structure* currentStructure = structure;
    JSObject* currentObject = 0;
    for (unsigned i = 0; i < chain->size(); ++i) {
        currentObject = asObject(currentStructure->prototypeForLookup(m_inlineStackTop->m_codeBlock));
        currentStructure = chain->at(i);
        base = cellConstantWithStructureCheck(currentObject, currentStructure);
    }
    return base;
}
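
// Lower a get_by_id using the profiled GetByIdStatus: emit a generic GetById/GetByIdFlush for
// non-simple accesses, a MultiGetByOffset for polymorphic accesses when compiling for the FTL,
// and otherwise a CheckStructure (plus any prototype checks) followed by either a constant or
// a GetByOffset.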
void ByteCodeParser::handleGetById(
    int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
    const GetByIdStatus& getByIdStatus)
{
    if (!getByIdStatus.isSimple() || !Options::enableAccessInlining()) {
        set(VirtualRegister(destinationOperand),
            addToGraph(
                getByIdStatus.makesCalls() ? GetByIdFlush : GetById,
                OpInfo(identifierNumber), OpInfo(prediction), base));
        return;
    }
    
    if (getByIdStatus.numVariants() > 1) {
        if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicAccessInlining()) {
            set(VirtualRegister(destinationOperand),
                addToGraph(GetById, OpInfo(identifierNumber), OpInfo(prediction), base));
            return;
        }
        
        if (m_graph.compilation())
            m_graph.compilation()->noticeInlinedGetById();
        
        // 1) Emit prototype structure checks for all chains. This could sort of maybe not be
        //    optimal, if there is some rarely executed case in the chain that requires a lot
        //    of checks and those checks are not watchpointable.
        for (unsigned variantIndex = getByIdStatus.numVariants(); variantIndex--;) {
            if (getByIdStatus[variantIndex].chain()) {
                emitPrototypeChecks(
                    getByIdStatus[variantIndex].structureSet().singletonStructure(),
                    getByIdStatus[variantIndex].chain());
            }
        }
        
        // 2) Emit a MultiGetByOffset
        MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
        data->variants = getByIdStatus.variants();
        data->identifierNumber = identifierNumber;
        set(VirtualRegister(destinationOperand),
            addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base));
        return;
    }
    
    ASSERT(getByIdStatus.numVariants() == 1);
    GetByIdVariant variant = getByIdStatus[0];
    
    if (m_graph.compilation())
        m_graph.compilation()->noticeInlinedGetById();
    
    Node* originalBaseForBaselineJIT = base;
    
    addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base);
    
    if (variant.chain()) {
        base = emitPrototypeChecks(
            variant.structureSet().singletonStructure(), variant.chain());
    }
    
    // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
    // ensure that the base of the original get_by_id is kept alive until we're done with
    // all of the speculations. We only insert the Phantom if there had been a CheckStructure
    // on something other than the base following the CheckStructure on base, or if the
    // access was compiled to a WeakJSConstant specific value, in which case we might not
    // have any explicit use of the base at all.
    if (variant.specificValue() || originalBaseForBaselineJIT != base)
        addToGraph(Phantom, originalBaseForBaselineJIT);
    
    if (variant.specificValue()) {
        ASSERT(variant.specificValue().isCell());
        
        set(VirtualRegister(destinationOperand), cellConstant(variant.specificValue().asCell()));
        return;
    }
    
    handleGetByOffset(
        destinationOperand, prediction, base, identifierNumber, variant.offset());
}
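
// Emit the fully generic form of a put_by_id: PutByIdDirect for direct puts, otherwise PutById
// or PutByIdFlush depending on whether the status says the put can make calls.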
void ByteCodeParser::emitPutById(
    Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect)
{
    if (isDirect)
        addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
    else
        addToGraph(putByIdStatus.makesCalls() ? PutByIdFlush : PutById, OpInfo(identifierNumber), base, value);
}
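
// Lower a put_by_id using the profiled PutByIdStatus: generic puts for non-simple statuses, a
// MultiPutByOffset for polymorphic FTL compiles, a checked PutByOffset for simple replaces, and
// a structure transition (with property storage allocation or reallocation as needed) for
// transition puts.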
void ByteCodeParser::handlePutById(
    Node* base, unsigned identifierNumber, Node* value,
    const PutByIdStatus& putByIdStatus, bool isDirect)
{
    if (!putByIdStatus.isSimple() || !Options::enableAccessInlining()) {
        if (!putByIdStatus.isSet())
            addToGraph(ForceOSRExit);
        emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
        return;
    }
    
    if (putByIdStatus.numVariants() > 1) {
        if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls()
            || !Options::enablePolymorphicAccessInlining()) {
            emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
            return;
        }
        
        if (m_graph.compilation())
            m_graph.compilation()->noticeInlinedPutById();
        
        if (!isDirect) {
            for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) {
                if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition)
                    continue;
                if (!putByIdStatus[variantIndex].structureChain())
                    continue;
                emitPrototypeChecks(
                    putByIdStatus[variantIndex].oldStructure(),
                    putByIdStatus[variantIndex].structureChain());
            }
        }
        
        MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
        data->variants = putByIdStatus.variants();
        data->identifierNumber = identifierNumber;
        addToGraph(MultiPutByOffset, OpInfo(data), base, value);
        return;
    }
    
    ASSERT(putByIdStatus.numVariants() == 1);
    const PutByIdVariant& variant = putByIdStatus[0];
    
    if (variant.kind() == PutByIdVariant::Replace) {
        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base);
        handlePutByOffset(base, identifierNumber, variant.offset(), value);
        if (m_graph.compilation())
            m_graph.compilation()->noticeInlinedPutById();
        return;
    }
    
    if (variant.kind() != PutByIdVariant::Transition) {
        emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
        return;
    }

    if (variant.structureChain() && !variant.structureChain()->isStillValid()) {
        emitPutById(base, identifierNumber, value, putByIdStatus, isDirect);
        return;
    }
    
    m_graph.chains().addLazily(variant.structureChain());
    
    addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base);
    if (!isDirect)
        emitPrototypeChecks(variant.oldStructure(), variant.structureChain());

    ASSERT(variant.oldStructure()->transitionWatchpointSetHasBeenInvalidated());
    
    Node* propertyStorage;
    StructureTransitionData* transitionData = m_graph.addStructureTransitionData(
        StructureTransitionData(variant.oldStructure(), variant.newStructure()));

    if (variant.oldStructure()->outOfLineCapacity()
        != variant.newStructure()->outOfLineCapacity()) {

        // If we're growing the property storage then it must be because we're
        // storing into the out-of-line storage.
        ASSERT(!isInlineOffset(variant.offset()));

        if (!variant.oldStructure()->outOfLineCapacity()) {
            propertyStorage = addToGraph(
                AllocatePropertyStorage, OpInfo(transitionData), base);
        } else {
            propertyStorage = addToGraph(
                ReallocatePropertyStorage, OpInfo(transitionData),
                base, addToGraph(GetButterfly, base));
        }
    } else {
        if (isInlineOffset(variant.offset()))
            propertyStorage = base;
        else
            propertyStorage = addToGraph(GetButterfly, base);
    }

    addToGraph(PutStructure, OpInfo(transitionData), base);

    addToGraph(
        PutByOffset,
        OpInfo(m_graph.m_storageAccessData.size()),
        propertyStorage,
        base,
        value);

    StorageAccessData storageAccessData;
    storageAccessData.offset = variant.offset();
    storageAccessData.identifierNumber = identifierNumber;
    m_graph.m_storageAccessData.append(storageAccessData);

    if (m_graph.compilation())
        m_graph.compilation()->noticeInlinedPutById();
}
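
// Reset the per-block constant caches; constant nodes are only valid within the basic block
// that created them.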
void ByteCodeParser::prepareToParseBlock()
{
    for (unsigned i = 0; i < m_constants.size(); ++i)
        m_constants[i] = ConstantRecord();
    m_cellConstantNodes.clear();
}
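
// Build the chain of nodes that resolves the scope at a given depth: start from the current
// scope register, optionally skip the function's own activation, then skip `skipCount`
// enclosing scopes.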
Node* ByteCodeParser::getScope(bool skipTop, unsigned skipCount)
{
    Node* localBase = get(VirtualRegister(JSStack::ScopeChain));
    if (skipTop) {
        ASSERT(!inlineCallFrame());
        localBase = addToGraph(SkipTopScope, localBase);
    }
    for (unsigned n = skipCount; n--;)
        localBase = addToGraph(SkipScope, localBase);
    return localBase;
}
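
// Parse bytecode into the current basic block until we reach `limit` (the next jump target) or
// a block terminator, appending one or more DFG nodes per bytecode. The return value tells the
// caller whether it should keep parsing further blocks.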
bool ByteCodeParser::parseBlock(unsigned limit)
{
    bool shouldContinueParsing = true;

    Interpreter* interpreter = m_vm->interpreter;
    Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
    unsigned blockBegin = m_currentIndex;
    
    // If we are the first basic block, introduce markers for arguments. This allows
    // us to track if a use of an argument may use the actual argument passed, as
    // opposed to using a value we set explicitly.
    if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) {
        m_graph.m_arguments.resize(m_numArguments);
        for (unsigned argument = 0; argument < m_numArguments; ++argument) {
            VariableAccessData* variable = newVariableAccessData(
                virtualRegisterForArgument(argument), m_codeBlock->isCaptured(virtualRegisterForArgument(argument)));
            variable->mergeStructureCheckHoistingFailed(
                m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint));
            variable->mergeCheckArrayHoistingFailed(
                m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType));
            
            Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
            m_graph.m_arguments[argument] = setArgument;
            m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
        }
    }

    while (true) {
        for (unsigned i = 0; i < m_setLocalQueue.size(); ++i)
            m_setLocalQueue[i].execute(this);
        m_setLocalQueue.resize(0);
        
        // Don't extend over jump destinations.
        if (m_currentIndex == limit) {
            // Ordinarily we want to plant a jump. But refuse to do this if the block is
            // empty. This is a special case for inlining, which might otherwise create
            // some empty blocks in some cases. When parseBlock() returns with an empty
            // block, it will get repurposed instead of creating a new one. Note that this
            // logic relies on every bytecode resulting in one or more nodes, which would
            // be true anyway except for op_loop_hint, which emits a Phantom to force this
            // to be true.
            if (!m_currentBlock->isEmpty())
                addToGraph(Jump, OpInfo(m_currentIndex));
            return shouldContinueParsing;
        }
        
        // Switch on the current bytecode opcode.
        Instruction* currentInstruction = instructionsBegin + m_currentIndex;
        m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
        OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);
        
        if (Options::verboseDFGByteCodeParsing())
            dataLog("    parsing ", currentCodeOrigin(), "\n");
        
        if (m_graph.compilation()) {
            addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor(
                Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
        }
        
        switch (opcodeID) {

        // === Function entry opcodes ===

        case op_enter:
            // Initialize all locals to undefined.
            for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
                set(virtualRegisterForLocal(i), constantUndefined(), ImmediateNakedSet);
            if (m_inlineStackTop->m_codeBlock->specializationKind() == CodeForConstruct)
                set(virtualRegisterForArgument(0), constantUndefined(), ImmediateNakedSet);
            NEXT_OPCODE(op_enter);
            
        case op_touch_entry:
            if (m_inlineStackTop->m_codeBlock->symbolTable()->m_functionEnteredOnce.isStillValid())
                addToGraph(ForceOSRExit);
            NEXT_OPCODE(op_touch_entry);
            
        case op_to_this: {
            Node* op1 = getThis();
            if (op1->op() != ToThis) {
                Structure* cachedStructure = currentInstruction[2].u.structure.get();
                if (!cachedStructure
                    || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis
                    || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
                    || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
                    || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint)
                    || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) {
                    setThis(addToGraph(ToThis, op1));
                } else {
                    addToGraph(
                        CheckStructure,
                        OpInfo(m_graph.addStructureSet(cachedStructure)),
                        op1);
                }
            }
            NEXT_OPCODE(op_to_this);
        }
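
        // op_create_this can become a NewObject with a known structure when the callee is a
        // compile-time-known function whose allocation profile is populated.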
        case op_create_this: {
            int calleeOperand = currentInstruction[2].u.operand;
            Node* callee = get(VirtualRegister(calleeOperand));
            bool alreadyEmitted = false;
            if (callee->op() == WeakJSConstant) {
                JSCell* cell = callee->weakConstant();
                ASSERT(cell->inherits(JSFunction::info()));
                
                JSFunction* function = jsCast<JSFunction*>(cell);
                if (Structure* structure = function->allocationStructure()) {
                    addToGraph(AllocationProfileWatchpoint, OpInfo(function));
                    // The callee is still live up to this point.
                    addToGraph(Phantom, callee);
                    set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure)));
                    alreadyEmitted = true;
                }
            }
            if (!alreadyEmitted) {
                set(VirtualRegister(currentInstruction[1].u.operand),
                    addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee));
            }
            NEXT_OPCODE(op_create_this);
        }

        case op_new_object: {
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(NewObject,
                    OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
            NEXT_OPCODE(op_new_object);
        }
        
        case op_new_array: {
            int startOperand = currentInstruction[2].u.operand;
            int numOperands = currentInstruction[3].u.operand;
            ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
            for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx)
                addVarArgChild(get(VirtualRegister(operandIdx)));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
            NEXT_OPCODE(op_new_array);
        }
        
        case op_new_array_with_size: {
            int lengthOperand = currentInstruction[2].u.operand;
            ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand))));
            NEXT_OPCODE(op_new_array_with_size);
        }
        
        case op_new_array_buffer: {
            int startConstant = currentInstruction[2].u.operand;
            int numConstants = currentInstruction[3].u.operand;
            ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
            NewArrayBufferData data;
            data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
            data.numConstants = numConstants;
            data.indexingType = profile->selectIndexingType();

            // If this statement has never executed, we'll have the wrong indexing type in the profile.
            for (int i = 0; i < numConstants; ++i) {
                data.indexingType =
                    leastUpperBoundOfIndexingTypeAndValue(
                        data.indexingType,
                        m_codeBlock->constantBuffer(data.startConstant)[i]);
            }
            
            m_graph.m_newArrayBufferData.append(data);
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
            NEXT_OPCODE(op_new_array_buffer);
        }
        
        case op_new_regexp: {
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
            NEXT_OPCODE(op_new_regexp);
        }
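
        // op_get_callee speculates on the profiled callee cell: if the profile is stable we
        // plant a CheckFunction and use a WeakJSConstant; otherwise we just read the Callee
        // slot from the call frame.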
        case op_get_callee: {
            JSCell* cachedFunction = currentInstruction[2].u.jsCell.get();
            if (!cachedFunction
                || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex)
                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadFunction)) {
                set(VirtualRegister(currentInstruction[1].u.operand), get(VirtualRegister(JSStack::Callee)));
            } else {
                ASSERT(cachedFunction->inherits(JSFunction::info()));
                Node* actualCallee = get(VirtualRegister(JSStack::Callee));
                addToGraph(CheckFunction, OpInfo(cachedFunction), actualCallee);
                set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(WeakJSConstant, OpInfo(cachedFunction)));
            }
            NEXT_OPCODE(op_get_callee);
        }

        // === Bitwise operations ===

        case op_bitand: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2));
            NEXT_OPCODE(op_bitand);
        }

        case op_bitor: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2));
            NEXT_OPCODE(op_bitor);
        }

        case op_bitxor: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2));
            NEXT_OPCODE(op_bitxor);
        }

        case op_rshift: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(BitRShift, op1, op2));
            NEXT_OPCODE(op_rshift);
        }

        case op_lshift: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(BitLShift, op1, op2));
            NEXT_OPCODE(op_lshift);
        }

        case op_urshift: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(BitURShift, op1, op2));
            NEXT_OPCODE(op_urshift);
        }
        
        case op_unsigned: {
            set(VirtualRegister(currentInstruction[1].u.operand),
                makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand)))));
            NEXT_OPCODE(op_unsigned);
        }

        // === Increment/Decrement opcodes ===

        case op_inc: {
            int srcDst = currentInstruction[1].u.operand;
            VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
            Node* op = get(srcDstVirtualRegister);
            set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, one())));
            NEXT_OPCODE(op_inc);
        }

        case op_dec: {
            int srcDst = currentInstruction[1].u.operand;
            VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst);
            Node* op = get(srcDstVirtualRegister);
            set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, one())));
            NEXT_OPCODE(op_dec);
        }

        // === Arithmetic operations ===
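
        // The makeSafe()/makeDivSafe() wrappers consult the exit profile and rare-case counters
        // for this bytecode and flag the node when overflow or negative-zero checks must be kept.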

        case op_add: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            if (op1->hasNumberResult() && op2->hasNumberResult())
                set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2)));
            else
                set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2)));
            NEXT_OPCODE(op_add);
        }

        case op_sub: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2)));
            NEXT_OPCODE(op_sub);
        }

        case op_negate: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1)));
            NEXT_OPCODE(op_negate);
        }

        case op_mul: {
            // Multiply requires that the inputs are not truncated, unfortunately.
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2)));
            NEXT_OPCODE(op_mul);
        }

        case op_mod: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2)));
            NEXT_OPCODE(op_mod);
        }

        case op_div: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2)));
            NEXT_OPCODE(op_div);
        }

        // === Misc operations ===

        case op_debug:
            addToGraph(Breakpoint);
            NEXT_OPCODE(op_debug);

        case op_profile_will_call: {
            addToGraph(ProfileWillCall);
            NEXT_OPCODE(op_profile_will_call);
        }

        case op_profile_did_call: {
            addToGraph(ProfileDidCall);
            NEXT_OPCODE(op_profile_did_call);
        }

        case op_mov: {
            Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), op);
            NEXT_OPCODE(op_mov);
        }
        
        case op_captured_mov: {
            Node* op = get(VirtualRegister(currentInstruction[2].u.operand));
            if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet) {
                if (set->state() != IsInvalidated)
                    addToGraph(NotifyWrite, OpInfo(set), op);
            }
            set(VirtualRegister(currentInstruction[1].u.operand), op);
            NEXT_OPCODE(op_captured_mov);
        }

        case op_check_has_instance:
            addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand)));
            NEXT_OPCODE(op_check_has_instance);

        case op_instanceof: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* prototype = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype));
            NEXT_OPCODE(op_instanceof);
        }
        
        case op_is_undefined: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value));
            NEXT_OPCODE(op_is_undefined);
        }

        case op_is_boolean: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value));
            NEXT_OPCODE(op_is_boolean);
        }

        case op_is_number: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value));
            NEXT_OPCODE(op_is_number);
        }

        case op_is_string: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsString, value));
            NEXT_OPCODE(op_is_string);
        }

        case op_is_object: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value));
            NEXT_OPCODE(op_is_object);
        }

        case op_is_function: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value));
            NEXT_OPCODE(op_is_function);
        }

        case op_not: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value));
            NEXT_OPCODE(op_not);
        }
        
        case op_to_primitive: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value));
            NEXT_OPCODE(op_to_primitive);
        }
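
        // op_strcat converts every operand with ToString up front, then folds the results into
        // a chain of MakeRope nodes, at most maxRopeArguments operands at a time.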
        case op_strcat: {
            int startOperand = currentInstruction[2].u.operand;
            int numOperands = currentInstruction[3].u.operand;
#if CPU(X86)
            // X86 doesn't have enough registers to compile MakeRope with three arguments.
            // Rather than try to be clever, we just make MakeRope dumber on this processor.
            const unsigned maxRopeArguments = 2;
#else
            const unsigned maxRopeArguments = 3;
#endif
            auto toStringNodes = std::make_unique<Node*[]>(numOperands);
            for (int i = 0; i < numOperands; i++)
                toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i)));

            for (int i = 0; i < numOperands; i++)
                addToGraph(Phantom, toStringNodes[i]);

            Node* operands[AdjacencyList::Size];
            unsigned indexInOperands = 0;
            for (unsigned i = 0; i < AdjacencyList::Size; ++i)
                operands[i] = 0;
            for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
                if (indexInOperands == maxRopeArguments) {
                    operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]);
                    for (unsigned i = 1; i < AdjacencyList::Size; ++i)
                        operands[i] = 0;
                    indexInOperands = 1;
                }
                
                ASSERT(indexInOperands < AdjacencyList::Size);
                ASSERT(indexInOperands < maxRopeArguments);
                operands[indexInOperands++] = toStringNodes[operandIdx];
            }
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(MakeRope, operands[0], operands[1], operands[2]));
            NEXT_OPCODE(op_strcat);
        }

        case op_less: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2));
            NEXT_OPCODE(op_less);
        }

        case op_lesseq: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2));
            NEXT_OPCODE(op_lesseq);
        }

        case op_greater: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2));
            NEXT_OPCODE(op_greater);
        }

        case op_greatereq: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2));
            NEXT_OPCODE(op_greatereq);
        }

        case op_eq: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2));
            NEXT_OPCODE(op_eq);
        }

        case op_eq_null: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, constantNull()));
            NEXT_OPCODE(op_eq_null);
        }

        case op_stricteq: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2));
            NEXT_OPCODE(op_stricteq);
        }

        case op_neq: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
            NEXT_OPCODE(op_neq);
        }

        case op_neq_null: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, constantNull())));
            NEXT_OPCODE(op_neq_null);
        }

        case op_nstricteq: {
            Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand));
            Node* invertedResult;
            invertedResult = addToGraph(CompareStrictEq, op1, op2);
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult));
            NEXT_OPCODE(op_nstricteq);
        }

        // === Property access operations ===

        case op_get_by_val: {
            SpeculatedType prediction = getPrediction();
            
            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
            ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read);
            Node* property = get(VirtualRegister(currentInstruction[3].u.operand));
            Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
            set(VirtualRegister(currentInstruction[1].u.operand), getByVal);

            NEXT_OPCODE(op_get_by_val);
        }

        case op_put_by_val_direct:
        case op_put_by_val: {
            Node* base = get(VirtualRegister(currentInstruction[1].u.operand));

            ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Write);
            
            Node* property = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
            
            addVarArgChild(base);
            addVarArgChild(property);
            addVarArgChild(value);
            addVarArgChild(0); // Leave room for property storage.
            addVarArgChild(0); // Leave room for length.
            addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));

            NEXT_OPCODE(op_put_by_val);
        }
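
        // op_get_by_id (and its out-of-line and array-length variants) is lowered through
        // handleGetById, driven by the GetByIdStatus computed from the baseline's inline caches.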
        case op_get_by_id:
        case op_get_by_id_out_of_line:
        case op_get_array_length: {
            SpeculatedType prediction = getPrediction();
            
            Node* base = get(VirtualRegister(currentInstruction[2].u.operand));
            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
            
            StringImpl* uid = m_graph.identifiers()[identifierNumber];
            GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
                m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
                m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
                currentCodeOrigin(), uid);
            
            handleGetById(
                currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);

            NEXT_OPCODE(op_get_by_id);
        }
        
        case op_put_by_id:
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line: {
            Node* value = get(VirtualRegister(currentInstruction[3].u.operand));
            Node* base = get(VirtualRegister(currentInstruction[1].u.operand));
            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
            bool direct = currentInstruction[8].u.operand;

            PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
                m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock,
                m_inlineStackTop->m_stubInfos, m_dfgStubInfos,
                currentCodeOrigin(), m_graph.identifiers()[identifierNumber]);
            
            handlePutById(base, identifierNumber, value, putByIdStatus, direct);
            NEXT_OPCODE(op_put_by_id);
        }

        case op_init_global_const_nop: {
            NEXT_OPCODE(op_init_global_const_nop);
        }

        case op_init_global_const: {
            Node* value = get(VirtualRegister(currentInstruction[2].u.operand));
            addToGraph(
                PutGlobalVar,
                OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
                value);
            NEXT_OPCODE(op_init_global_const);
        }

        // === Block terminators. ===

        case op_jmp: {
            int relativeOffset = currentInstruction[1].u.operand;
            if (relativeOffset <= 0)
                flushForTerminal();
            addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
            LAST_OPCODE(op_jmp);
        }

        case op_jtrue: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition);
            LAST_OPCODE(op_jtrue);
        }

        case op_jfalse: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            Node* condition = get(VirtualRegister(currentInstruction[1].u.operand));
            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition);
            LAST_OPCODE(op_jfalse);
        }

        case op_jeq_null: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
            Node* condition = addToGraph(CompareEqConstant, value, constantNull());
            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition);
            LAST_OPCODE(op_jeq_null);
        }

        case op_jneq_null: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            Node* value = get(VirtualRegister(currentInstruction[1].u.operand));
            Node* condition = addToGraph(CompareEqConstant, value, constantNull());
            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition);
            LAST_OPCODE(op_jneq_null);
        }

        case op_jless: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* condition = addToGraph(CompareLess, op1, op2);
            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition);
            LAST_OPCODE(op_jless);
        }

        case op_jlesseq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* condition = addToGraph(CompareLessEq, op1, op2);
            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition);
            LAST_OPCODE(op_jlesseq);
        }

        case op_jgreater: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* condition = addToGraph(CompareGreater, op1, op2);
            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition);
            LAST_OPCODE(op_jgreater);
        }

        case op_jgreatereq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* condition = addToGraph(CompareGreaterEq, op1, op2);
            addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition);
            LAST_OPCODE(op_jgreatereq);
        }

        case op_jnless: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* condition = addToGraph(CompareLess, op1, op2);
            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition);
            LAST_OPCODE(op_jnless);
        }

        case op_jnlesseq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* condition = addToGraph(CompareLessEq, op1, op2);
            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition);
            LAST_OPCODE(op_jnlesseq);
        }

        case op_jngreater: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* condition = addToGraph(CompareGreater, op1, op2);
            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition);
            LAST_OPCODE(op_jngreater);
        }

        case op_jngreatereq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand));
            Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand));
            Node* condition = addToGraph(CompareGreaterEq, op1, op2);
            addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition);
            LAST_OPCODE(op_jngreatereq);
        }
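
        // The switch opcodes build a SwitchData from the code block's jump tables, dropping
        // cases that would just branch to the fall-through target.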
        case op_switch_imm: {
            SwitchData& data = *m_graph.m_switchData.add();
            data.kind = SwitchImm;
            data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
            data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
            SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
            for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
                if (!table.branchOffsets[i])
                    continue;
                unsigned target = m_currentIndex + table.branchOffsets[i];
                if (target == data.fallThrough.bytecodeIndex())
                    continue;
                data.cases.append(SwitchCase::withBytecodeIndex(jsNumber(static_cast<int32_t>(table.min + i)), target));
            }
            flushIfTerminal(data);
            addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
            LAST_OPCODE(op_switch_imm);
        }
            
        case op_switch_char: {
            SwitchData& data = *m_graph.m_switchData.add();
            data.kind = SwitchChar;
            data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand];
            data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
            SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
            for (unsigned i = 0; i < table.branchOffsets.size(); ++i) {
                if (!table.branchOffsets[i])
                    continue;
                unsigned target = m_currentIndex + table.branchOffsets[i];
                if (target == data.fallThrough.bytecodeIndex())
                    continue;
                data.cases.append(
                    SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target));
            }
            flushIfTerminal(data);
            addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
            LAST_OPCODE(op_switch_char);
        }

        case op_switch_string: {
            SwitchData& data = *m_graph.m_switchData.add();
            data.kind = SwitchString;
            data.switchTableIndex = currentInstruction[1].u.operand;
            data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand);
            StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
            StringJumpTable::StringOffsetTable::iterator iter;
            StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
            for (iter = table.offsetTable.begin(); iter != end; ++iter) {
                unsigned target = m_currentIndex + iter->value.branchOffset;
                if (target == data.fallThrough.bytecodeIndex())
                    continue;
                data.cases.append(
                    SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target));
            }
            flushIfTerminal(data);
            addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand)));
            LAST_OPCODE(op_switch_string);
        }
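
        // op_ret inside an inlined frame stores to the caller's result register and records the
        // block for return linking; only the outermost code block emits an actual Return node.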
        case op_ret:
            if (inlineCallFrame()) {
                ASSERT(m_inlineStackTop->m_returnValue.isValid());
                setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush);
                m_inlineStackTop->m_didReturn = true;
                if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
                    // If we're returning from the first block, then we're done parsing.
                    ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.lastBlock());
                    shouldContinueParsing = false;
                    LAST_OPCODE(op_ret);
                } else {
                    // If inlining created blocks, and we're doing a return, then we need some
                    // special linking.
                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
                    m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
                }
                if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
                    ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
                    addToGraph(Jump, OpInfo(0));
                    m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
                    m_inlineStackTop->m_didEarlyReturn = true;
                }
                LAST_OPCODE(op_ret);
            }
            addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
            LAST_OPCODE(op_ret);
            
        case op_end:
            ASSERT(!inlineCallFrame());
            addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand)));
            LAST_OPCODE(op_end);

        case op_throw:
            addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand)));
            flushForTerminal();
            addToGraph(Unreachable);
            LAST_OPCODE(op_throw);
            
        case op_throw_static_error:
            addToGraph(ThrowReferenceError);
            flushForTerminal();
            addToGraph(Unreachable);
            LAST_OPCODE(op_throw_static_error);
            
        case op_call:
            handleCall(currentInstruction, Call, CodeForCall);
            NEXT_OPCODE(op_call);
            
        case op_construct:
            handleCall(currentInstruction, Construct, CodeForConstruct);
            NEXT_OPCODE(op_construct);
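
        // op_call_varargs expects to be reached only when inlining a callee that forwards its
        // own arguments (see the ASSERTs below); the arguments are copied into a fresh register
        // window so the call can be handled like a normal call.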
        case op_call_varargs: {
            int result = currentInstruction[1].u.operand;
            int callee = currentInstruction[2].u.operand;
            int thisReg = currentInstruction[3].u.operand;
            int arguments = currentInstruction[4].u.operand;
            int firstFreeReg = currentInstruction[5].u.operand;
            
            ASSERT(inlineCallFrame());
            ASSERT_UNUSED(arguments, arguments == m_inlineStackTop->m_codeBlock->argumentsRegister().offset());
            ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments());

            addToGraph(CheckArgumentsNotCreated);

            unsigned argCount = inlineCallFrame()->arguments.size();
            
            // Let's compute the register offset. We start with the last used register, and
            // then adjust for the things we want in the call frame.
            int registerOffset = firstFreeReg + 1;
            registerOffset -= argCount; // We will be passing some arguments.
            registerOffset -= JSStack::CallFrameHeaderSize; // We will pretend to have a call frame header.
            
            // Get the alignment right.
            registerOffset = -WTF::roundUpToMultipleOf(
                stackAlignmentRegisters(),
                -registerOffset);

            ensureLocals(
                m_inlineStackTop->remapOperand(
                    VirtualRegister(registerOffset)).toLocal());
            
            // The bytecode wouldn't have set up the arguments. But we'll do it and make it
            // look like the bytecode had done it.
            int nextRegister = registerOffset + JSStack::CallFrameHeaderSize;
            set(VirtualRegister(nextRegister++), get(VirtualRegister(thisReg)), ImmediateNakedSet);
            for (unsigned argument = 1; argument < argCount; ++argument)
                set(VirtualRegister(nextRegister++), get(virtualRegisterForArgument(argument)), ImmediateNakedSet);
            
            handleCall(
                result, Call, CodeForCall, OPCODE_LENGTH(op_call_varargs),
                callee, argCount, registerOffset);
            NEXT_OPCODE(op_call_varargs);
        }
            
        case op_jneq_ptr:
            // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
            // support simmer for a while before making it more general, since it's
            // already gnarly enough as it is.
            ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
            addToGraph(
                CheckFunction,
                OpInfo(actualPointerFor(m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)),
                get(VirtualRegister(currentInstruction[1].u.operand)));
            addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
            LAST_OPCODE(op_jneq_ptr);
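
        // op_resolve_scope can often be constant-folded: global resolves produce the global
        // object, and a closure resolve against a known activation whose reentry watchpoint is
        // still valid produces that activation; otherwise we walk the scope chain via getScope().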
        case op_resolve_scope: {
            int dst = currentInstruction[1].u.operand;
            ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
            unsigned depth = currentInstruction[4].u.operand;

            // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints.
            if (needsVarInjectionChecks(resolveType))
                addToGraph(VarInjectionWatchpoint);

            switch (resolveType) {
            case GlobalProperty:
            case GlobalVar:
            case GlobalPropertyWithVarInjectionChecks:
            case GlobalVarWithVarInjectionChecks:
                set(VirtualRegister(dst), cellConstant(m_inlineStackTop->m_codeBlock->globalObject()));
                break;
            case ClosureVar:
            case ClosureVarWithVarInjectionChecks: {
                JSActivation* activation = currentInstruction[5].u.activation.get();
                if (activation
                    && activation->symbolTable()->m_functionEnteredOnce.isStillValid()) {
                    addToGraph(FunctionReentryWatchpoint, OpInfo(activation->symbolTable()));
                    set(VirtualRegister(dst), cellConstant(activation));
                    break;
                }
                set(VirtualRegister(dst),
                    getScope(m_inlineStackTop->m_codeBlock->needsActivation(), depth));
                break;
            }
            case Dynamic:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            NEXT_OPCODE(op_resolve_scope);
        }
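
        // op_get_from_scope specializes on the resolve type: global property loads become
        // structure-checked loads off the global object, global vars can fold to watchpointed
        // constants or a GetGlobalVar, and closure vars either fold through the symbol table's
        // variable watchpoints or load from the activation's registers.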
        case op_get_from_scope: {
            int dst = currentInstruction[1].u.operand;
            int scope = currentInstruction[2].u.operand;
            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
            StringImpl* uid = m_graph.identifiers()[identifierNumber];
            ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();

            Structure* structure = 0;
            WatchpointSet* watchpoints = 0;
            uintptr_t operand;
            {
                ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
                if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
                    watchpoints = currentInstruction[5].u.watchpointSet;
                else
                    structure = currentInstruction[5].u.structure.get();
                operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
            }

            UNUSED_PARAM(watchpoints); // We will use this in the future. For now we set it as a way of documenting the fact that that's what index 5 is in GlobalVar mode.

            SpeculatedType prediction = getPrediction();
            JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();

            switch (resolveType) {
            case GlobalProperty:
            case GlobalPropertyWithVarInjectionChecks: {
                GetByIdStatus status = GetByIdStatus::computeFor(*m_vm, structure, uid);
                if (status.state() != GetByIdStatus::Simple || status.numVariants() != 1) {
                    set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope))));
                    break;
                }
                Node* base = cellConstantWithStructureCheck(globalObject, status[0].structureSet().singletonStructure());
                addToGraph(Phantom, get(VirtualRegister(scope)));
                if (JSValue specificValue = status[0].specificValue())
                    set(VirtualRegister(dst), cellConstant(specificValue.asCell()));
                else
                    set(VirtualRegister(dst), handleGetByOffset(prediction, base, identifierNumber, operand));
                break;
            }
            case GlobalVar:
            case GlobalVarWithVarInjectionChecks: {
                addToGraph(Phantom, get(VirtualRegister(scope)));
                SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
                VariableWatchpointSet* watchpointSet = entry.watchpointSet();
                JSValue specificValue =
                    watchpointSet ? watchpointSet->inferredValue() : JSValue();
                if (!specificValue) {
                    set(VirtualRegister(dst), addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction)));
                    break;
                }

                addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
                set(VirtualRegister(dst), inferredConstant(specificValue));
                break;
            }
            case ClosureVar:
            case ClosureVarWithVarInjectionChecks: {
                Node* scopeNode = get(VirtualRegister(scope));
                if (JSActivation* activation = m_graph.tryGetActivation(scopeNode)) {
                    SymbolTable* symbolTable = activation->symbolTable();
                    ConcurrentJITLocker locker(symbolTable->m_lock);
                    SymbolTable::Map::iterator iter = symbolTable->find(locker, uid);
                    ASSERT(iter != symbolTable->end(locker));
                    VariableWatchpointSet* watchpointSet = iter->value.watchpointSet();
                    if (watchpointSet) {
                        if (JSValue value = watchpointSet->inferredValue()) {
                            addToGraph(Phantom, scopeNode);
                            addToGraph(VariableWatchpoint, OpInfo(watchpointSet));
                            set(VirtualRegister(dst), inferredConstant(value));
                            break;
                        }
                    }
                }
                set(VirtualRegister(dst),
                    addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction),
                        addToGraph(GetClosureRegisters, scopeNode)));
                break;
            }
            case Dynamic:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            NEXT_OPCODE(op_get_from_scope);
        }
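
        // op_put_to_scope is the store counterpart: global property stores become checked
        // replace-style puts when the PutByIdStatus allows it, global var stores notify the
        // variable watchpoint set, and closure var stores become PutClosureVar.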
        case op_put_to_scope: {
            unsigned scope = currentInstruction[1].u.operand;
            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
            unsigned value = currentInstruction[3].u.operand;
            ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
            StringImpl* uid = m_graph.identifiers()[identifierNumber];

            Structure* structure = 0;
            VariableWatchpointSet* watchpoints = 0;
            uintptr_t operand;
            {
                ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock);
                if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
                    watchpoints = currentInstruction[5].u.watchpointSet;
                else
                    structure = currentInstruction[5].u.structure.get();
                operand = reinterpret_cast<uintptr_t>(currentInstruction[6].u.pointer);
            }

            JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();

            switch (resolveType) {
            case GlobalProperty:
            case GlobalPropertyWithVarInjectionChecks: {
                PutByIdStatus status = PutByIdStatus::computeFor(*m_vm, globalObject, structure, uid, false);
                if (status.numVariants() != 1 || status[0].kind() != PutByIdVariant::Replace) {
                    addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value)));
                    break;
                }
                Node* base = cellConstantWithStructureCheck(globalObject, status[0].structure());
                addToGraph(Phantom, get(VirtualRegister(scope)));
                handlePutByOffset(base, identifierNumber, static_cast<PropertyOffset>(operand), get(VirtualRegister(value)));
                // Keep scope alive until after put.
                addToGraph(Phantom, get(VirtualRegister(scope)));
                break;
            }
            case GlobalVar:
            case GlobalVarWithVarInjectionChecks: {
                SymbolTableEntry entry = globalObject->symbolTable()->get(uid);
                ASSERT(watchpoints == entry.watchpointSet());
                Node* valueNode = get(VirtualRegister(value));
                addToGraph(PutGlobalVar, OpInfo(operand), valueNode);
                if (watchpoints->state() != IsInvalidated)
                    addToGraph(NotifyWrite, OpInfo(watchpoints), valueNode);
                // Keep scope alive until after put.
                addToGraph(Phantom, get(VirtualRegister(scope)));
                break;
            }
            case ClosureVar:
            case ClosureVarWithVarInjectionChecks: {
                Node* scopeNode = get(VirtualRegister(scope));
                Node* scopeRegisters = addToGraph(GetClosureRegisters, scopeNode);
                addToGraph(PutClosureVar, OpInfo(operand), scopeNode, scopeRegisters, get(VirtualRegister(value)));
                break;
            }
            case Dynamic:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }
            NEXT_OPCODE(op_put_to_scope);
        }

        case op_loop_hint: {
            // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
            // OSR can only happen at basic block boundaries. Assert that these two statements
            // are compatible.
            RELEASE_ASSERT(m_currentIndex == blockBegin);

            // We never do OSR into an inlined code block. That could not happen, since OSR
            // looks up the code block that is the replacement for the baseline JIT code
            // block. Hence, machine code block = true code block = not inline code block.
            if (!m_inlineStackTop->m_caller)
                m_currentBlock->isOSRTarget = true;

            addToGraph(LoopHint);

            if (m_vm->watchdog && m_vm->watchdog->isEnabled())
                addToGraph(CheckWatchdogTimer);

            NEXT_OPCODE(op_loop_hint);
        }
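
        // op_init_lazy_reg initializes a lazily-created local to the empty JSValue and
        // records it in m_lazyVars so later phases know it may be uninitialized.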
        case op_init_lazy_reg: {
            set(VirtualRegister(currentInstruction[1].u.operand), getJSConstantForValue(JSValue()));
            ASSERT(operandIsLocal(currentInstruction[1].u.operand));
            m_graph.m_lazyVars.set(VirtualRegister(currentInstruction[1].u.operand).toLocal());
            NEXT_OPCODE(op_init_lazy_reg);
        }

        case op_create_activation: {
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CreateActivation, get(VirtualRegister(currentInstruction[1].u.operand))));
            NEXT_OPCODE(op_create_activation);
        }
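
        // The opcodes that touch the arguments object (op_create_arguments,
        // op_tear_off_arguments, op_get_arguments_length, op_get_argument_by_val) all set
        // m_hasArguments so later phases know an arguments object may be observed.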
        case op_create_arguments: {
            m_graph.m_hasArguments = true;
            Node* createArguments = addToGraph(CreateArguments, get(VirtualRegister(currentInstruction[1].u.operand)));
            set(VirtualRegister(currentInstruction[1].u.operand), createArguments);
            set(unmodifiedArgumentsRegister(VirtualRegister(currentInstruction[1].u.operand)), createArguments);
            NEXT_OPCODE(op_create_arguments);
        }

        case op_tear_off_activation: {
            addToGraph(TearOffActivation, get(VirtualRegister(currentInstruction[1].u.operand)));
            NEXT_OPCODE(op_tear_off_activation);
        }

        case op_tear_off_arguments: {
            m_graph.m_hasArguments = true;
            addToGraph(TearOffArguments, get(unmodifiedArgumentsRegister(VirtualRegister(currentInstruction[1].u.operand))), get(VirtualRegister(currentInstruction[2].u.operand)));
            NEXT_OPCODE(op_tear_off_arguments);
        }

        case op_get_arguments_length: {
            m_graph.m_hasArguments = true;
            set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetMyArgumentsLengthSafe));
            NEXT_OPCODE(op_get_arguments_length);
        }

        case op_get_argument_by_val: {
            m_graph.m_hasArguments = true;
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(
                    GetMyArgumentByValSafe, OpInfo(0), OpInfo(getPrediction()),
                    get(VirtualRegister(currentInstruction[3].u.operand))));
            NEXT_OPCODE(op_get_argument_by_val);
        }
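
        // op_new_func allocates a function object. When operand 3 is zero we can use
        // NewFunctionNoCheck; otherwise we emit the checked NewFunction form, which reuses
        // the value already in the destination register if it was created before.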
        case op_new_func: {
            if (!currentInstruction[3].u.operand) {
                set(VirtualRegister(currentInstruction[1].u.operand),
                    addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand)));
            } else {
                set(VirtualRegister(currentInstruction[1].u.operand),
                    addToGraph(
                        NewFunction,
                        OpInfo(currentInstruction[2].u.operand),
                        get(VirtualRegister(currentInstruction[1].u.operand))));
            }
            NEXT_OPCODE(op_new_func);
        }

        case op_new_captured_func: {
            Node* function = addToGraph(
                NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand));
            if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet)
                addToGraph(NotifyWrite, OpInfo(set), function);
            set(VirtualRegister(currentInstruction[1].u.operand), function);
            NEXT_OPCODE(op_new_captured_func);
        }

        case op_new_func_exp: {
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand)));
            NEXT_OPCODE(op_new_func_exp);
        }

        case op_typeof: {
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(TypeOf, get(VirtualRegister(currentInstruction[2].u.operand))));
            NEXT_OPCODE(op_typeof);
        }
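
        // op_to_number needs no explicit conversion node here: we reuse the input value and
        // plant a Phantom with a NumberUse edge so OSR exit fires if it is not a number.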
        case op_to_number: {
            Node* node = get(VirtualRegister(currentInstruction[2].u.operand));
            addToGraph(Phantom, Edge(node, NumberUse));
            set(VirtualRegister(currentInstruction[1].u.operand), node);
            NEXT_OPCODE(op_to_number);
        }

        case op_in: {
            set(VirtualRegister(currentInstruction[1].u.operand),
                addToGraph(In, get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand))));
            NEXT_OPCODE(op_in);
        }

        default:
            // Parse failed! This should not happen because the capabilities checker
            // should have caught it.
            RELEASE_ASSERT_NOT_REACHED();
            return false;
        }
    }
}
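
// During parsing, branch targets are recorded as bytecode offsets. linkBlock patches the
// terminal node of a block so that those offsets refer to the actual successor basic blocks.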
void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BasicBlock*>& possibleTargets)
{
    ASSERT(!block->isLinked);
    ASSERT(!block->isEmpty());
    Node* node = block->last();
    ASSERT(node->isTerminal());

    switch (node->op()) {
    case Jump:
        node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing());
        break;

    case Branch: {
        BranchData* data = node->branchData();
        data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex());
        data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex());
        break;
    }

    case Switch: {
        SwitchData* data = node->switchData();
        for (unsigned i = node->switchData()->cases.size(); i--;)
            data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex());
        data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex());
        break;
    }

    default:
        break;
    }

#if !ASSERT_DISABLED
    block->isLinked = true;
#endif
}

void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BasicBlock*>& possibleTargets)
{
    for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
        if (unlinkedBlocks[i].m_needsNormalLinking) {
            linkBlock(unlinkedBlocks[i].m_block, possibleTargets);
            unlinkedBlocks[i].m_needsNormalLinking = false;
        }
    }
}
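
// buildOperandMapsIfNecessary lazily builds the identifier and constant lookup tables for
// the machine code block; they are only needed once we inline, when an inlinee's operands
// have to be remapped into the machine code block's tables.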
void ByteCodeParser::buildOperandMapsIfNecessary()
{
    if (m_haveBuiltOperandMaps)
        return;

    for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
        m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);
    for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i) {
        JSValue value = m_codeBlock->getConstant(i + FirstConstantRegisterIndex);
        if (!value)
            m_emptyJSValueIndex = i + FirstConstantRegisterIndex;
        else
            m_jsValueMap.add(JSValue::encode(value), i + FirstConstantRegisterIndex);
    }

    m_haveBuiltOperandMaps = true;
}
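
// An InlineStackEntry represents one level of the inline stack that the parser is currently
// working through. For an inlined frame it allocates the InlineCallFrame, computes the
// captured variables, and remaps the inlinee's identifiers, constants, constant buffers, and
// switch jump tables into the machine code block's tables; for the machine code block itself
// the remaps are the identity.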
ByteCodeParser::InlineStackEntry::InlineStackEntry(
    ByteCodeParser* byteCodeParser,
    CodeBlock* codeBlock,
    CodeBlock* profiledBlock,
    BasicBlock* callsiteBlockHead,
    JSFunction* callee, // Null if this is a closure call.
    VirtualRegister returnValueVR,
    VirtualRegister inlineCallFrameStart,
    int argumentCountIncludingThis,
    CodeSpecializationKind kind)
    : m_byteCodeParser(byteCodeParser)
    , m_codeBlock(codeBlock)
    , m_profiledBlock(profiledBlock)
    , m_callsiteBlockHead(callsiteBlockHead)
    , m_returnValue(returnValueVR)
    , m_didReturn(false)
    , m_didEarlyReturn(false)
    , m_caller(byteCodeParser->m_inlineStackTop)
{
    {
        ConcurrentJITLocker locker(m_profiledBlock->m_lock);
        m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles());
        m_exitProfile.initialize(locker, profiledBlock->exitProfile());

        // We do this while holding the lock because we want to encourage StructureStubInfo's
        // to be potentially added to operations and because the profiled block could be in the
        // middle of LLInt->JIT tier-up in which case we would be adding the info's right now.
        if (m_profiledBlock->hasBaselineJITProfiling()) {
            m_profiledBlock->getStubInfoMap(locker, m_stubInfos);
            m_profiledBlock->getCallLinkInfoMap(locker, m_callLinkInfos);
        }
    }

    m_argumentPositions.resize(argumentCountIncludingThis);
    for (int i = 0; i < argumentCountIncludingThis; ++i) {
        byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition());
        ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last();
        m_argumentPositions[i] = argumentPosition;
    }

    // Track the code-block-global exit sites.
    if (m_exitProfile.hasExitSite(ArgumentsEscaped)) {
        byteCodeParser->m_graph.m_executablesWhoseArgumentsEscaped.add(
            codeBlock->ownerExecutable());
    }

    if (m_caller) {
        // Inline case.
        ASSERT(codeBlock != byteCodeParser->m_codeBlock);
        ASSERT(inlineCallFrameStart.isValid());
        ASSERT(callsiteBlockHead);

        m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames->add();
        initializeLazyWriteBarrierForInlineCallFrameExecutable(
            byteCodeParser->m_graph.m_plan.writeBarriers,
            m_inlineCallFrame->executable,
            byteCodeParser->m_codeBlock,
            m_inlineCallFrame,
            byteCodeParser->m_codeBlock->ownerExecutable(),
            codeBlock->ownerExecutable());
        m_inlineCallFrame->stackOffset = inlineCallFrameStart.offset() - JSStack::CallFrameHeaderSize;
        if (callee) {
            m_inlineCallFrame->calleeRecovery = ValueRecovery::constant(callee);
            m_inlineCallFrame->isClosureCall = false;
        } else
            m_inlineCallFrame->isClosureCall = true;
        m_inlineCallFrame->caller = byteCodeParser->currentCodeOrigin();
        m_inlineCallFrame->arguments.resize(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet.
        m_inlineCallFrame->isCall = isCall(kind);

        if (m_inlineCallFrame->caller.inlineCallFrame)
            m_inlineCallFrame->capturedVars = m_inlineCallFrame->caller.inlineCallFrame->capturedVars;
        else {
            for (int i = byteCodeParser->m_codeBlock->m_numVars; i--;) {
                if (byteCodeParser->m_codeBlock->isCaptured(virtualRegisterForLocal(i)))
                    m_inlineCallFrame->capturedVars.set(i);
            }
        }

        for (int i = argumentCountIncludingThis; i--;) {
            VirtualRegister argument = virtualRegisterForArgument(i);
            if (codeBlock->isCaptured(argument))
                m_inlineCallFrame->capturedVars.set(VirtualRegister(argument.offset() + m_inlineCallFrame->stackOffset).toLocal());
        }
        for (size_t i = codeBlock->m_numVars; i--;) {
            VirtualRegister local = virtualRegisterForLocal(i);
            if (codeBlock->isCaptured(local))
                m_inlineCallFrame->capturedVars.set(VirtualRegister(local.offset() + m_inlineCallFrame->stackOffset).toLocal());
        }

        byteCodeParser->buildOperandMapsIfNecessary();

        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
        m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
        m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
        m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());

        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
            StringImpl* rep = codeBlock->identifier(i).impl();
            BorrowedIdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_graph.identifiers().numberOfIdentifiers());
            if (result.isNewEntry)
                byteCodeParser->m_graph.identifiers().addLazily(rep);
            m_identifierRemap[i] = result.iterator->value;
        }
        for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
            JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
            if (!value) {
                if (byteCodeParser->m_emptyJSValueIndex == UINT_MAX) {
                    byteCodeParser->m_emptyJSValueIndex = byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex;
                    byteCodeParser->addConstant(JSValue());
                    byteCodeParser->m_constants.append(ConstantRecord());
                }
                m_constantRemap[i] = byteCodeParser->m_emptyJSValueIndex;
                continue;
            }
            JSValueMap::AddResult result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
            if (result.isNewEntry) {
                byteCodeParser->addConstant(value);
                byteCodeParser->m_constants.append(ConstantRecord());
            }
            m_constantRemap[i] = result.iterator->value;
        }
        for (unsigned i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) {
            // If we inline the same code block multiple times, we don't want to needlessly
            // duplicate its constant buffers.
            HashMap<ConstantBufferKey, unsigned>::iterator iter =
                byteCodeParser->m_constantBufferCache.find(ConstantBufferKey(codeBlock, i));
            if (iter != byteCodeParser->m_constantBufferCache.end()) {
                m_constantBufferRemap[i] = iter->value;
                continue;
            }
            Vector<JSValue>& buffer = codeBlock->constantBufferAsVector(i);
            unsigned newIndex = byteCodeParser->m_codeBlock->addConstantBuffer(buffer);
            m_constantBufferRemap[i] = newIndex;
            byteCodeParser->m_constantBufferCache.add(ConstantBufferKey(codeBlock, i), newIndex);
        }
        for (unsigned i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i) {
            m_switchRemap[i] = byteCodeParser->m_codeBlock->numberOfSwitchJumpTables();
            byteCodeParser->m_codeBlock->addSwitchJumpTable() = codeBlock->switchJumpTable(i);
        }
        m_callsiteBlockHeadNeedsLinking = true;
    } else {
        // Machine code block case.
        ASSERT(codeBlock == byteCodeParser->m_codeBlock);
        ASSERT(!returnValueVR.isValid());
        ASSERT(!inlineCallFrameStart.isValid());
        ASSERT(!callsiteBlockHead);

        m_inlineCallFrame = 0;

        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
        m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
        m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
        m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables());
        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
            m_identifierRemap[i] = i;
        for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
            m_constantRemap[i] = i + FirstConstantRegisterIndex;
        for (size_t i = 0; i < codeBlock->numberOfConstantBuffers(); ++i)
            m_constantBufferRemap[i] = i;
        for (size_t i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i)
            m_switchRemap[i] = i;
        m_callsiteBlockHeadNeedsLinking = false;
    }

    for (size_t i = 0; i < m_constantRemap.size(); ++i)
        ASSERT(m_constantRemap[i] >= static_cast<unsigned>(FirstConstantRegisterIndex));

    byteCodeParser->m_inlineStackTop = this;
}
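
// parseCodeBlock drives parsing of the current (possibly inlined) code block: it splits the
// bytecode at the precisely computed jump targets and parses each range into basic blocks.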
void ByteCodeParser::parseCodeBlock()
{
    CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;

    if (m_graph.compilation()) {
        m_graph.compilation()->addProfiledBytecodes(
            *m_vm->m_perBytecodeProfiler, m_inlineStackTop->m_profiledBlock);
    }

    bool shouldDumpBytecode = Options::dumpBytecodeAtDFGTime();
    if (shouldDumpBytecode) {
        dataLog("Parsing ", *codeBlock);
        if (inlineCallFrame()) {
            dataLog(
                " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT),
                " ", inlineCallFrame()->caller);
        }
        dataLog(
            ": captureCount = ", codeBlock->symbolTable() ? codeBlock->symbolTable()->captureCount() : 0,
            ", needsActivation = ", codeBlock->needsActivation(),
            ", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n");
        codeBlock->baselineVersion()->dumpBytecode();
    }

    Vector<unsigned, 32> jumpTargets;
    computePreciseJumpTargets(codeBlock, jumpTargets);
    if (Options::dumpBytecodeAtDFGTime()) {
        dataLog("Jump targets: ");
        CommaPrinter comma;
        for (unsigned i = 0; i < jumpTargets.size(); ++i)
            dataLog(comma, jumpTargets[i]);
        dataLog("\n");
    }

    for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= jumpTargets.size(); ++jumpTargetIndex) {
        // The maximum bytecode offset to go into the current basicblock is either the next jump target, or the end of the instructions.
        unsigned limit = jumpTargetIndex < jumpTargets.size() ? jumpTargets[jumpTargetIndex] : codeBlock->instructions().size();
        ASSERT(m_currentIndex < limit);

        // Loop until we reach the current limit (i.e. next jump target).
        do {
            if (!m_currentBlock) {
                // Check if we can use the last block.
                if (m_graph.numBlocks() && m_graph.lastBlock()->isEmpty()) {
                    // This must be a block belonging to us.
                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock());
                    // Either the block is linkable or it isn't. If it's linkable then it's the last
                    // block in the blockLinkingTargets list. If it's not then the last block will
                    // have a lower bytecode index that the one we're about to give to this block.
                    if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin != m_currentIndex) {
                        // Make the block linkable.
                        ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < m_currentIndex);
                        m_inlineStackTop->m_blockLinkingTargets.append(m_graph.lastBlock());
                    }
                    // Change its bytecode begin and continue.
                    m_currentBlock = m_graph.lastBlock();
                    m_currentBlock->bytecodeBegin = m_currentIndex;
                } else {
                    RefPtr<BasicBlock> block = adoptRef(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals, PNaN));
                    m_currentBlock = block.get();
                    // This assertion checks two things:
                    // 1) If the bytecodeBegin is greater than currentIndex, then something has gone
                    //    horribly wrong. So, we're probably generating incorrect code.
                    // 2) If the bytecodeBegin is equal to the currentIndex, then we failed to do
                    //    a peephole coalescing of this block in the if statement above. So, we're
                    //    generating suboptimal code and leaving more work for the CFG simplifier.
                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin < m_currentIndex);
                    m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(block.get()));
                    m_inlineStackTop->m_blockLinkingTargets.append(block.get());
                    // The first block is definitely an OSR target.
                    if (!m_graph.numBlocks())
                        block->isOSRTarget = true;
                    m_graph.appendBlock(block);
                    prepareToParseBlock();
                }
            }

            bool shouldContinueParsing = parseBlock(limit);

            // We should not have gone beyond the limit.
            ASSERT(m_currentIndex <= limit);

            // We should have planted a terminal, or we just gave up because
            // we realized that the jump target information is imprecise, or we
            // are at the end of an inline function, or we realized that we
            // should stop parsing because there was a return in the first
            // basic block.
            ASSERT(m_currentBlock->isEmpty() || m_currentBlock->last()->isTerminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()) || !shouldContinueParsing);

            if (!shouldContinueParsing)
                return;

            m_currentBlock = 0;
        } while (m_currentIndex < limit);
    }

    // Should have reached the end of the instructions.
    ASSERT(m_currentIndex == codeBlock->instructions().size());
}
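
// parse() is the entry point: it sets up the root InlineStackEntry, parses the machine code
// block (inlining as it goes), links the blocks, and then prunes unreachable ones.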
bool ByteCodeParser::parse()
{
    // Set during construction.
    ASSERT(!m_currentIndex);

    if (Options::verboseDFGByteCodeParsing())
        dataLog("Parsing ", *m_codeBlock, "\n");

    m_dfgCodeBlock = m_graph.m_plan.profiledDFGCodeBlock.get();
    if (isFTL(m_graph.m_plan.mode) && m_dfgCodeBlock
        && Options::enablePolyvariantDevirtualization()) {
        if (Options::enablePolyvariantCallInlining())
            CallLinkStatus::computeDFGStatuses(m_dfgCodeBlock, m_callContextMap);
        if (Options::enablePolyvariantByIdInlining())
            m_dfgCodeBlock->getStubInfoMap(m_dfgStubInfos);
    }

    if (m_codeBlock->captureCount()) {
        SymbolTable* symbolTable = m_codeBlock->symbolTable();
        ConcurrentJITLocker locker(symbolTable->m_lock);
        SymbolTable::Map::iterator iter = symbolTable->begin(locker);
        SymbolTable::Map::iterator end = symbolTable->end(locker);
        for (; iter != end; ++iter) {
            VariableWatchpointSet* set = iter->value.watchpointSet();
            if (!set)
                continue;
            size_t index = static_cast<size_t>(VirtualRegister(iter->value.getIndex()).toLocal());
            while (m_localWatchpoints.size() <= index)
                m_localWatchpoints.append(nullptr);
            m_localWatchpoints[index] = set;
        }
    }

    InlineStackEntry inlineStackEntry(
        this, m_codeBlock, m_profiledBlock, 0, 0, VirtualRegister(), VirtualRegister(),
        m_codeBlock->numParameters(), CodeForCall);

    parseCodeBlock();

    linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
    m_graph.determineReachability();
    m_graph.killUnreachableBlocks();

    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        ASSERT(block->variablesAtHead.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals());
        ASSERT(block->variablesAtHead.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
        ASSERT(block->variablesAtTail.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals());
        ASSERT(block->variablesAtTail.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments());
    }

    m_graph.m_localVars = m_numLocals;
    m_graph.m_parameterSlots = m_parameterSlots;

    return true;
}

bool parse(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Parsing");
    return ByteCodeParser(graph).parse();
}

} } // namespace JSC::DFG