/*
 * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "DFGByteCodeParser.h"

#if ENABLE(DFG_JIT)

#include "ArrayConstructor.h"
#include "CallLinkStatus.h"
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "DFGArrayMode.h"
#include "DFGCapabilities.h"
#include "GetByIdStatus.h"
#include "Operations.h"
#include "PreciseJumpTargets.h"
#include "PutByIdStatus.h"
#include "ResolveGlobalStatus.h"
#include "StringConstructor.h"
#include <wtf/CommaPrinter.h>
#include <wtf/HashMap.h>
#include <wtf/MathExtras.h>

namespace JSC { namespace DFG {

class ConstantBufferKey {
public:
    ConstantBufferKey()
        : m_codeBlock(0)
        , m_index(0)
    {
    }
    
    ConstantBufferKey(WTF::HashTableDeletedValueType)
        : m_codeBlock(0)
        , m_index(1)
    {
    }
    
    ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
        : m_codeBlock(codeBlock)
        , m_index(index)
    {
    }
    
    bool operator==(const ConstantBufferKey& other) const
    {
        return m_codeBlock == other.m_codeBlock
            && m_index == other.m_index;
    }
    
    unsigned hash() const
    {
        return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
    }
    
    bool isHashTableDeletedValue() const
    {
        return !m_codeBlock && m_index;
    }
    
    CodeBlock* codeBlock() const { return m_codeBlock; }
    unsigned index() const { return m_index; }
    
private:
    CodeBlock* m_codeBlock;
    unsigned m_index;
};

struct ConstantBufferKeyHash {
    static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
    static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
    {
        return a == b;
    }
    
    static const bool safeToCompareToEmptyOrDeleted = true;
};

} } // namespace JSC::DFG

namespace WTF {

template<typename T> struct DefaultHash;
template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
    typedef JSC::DFG::ConstantBufferKeyHash Hash;
};

template<typename T> struct HashTraits;
template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };

} // namespace WTF

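// A sketch of how these traits are consumed (illustrative; the parser's
// m_constantBufferCache member below is the real user). With DefaultHash and
// HashTraits specialized, a plain WTF::HashMap keyed by ConstantBufferKey
// just works:
//
//     HashMap<JSC::DFG::ConstantBufferKey, unsigned> cache;
//     cache.add(JSC::DFG::ConstantBufferKey(codeBlock, bufferIndex), remappedIndex);
//
// The HashTableDeletedValue constructor and isHashTableDeletedValue() above
// exist so the table can represent tombstones without a custom traits class.
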
namespace JSC { namespace DFG {

// === ByteCodeParser ===
//
// This class is used to compile the dataflow graph from a CodeBlock.
class ByteCodeParser {
public:
    ByteCodeParser(Graph& graph)
        : m_globalData(&graph.m_globalData)
        , m_codeBlock(graph.m_codeBlock)
        , m_profiledBlock(graph.m_profiledBlock)
        , m_graph(graph)
        , m_currentBlock(0)
        , m_currentIndex(0)
        , m_currentProfilingIndex(0)
        , m_constantUndefined(UINT_MAX)
        , m_constantNull(UINT_MAX)
        , m_constantNaN(UINT_MAX)
        , m_constant1(UINT_MAX)
        , m_constants(m_codeBlock->numberOfConstantRegisters())
        , m_numArguments(m_codeBlock->numParameters())
        , m_numLocals(m_codeBlock->m_numCalleeRegisters)
        , m_preservedVars(m_codeBlock->m_numVars)
        , m_parameterSlots(0)
        , m_numPassedVarArgs(0)
        , m_inlineStackTop(0)
        , m_haveBuiltOperandMaps(false)
        , m_emptyJSValueIndex(UINT_MAX)
        , m_currentInstruction(0)
    {
        ASSERT(m_profiledBlock);
        
        for (int i = 0; i < m_codeBlock->m_numVars; ++i)
            m_preservedVars.set(i);
    }
    
    // Parse a full CodeBlock of bytecode.
    bool parse();
    
private:
    struct InlineStackEntry;
    
    // Just parse from m_currentIndex to the end of the current CodeBlock.
    void parseCodeBlock();
    
    // Helper for min and max.
    bool handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis);
    
    // Handle calls. This resolves issues surrounding inlining and intrinsics.
    void handleCall(Interpreter*, Instruction* currentInstruction, NodeType op, CodeSpecializationKind);
    void emitFunctionChecks(const CallLinkStatus&, Node* callTarget, int registerOffset, CodeSpecializationKind);
    void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind);
    // Handle inlining. Return true if it succeeded, false if we need to plant a call.
    bool handleInlining(bool usesResult, Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind);
    // Handle setting the result of an intrinsic.
    void setIntrinsicResult(bool usesResult, int resultOperand, Node*);
    // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call.
    bool handleIntrinsic(bool usesResult, int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction);
    bool handleConstantInternalFunction(bool usesResult, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind);
    Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset);
    void handleGetByOffset(
        int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
        PropertyOffset);
    void handleGetById(
        int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber,
        const GetByIdStatus&);
    
    Node* getScope(bool skipTop, unsigned skipCount);
    
    // Convert a set of ResolveOperations into graph nodes
    bool parseResolveOperations(SpeculatedType, unsigned identifierNumber, ResolveOperations*, PutToBaseOperation*, Node** base, Node** value);
    
    // Prepare to parse a block.
    void prepareToParseBlock();
    // Parse a single basic block of bytecode instructions.
    bool parseBlock(unsigned limit);
    // Link block successors.
    void linkBlock(BasicBlock*, Vector<BlockIndex>& possibleTargets);
    void linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets);
    
    VariableAccessData* newVariableAccessData(int operand, bool isCaptured)
    {
        ASSERT(operand < FirstConstantRegisterIndex);
        
        m_graph.m_variableAccessData.append(VariableAccessData(static_cast<VirtualRegister>(operand), isCaptured));
        return &m_graph.m_variableAccessData.last();
    }
    
 206     Node
* getDirect(int operand
) 
 208         // Is this a constant? 
 209         if (operand 
>= FirstConstantRegisterIndex
) { 
 210             unsigned constant 
= operand 
- FirstConstantRegisterIndex
; 
 211             ASSERT(constant 
< m_constants
.size()); 
 212             return getJSConstant(constant
); 
 215         ASSERT(operand 
!= JSStack::Callee
); 
 217         // Is this an argument? 
 218         if (operandIsArgument(operand
)) 
 219             return getArgument(operand
); 
 222         return getLocal((unsigned)operand
); 
    Node* get(int operand)
    {
        if (operand == JSStack::Callee) {
            if (inlineCallFrame() && inlineCallFrame()->callee)
                return cellConstant(inlineCallFrame()->callee.get());
            return getCallee();
        }
        
        return getDirect(m_inlineStackTop->remapOperand(operand));
    }
    enum SetMode { NormalSet, SetOnEntry };
    void setDirect(int operand, Node* value, SetMode setMode = NormalSet)
    {
        // Is this an argument?
        if (operandIsArgument(operand)) {
            setArgument(operand, value, setMode);
            return;
        }
        
        // Must be a local.
        setLocal((unsigned)operand, value, setMode);
    }
    void set(int operand, Node* value, SetMode setMode = NormalSet)
    {
        setDirect(m_inlineStackTop->remapOperand(operand), value, setMode);
    }
    
    void setPair(int operand1, Node* value1, int operand2, Node* value2)
    {
        // First emit dead SetLocals for the benefit of OSR.
        set(operand1, value1);
        set(operand2, value2);
        
        // Now emit the real SetLocals.
        set(operand1, value1);
        set(operand2, value2);
    }
    
    Node* injectLazyOperandSpeculation(Node* node)
    {
        ASSERT(node->op() == GetLocal);
        ASSERT(node->codeOrigin.bytecodeIndex == m_currentIndex);
        SpeculatedType prediction = 
            m_inlineStackTop->m_lazyOperands.prediction(
                LazyOperandValueProfileKey(m_currentIndex, node->local()));
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog("Lazy operand [@", node->index(), ", bc#", m_currentIndex, ", r", node->local(), "] prediction: ", SpeculationDump(prediction), "\n");
#endif
        node->variableAccessData()->predict(prediction);
        return node;
    }
    
    // Used in implementing get/set, above, where the operand is a local variable.
    Node* getLocal(unsigned operand)
    {
        Node* node = m_currentBlock->variablesAtTail.local(operand);
        bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
        
        // This has two goals: 1) link together variable access datas, and 2)
        // try to avoid creating redundant GetLocals. (1) is required for
        // correctness - no other phase will ensure that block-local variable
        // access data unification is done correctly. (2) is purely opportunistic
        // and is meant as a compile-time optimization only.
        
        VariableAccessData* variable;
        
        if (node) {
            variable = node->variableAccessData();
            variable->mergeIsCaptured(isCaptured);
            
            if (!isCaptured) {
                switch (node->op()) {
                case GetLocal:
                    return node;
                case SetLocal:
                    return node->child1().node();
                default:
                    break;
                }
            }
        } else {
            m_preservedVars.set(operand);
            variable = newVariableAccessData(operand, isCaptured);
        }
        
        node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
        m_currentBlock->variablesAtTail.local(operand) = node;
        return node;
    }
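    
    // A consequence of the switch above (on the uncaptured fast path): a read
    // that immediately follows a write to the same local is folded away.
    //
    //     setLocal(7, value);    // emits SetLocal(r7)
    //     Node* n = getLocal(7); // returns SetLocal's child1, i.e. 'value';
    //                            // no GetLocal node is emitted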
    void setLocal(unsigned operand, Node* value, SetMode setMode = NormalSet)
    {
        bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
        
        if (setMode == NormalSet) {
            ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand);
            if (isCaptured || argumentPosition)
                flushDirect(operand, argumentPosition);
        }
        
        VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
        variableAccessData->mergeStructureCheckHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
        Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
        m_currentBlock->variablesAtTail.local(operand) = node;
    }
    
    // Used in implementing get/set, above, where the operand is an argument.
    Node* getArgument(unsigned operand)
    {
        unsigned argument = operandToArgument(operand);
        ASSERT(argument < m_numArguments);
        
        Node* node = m_currentBlock->variablesAtTail.argument(argument);
        bool isCaptured = m_codeBlock->isCaptured(operand);
        
        VariableAccessData* variable;
        
        if (node) {
            variable = node->variableAccessData();
            variable->mergeIsCaptured(isCaptured);
            
            switch (node->op()) {
            case GetLocal:
                return node;
            case SetLocal:
                return node->child1().node();
            default:
                break;
            }
        } else
            variable = newVariableAccessData(operand, isCaptured);
        
        node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable)));
        m_currentBlock->variablesAtTail.argument(argument) = node;
        return node;
    }
    void setArgument(int operand, Node* value, SetMode setMode = NormalSet)
    {
        unsigned argument = operandToArgument(operand);
        ASSERT(argument < m_numArguments);
        
        bool isCaptured = m_codeBlock->isCaptured(operand);
        
        VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured);
        
        // Always flush arguments, except for 'this'. If 'this' is created by us,
        // then make sure that it's never unboxed.
        if (argument) {
            if (setMode == NormalSet)
                flushDirect(operand);
        } else if (m_codeBlock->specializationKind() == CodeForConstruct)
            variableAccessData->mergeShouldNeverUnbox(true);
        
        variableAccessData->mergeStructureCheckHoistingFailed(
            m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
        Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value);
        m_currentBlock->variablesAtTail.argument(argument) = node;
    }
    
    ArgumentPosition* findArgumentPositionForArgument(int argument)
    {
        InlineStackEntry* stack = m_inlineStackTop;
        while (stack->m_inlineCallFrame)
            stack = stack->m_caller;
        return stack->m_argumentPositions[argument];
    }
    
    ArgumentPosition* findArgumentPositionForLocal(int operand)
    {
        for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) {
            InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame;
            if (!inlineCallFrame)
                break;
            if (operand >= static_cast<int>(inlineCallFrame->stackOffset - JSStack::CallFrameHeaderSize))
                continue;
            if (operand == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset())
                continue;
            if (operand < static_cast<int>(inlineCallFrame->stackOffset - JSStack::CallFrameHeaderSize - inlineCallFrame->arguments.size()))
                continue;
            int argument = operandToArgument(operand - inlineCallFrame->stackOffset);
            return stack->m_argumentPositions[argument];
        }
        return 0;
    }
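    
    // Note that findArgumentPositionForLocal() sees machine-frame operands
    // (they were remapped before flushing). For each inlined frame it asks
    // whether the operand falls in that frame's argument slots - the window
    // just below stackOffset - JSStack::CallFrameHeaderSize, excluding 'this' -
    // and otherwise keeps walking outward to the caller. An operand matching
    // no frame's window is an ordinary local, so there is no ArgumentPosition
    // to merge and we return 0.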
    ArgumentPosition* findArgumentPosition(int operand)
    {
        if (operandIsArgument(operand))
            return findArgumentPositionForArgument(operandToArgument(operand));
        return findArgumentPositionForLocal(operand);
    }
    
    void flush(int operand)
    {
        flushDirect(m_inlineStackTop->remapOperand(operand));
    }
    
    void flushDirect(int operand)
    {
        flushDirect(operand, findArgumentPosition(operand));
    }
    
    void flushDirect(int operand, ArgumentPosition* argumentPosition)
    {
        bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame());
        
        ASSERT(operand < FirstConstantRegisterIndex);
        
        if (!operandIsArgument(operand))
            m_preservedVars.set(operand);
        
        Node* node = m_currentBlock->variablesAtTail.operand(operand);
        
        VariableAccessData* variable;
        
        if (node) {
            variable = node->variableAccessData();
            variable->mergeIsCaptured(isCaptured);
        } else
            variable = newVariableAccessData(operand, isCaptured);
        
        node = addToGraph(Flush, OpInfo(variable));
        m_currentBlock->variablesAtTail.operand(operand) = node;
        if (argumentPosition)
            argumentPosition->addVariable(variable);
    }
    
    void flush(InlineStackEntry* inlineStackEntry)
    {
        int numArguments;
        if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame)
            numArguments = inlineCallFrame->arguments.size();
        else
            numArguments = inlineStackEntry->m_codeBlock->numParameters();
        for (unsigned argument = numArguments; argument-- > 1;)
            flushDirect(inlineStackEntry->remapOperand(argumentToOperand(argument)));
        for (int local = 0; local < inlineStackEntry->m_codeBlock->m_numVars; ++local) {
            if (!inlineStackEntry->m_codeBlock->isCaptured(local))
                continue;
            flushDirect(inlineStackEntry->remapOperand(local));
        }
    }
    
    void flushAllArgumentsAndCapturedVariablesInInlineStack()
    {
        for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller)
            flush(inlineStackEntry);
    }
    
    void flushArgumentsAndCapturedVariables()
    {
        flush(m_inlineStackTop);
    }
    
    // Get an operand, and perform a ToInt32/ToNumber conversion on it.
    Node* getToInt32(int operand)
    {
        return toInt32(get(operand));
    }
    
    // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32.
    Node* toInt32(Node* node)
    {
        if (node->hasInt32Result())
            return node;
        
        if (node->op() == UInt32ToNumber)
            return node->child1().node();
        
        // Check for numeric constants boxed as JSValues.
        if (canFold(node)) {
            JSValue v = valueOfJSConstant(node);
            if (v.isInt32())
                return getJSConstant(node->constantNumber());
            if (v.isNumber())
                return getJSConstantForValue(JSValue(JSC::toInt32(v.asNumber())));
        }
        
        return addToGraph(ValueToInt32, node);
    }
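    
    // Folding example for toInt32() (illustrative): if 'node' is the JS
    // constant 4294967296.5, canFold() lets us compute
    // JSC::toInt32(4294967296.5) == 0 at parse time, so we emit the boxed
    // constant 0 via getJSConstantForValue() instead of a ValueToInt32 node.
    // An int32 constant is reused outright via getJSConstant().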
    // NOTE: Only use this to construct constants that arise from non-speculative
    // constant folding. I.e. creating constants using this if we had constant
    // field inference would be a bad idea, since the bytecode parser's folding
    // doesn't handle liveness preservation.
    Node* getJSConstantForValue(JSValue constantValue)
    {
        unsigned constantIndex = m_codeBlock->addOrFindConstant(constantValue);
        if (constantIndex >= m_constants.size())
            m_constants.append(ConstantRecord());
        
        ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        
        return getJSConstant(constantIndex);
    }
    
    Node* getJSConstant(unsigned constant)
    {
        Node* node = m_constants[constant].asJSValue;
        if (node)
            return node;
        
        Node* result = addToGraph(JSConstant, OpInfo(constant));
        m_constants[constant].asJSValue = result;
        return result;
    }
    
    Node* getCallee()
    {
        return addToGraph(GetCallee);
    }
    
    // Helper functions to get/set the this value.
    Node* getThis()
    {
        return get(m_inlineStackTop->m_codeBlock->thisRegister());
    }
    void setThis(Node* value)
    {
        set(m_inlineStackTop->m_codeBlock->thisRegister(), value);
    }
    
    // Convenience methods for checking nodes for constants.
    bool isJSConstant(Node* node)
    {
        return node->op() == JSConstant;
    }
    bool isInt32Constant(Node* node)
    {
        return isJSConstant(node) && valueOfJSConstant(node).isInt32();
    }
    // Convenience methods for getting constant values.
    JSValue valueOfJSConstant(Node* node)
    {
        ASSERT(isJSConstant(node));
        return m_codeBlock->getConstant(FirstConstantRegisterIndex + node->constantNumber());
    }
    int32_t valueOfInt32Constant(Node* node)
    {
        ASSERT(isInt32Constant(node));
        return valueOfJSConstant(node).asInt32();
    }
    
    // This method returns a JSConstant with the value 'undefined'.
    Node* constantUndefined()
    {
        // Has m_constantUndefined been set up yet?
        if (m_constantUndefined == UINT_MAX) {
            // Search the constant pool for undefined, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantUndefined = 0; m_constantUndefined < numberOfConstants; ++m_constantUndefined) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined);
                if (testMe.isUndefined())
                    return getJSConstant(m_constantUndefined);
            }
            
            // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsUndefined());
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }
        
        // m_constantUndefined must refer to an entry in the CodeBlock's constant pool that has the value 'undefined'.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantUndefined).isUndefined());
        return getJSConstant(m_constantUndefined);
    }
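    
    // constantNull(), one() and constantNaN() below follow the same lazy
    // pattern as constantUndefined(): the member starts out as the UINT_MAX
    // sentinel; the first request scans the CodeBlock's constant pool,
    // appending the value if it is missing, and every later request is a
    // single getJSConstant() on the cached index.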
    // This method returns a JSConstant with the value 'null'.
    Node* constantNull()
    {
        // Has m_constantNull been set up yet?
        if (m_constantNull == UINT_MAX) {
            // Search the constant pool for null, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNull = 0; m_constantNull < numberOfConstants; ++m_constantNull) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull);
                if (testMe.isNull())
                    return getJSConstant(m_constantNull);
            }
            
            // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsNull());
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }
        
        // m_constantNull must refer to an entry in the CodeBlock's constant pool that has the value 'null'.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNull).isNull());
        return getJSConstant(m_constantNull);
    }
    
    // This method returns a DoubleConstant with the value 1.
    Node* one()
    {
        // Has m_constant1 been set up yet?
        if (m_constant1 == UINT_MAX) {
            // Search the constant pool for the value 1, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1);
                if (testMe.isInt32() && testMe.asInt32() == 1)
                    return getJSConstant(m_constant1);
            }
            
            // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(jsNumber(1));
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }
        
        // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32());
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1);
        return getJSConstant(m_constant1);
    }
    
    // This method returns a DoubleConstant with the value NaN.
    Node* constantNaN()
    {
        JSValue nan = jsNaN();
        
        // Has m_constantNaN been set up yet?
        if (m_constantNaN == UINT_MAX) {
            // Search the constant pool for the value NaN, if we find it, we can just reuse this!
            unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters();
            for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) {
                JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN);
                if (JSValue::encode(testMe) == JSValue::encode(nan))
                    return getJSConstant(m_constantNaN);
            }
            
            // Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants.
            ASSERT(m_constants.size() == numberOfConstants);
            m_codeBlock->addConstant(nan);
            m_constants.append(ConstantRecord());
            ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters());
        }
        
        // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan.
        ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble());
        ASSERT(std::isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble()));
        return getJSConstant(m_constantNaN);
    }
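    
    // The search loop above compares JSValue::encode() bits rather than using
    // == because NaN != NaN as doubles; bit equality is the only way to
    // recognize an existing NaN entry in the pool.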
    Node* cellConstant(JSCell* cell)
    {
        HashMap<JSCell*, Node*>::AddResult result = m_cellConstantNodes.add(cell, 0);
        if (result.isNewEntry)
            result.iterator->value = addToGraph(WeakJSConstant, OpInfo(cell));
        
        return result.iterator->value;
    }
    
    InlineCallFrame* inlineCallFrame()
    {
        return m_inlineStackTop->m_inlineCallFrame;
    }
    
    CodeOrigin currentCodeOrigin()
    {
        return CodeOrigin(m_currentIndex, inlineCallFrame(), m_currentProfilingIndex - m_currentIndex);
    }
    
    bool canFold(Node* node)
    {
        return node->isStronglyProvedConstantIn(inlineCallFrame());
    }
    
    // Our codegen for constant strict equality performs a bitwise comparison,
    // so we can only select values that have a consistent bitwise identity.
    bool isConstantForCompareStrictEq(Node* node)
    {
        if (!node->isConstant())
            return false;
        JSValue value = valueOfJSConstant(node);
        return value.isBoolean() || value.isUndefinedOrNull();
    }
    
    Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
    {
        Node* result = m_graph.addNode(
            SpecNone, op, currentCodeOrigin(), Edge(child1), Edge(child2), Edge(child3));
        ASSERT(op != Phi);
        m_currentBlock->append(result);
        return result;
    }
    Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge())
    {
        Node* result = m_graph.addNode(
            SpecNone, op, currentCodeOrigin(), child1, child2, child3);
        ASSERT(op != Phi);
        m_currentBlock->append(result);
        return result;
    }
    Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
    {
        Node* result = m_graph.addNode(
            SpecNone, op, currentCodeOrigin(), info, Edge(child1), Edge(child2), Edge(child3));
        ASSERT(op != Phi);
        m_currentBlock->append(result);
        return result;
    }
    Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0)
    {
        Node* result = m_graph.addNode(
            SpecNone, op, currentCodeOrigin(), info1, info2,
            Edge(child1), Edge(child2), Edge(child3));
        ASSERT(op != Phi);
        m_currentBlock->append(result);
        return result;
    }
    
    Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2)
    {
        Node* result = m_graph.addNode(
            SpecNone, Node::VarArg, op, currentCodeOrigin(), info1, info2,
            m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs);
        ASSERT(op != Phi);
        m_currentBlock->append(result);
        
        m_numPassedVarArgs = 0;
        
        return result;
    }
    
    void addVarArgChild(Node* child)
    {
        m_graph.m_varArgChildren.append(Edge(child));
        m_numPassedVarArgs++;
    }
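    
    // The var-arg protocol: stage each child with addVarArgChild(), then build
    // the node with the Node::VarArg overload of addToGraph(), which adopts the
    // last m_numPassedVarArgs entries of m_graph.m_varArgChildren and resets
    // the counter. addCall() below is the canonical user:
    //
    //     addVarArgChild(get(calleeOperand));  // hypothetical operands
    //     addVarArgChild(get(thisOperand));    // then one per argument...
    //     Node* call = addToGraph(Node::VarArg, Call, OpInfo(0), OpInfo(prediction));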
    
    Node* addCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op)
    {
        Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
        
        SpeculatedType prediction = SpecNone;
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
            m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call);
            prediction = getPrediction();
        }
        
        addVarArgChild(get(currentInstruction[1].u.operand));
        int argCount = currentInstruction[2].u.operand;
        if (JSStack::CallFrameHeaderSize + (unsigned)argCount > m_parameterSlots)
            m_parameterSlots = JSStack::CallFrameHeaderSize + argCount;
        
        int registerOffset = currentInstruction[3].u.operand;
        int dummyThisArgument = op == Call ? 0 : 1;
        for (int i = 0 + dummyThisArgument; i < argCount; ++i)
            addVarArgChild(get(registerOffset + argumentToOperand(i)));
        
        Node* call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction));
        if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
            set(putInstruction[1].u.operand, call);
        return call;
    }
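    
    // For reference, the bytecode shape addCall() assumes: op_call's operands
    // are [1] the callee, [2] the argument count including 'this', and [3] the
    // register offset of the call frame. If the call's value is used, the very
    // next instruction is op_call_put_result, whose operand [1] is the
    // destination and whose value profile supplies 'prediction'.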
    
    Node* addStructureTransitionCheck(JSCell* object, Structure* structure)
    {
        // Add a weak JS constant for the object regardless, since the code should
        // be jettisoned if the object ever dies.
        Node* objectNode = cellConstant(object);
        
        if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
            addToGraph(StructureTransitionWatchpoint, OpInfo(structure), objectNode);
            return objectNode;
        }
        
        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode);
        
        return objectNode;
    }
    
    Node* addStructureTransitionCheck(JSCell* object)
    {
        return addStructureTransitionCheck(object, object->structure());
    }
    
getPredictionWithoutOSRExit(unsigned bytecodeIndex
) 
 808         return m_inlineStackTop
->m_profiledBlock
->valueProfilePredictionForBytecodeOffset(bytecodeIndex
); 
 811     SpeculatedType 
getPrediction(unsigned bytecodeIndex
) 
 813         SpeculatedType prediction 
= getPredictionWithoutOSRExit(bytecodeIndex
); 
 815         if (prediction 
== SpecNone
) { 
 816             // We have no information about what values this node generates. Give up 
 817             // on executing this code, since we're likely to do more damage than good. 
 818             addToGraph(ForceOSRExit
); 
 824     SpeculatedType 
getPredictionWithoutOSRExit() 
 826         return getPredictionWithoutOSRExit(m_currentProfilingIndex
); 
 829     SpeculatedType 
getPrediction() 
 831         return getPrediction(m_currentProfilingIndex
); 
    ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action)
    {
        profile->computeUpdatedPrediction(m_inlineStackTop->m_codeBlock);
        return ArrayMode::fromObserved(profile, action, false);
    }
    
    ArrayMode getArrayMode(ArrayProfile* profile)
    {
        return getArrayMode(profile, Array::Read);
    }
    
getArrayModeAndEmitChecks(ArrayProfile
* profile
, Array::Action action
, Node
* base
) 
 847         profile
->computeUpdatedPrediction(m_inlineStackTop
->m_codeBlock
); 
 849 #if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE) 
 850         if (m_inlineStackTop
->m_profiledBlock
->numberOfRareCaseProfiles()) 
 851             dataLogF("Slow case profile for bc#%u: %u\n", m_currentIndex
, m_inlineStackTop
->m_profiledBlock
->rareCaseProfileForBytecodeOffset(m_currentIndex
)->m_counter
); 
 852         dataLogF("Array profile for bc#%u: %p%s%s, %u\n", m_currentIndex
, profile
->expectedStructure(), profile
->structureIsPolymorphic() ? " (polymorphic)" : "", profile
->mayInterceptIndexedAccesses() ? " (may intercept)" : "", profile
->observedArrayModes()); 
 856             m_inlineStackTop
->m_profiledBlock
->likelyToTakeSlowCase(m_currentIndex
) 
 857             || profile
->outOfBounds(); 
 859         ArrayMode result 
= ArrayMode::fromObserved(profile
, action
, makeSafe
); 
 861         if (profile
->hasDefiniteStructure() 
 862             && result
.benefitsFromStructureCheck() 
 863             && !m_inlineStackTop
->m_exitProfile
.hasExitSite(m_currentIndex
, BadCache
)) 
 864             addToGraph(CheckStructure
, OpInfo(m_graph
.addStructureSet(profile
->expectedStructure())), base
); 
 869     Node
* makeSafe(Node
* node
) 
 871         bool likelyToTakeSlowCase
; 
 872         if (!isX86() && node
->op() == ArithMod
) 
 873             likelyToTakeSlowCase 
= false; 
 875             likelyToTakeSlowCase 
= m_inlineStackTop
->m_profiledBlock
->likelyToTakeSlowCase(m_currentIndex
); 
 877         if (!likelyToTakeSlowCase
 
 878             && !m_inlineStackTop
->m_exitProfile
.hasExitSite(m_currentIndex
, Overflow
) 
 879             && !m_inlineStackTop
->m_exitProfile
.hasExitSite(m_currentIndex
, NegativeZero
)) 
 882         switch (node
->op()) { 
 888         case ArithMod
: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double. 
 889             node
->mergeFlags(NodeMayOverflow
); 
 893             if (m_inlineStackTop
->m_profiledBlock
->likelyToTakeDeepestSlowCase(m_currentIndex
) 
 894                 || m_inlineStackTop
->m_exitProfile
.hasExitSite(m_currentIndex
, Overflow
)) { 
 895 #if DFG_ENABLE(DEBUG_VERBOSE) 
 896                 dataLogF("Making ArithMul @%u take deepest slow case.\n", node
->index()); 
 898                 node
->mergeFlags(NodeMayOverflow 
| NodeMayNegZero
); 
 899             } else if (m_inlineStackTop
->m_profiledBlock
->likelyToTakeSlowCase(m_currentIndex
) 
 900                        || m_inlineStackTop
->m_exitProfile
.hasExitSite(m_currentIndex
, NegativeZero
)) { 
 901 #if DFG_ENABLE(DEBUG_VERBOSE) 
 902                 dataLogF("Making ArithMul @%u take faster slow case.\n", node
->index()); 
 904                 node
->mergeFlags(NodeMayNegZero
); 
 909             RELEASE_ASSERT_NOT_REACHED(); 
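    
    // What makeSafe() buys us, by example (illustrative): for "a * b" where
    // the baseline JIT's profiling counters or prior OSR exits report
    // overflow, the ArithMul node is tagged NodeMayOverflow, so later phases
    // keep the overflow check rather than speculating a pure int32 result;
    // NodeMayNegZero likewise preserves the -0 check for multiplies that
    // produced negative zero.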
    
    Node* makeDivSafe(Node* node)
    {
        ASSERT(node->op() == ArithDiv);
        
        // The main slow case counter for op_div in the old JIT counts only when
        // the operands are not numbers. We don't care about that since we already
        // have speculations in place that take care of that separately. We only
        // care about when the outcome of the division is not an integer, which
        // is what the special fast case counter tells us.
        
        if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)
            && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero))
            return node;
        
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("Making %s @%u safe at bc#%u because special fast-case counter is at %u and exit profiles say %d, %d\n", Graph::opName(node->op()), node->index(), m_currentIndex, m_inlineStackTop->m_profiledBlock->specialFastCaseProfileForBytecodeOffset(m_currentIndex)->m_counter, m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow), m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero));
#endif
        
        // FIXME: It might be possible to make this more granular. The DFG certainly can
        // distinguish between negative zero and overflow in its exit profiles.
        node->mergeFlags(NodeMayOverflow | NodeMayNegZero);
        
        return node;
    }
    
    bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain)
    {
        if (direct)
            return true;
        
        if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get())
            return false;
        
        for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
            if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get())
                return false;
        }
        
        return true;
    }
    
    void buildOperandMapsIfNecessary();
    
    JSGlobalData* m_globalData;
    CodeBlock* m_codeBlock;
    CodeBlock* m_profiledBlock;
    Graph& m_graph;
    
    // The current block being generated.
    BasicBlock* m_currentBlock;
    // The bytecode index of the current instruction being generated.
    unsigned m_currentIndex;
    // The bytecode index of the value profile of the current instruction being generated.
    unsigned m_currentProfilingIndex;
    
    // We use these values during code generation, and to avoid the need for
    // special handling we make sure they are available as constants in the
    // CodeBlock's constant pool. These variables are initialized to
    // UINT_MAX, and lazily updated to hold an index into the CodeBlock's
    // constant pool, as necessary.
    unsigned m_constantUndefined;
    unsigned m_constantNull;
    unsigned m_constantNaN;
    unsigned m_constant1;
    HashMap<JSCell*, unsigned> m_cellConstants;
    HashMap<JSCell*, Node*> m_cellConstantNodes;
    
    // A constant in the constant pool may be represented by more than one
    // node in the graph, depending on the context in which it is being used.
    struct ConstantRecord {
        ConstantRecord()
            : asInt32(0)
            , asNumeric(0)
            , asJSValue(0)
        {
        }
        
        Node* asInt32;
        Node* asNumeric;
        Node* asJSValue;
    };
    
    // Track the index of the node whose result is the current value for every
    // register value in the bytecode - argument, local, and temporary.
    Vector<ConstantRecord, 16> m_constants;
    
    // The number of arguments passed to the function.
    unsigned m_numArguments;
    // The number of locals (vars + temporaries) used in the function.
    unsigned m_numLocals;
    // The set of registers we need to preserve across BasicBlock boundaries;
    // typically equal to the set of vars, but we expand this to cover all
    // temporaries that persist across blocks (due to ?:, &&, ||, etc).
    BitVector m_preservedVars;
    // The number of slots (in units of sizeof(Register)) that we need to
    // preallocate for calls emanating from this frame. This includes the
    // size of the CallFrame, only if this is not a leaf function. (I.e.
    // this is 0 if and only if this function is a leaf.)
    unsigned m_parameterSlots;
    // The number of var args passed to the next var arg node.
    unsigned m_numPassedVarArgs;
    
    HashMap<ConstantBufferKey, unsigned> m_constantBufferCache;
    
    struct InlineStackEntry {
        ByteCodeParser* m_byteCodeParser;
        
        CodeBlock* m_codeBlock;
        CodeBlock* m_profiledBlock;
        InlineCallFrame* m_inlineCallFrame;
        
        ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); }
        
        QueryableExitProfile m_exitProfile;
        
        // Remapping of identifier and constant numbers from the code block being
        // inlined (inline callee) to the code block that we're inlining into
        // (the machine code block, which is the transitive, though not necessarily
        // direct, caller).
        Vector<unsigned> m_identifierRemap;
        Vector<unsigned> m_constantRemap;
        Vector<unsigned> m_constantBufferRemap;
        
        // Blocks introduced by this code block, which need successor linking.
        // May include up to one basic block that includes the continuation after
        // the callsite in the caller. These must be appended in the order that they
        // are created, but their bytecodeBegin values need not be in order as they
        // are ignored.
        Vector<UnlinkedBlock> m_unlinkedBlocks;
        
        // Potential block linking targets. Must be sorted by bytecodeBegin, and
        // cannot have two blocks that have the same bytecodeBegin. For this very
        // reason, this is not equivalent to m_unlinkedBlocks.
        Vector<BlockIndex> m_blockLinkingTargets;
        
        // If the callsite's basic block was split into two, then this will be
        // the head of the callsite block. It needs its successors linked to the
        // m_unlinkedBlocks, but not the other way around: there's no way for
        // any blocks in m_unlinkedBlocks to jump back into this block.
        BlockIndex m_callsiteBlockHead;
        
        // Does the callsite block head need linking? This is typically true
        // but will be false for the machine code block's inline stack entry
        // (since that one is not inlined) and for cases where an inline callee
        // did the linking for us.
        bool m_callsiteBlockHeadNeedsLinking;
        
        VirtualRegister m_returnValue;
        
        // Speculations about variable types collected from the profiled code block,
        // which are based on OSR exit profiles that past DFG compilations of this
        // code block had gathered.
        LazyOperandValueProfileParser m_lazyOperands;
        
        // Did we see any returns? We need to handle the (uncommon but necessary)
        // case where a procedure that does not return was inlined.
        bool m_didReturn;
        
        // Did we have any early returns?
        bool m_didEarlyReturn;
        
        // Pointers to the argument position trackers for this slice of code.
        Vector<ArgumentPosition*> m_argumentPositions;
        
        InlineStackEntry* m_caller;
        
        InlineStackEntry(
            ByteCodeParser*,
            CodeBlock*,
            CodeBlock* profiledBlock,
            BlockIndex callsiteBlockHead,
            JSFunction* callee, // Null if this is a closure call.
            VirtualRegister returnValueVR,
            VirtualRegister inlineCallFrameStart,
            int argumentCountIncludingThis,
            CodeSpecializationKind);
        
        ~InlineStackEntry()
        {
            m_byteCodeParser->m_inlineStackTop = m_caller;
        }
        
        int remapOperand(int operand) const
        {
            if (!m_inlineCallFrame)
                return operand;
            
            if (operand >= FirstConstantRegisterIndex) {
                int result = m_constantRemap[operand - FirstConstantRegisterIndex];
                ASSERT(result >= FirstConstantRegisterIndex);
                return result;
            }
            
            ASSERT(operand != JSStack::Callee);
            
            return operand + m_inlineCallFrame->stackOffset;
        }
    };
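    
    // remapOperand() by example (hypothetical numbers): in an inlinee whose
    // m_inlineCallFrame->stackOffset is 40, local r5 remaps to machine operand
    // r45, while a constant operand is redirected through m_constantRemap into
    // the machine code block's pool. The machine code block's own entry has a
    // null m_inlineCallFrame and leaves operands untouched.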
    
    InlineStackEntry* m_inlineStackTop;
    
    // Have we built operand maps? We initialize them lazily, and only when doing
    // inlining.
    bool m_haveBuiltOperandMaps;
    // Mapping between identifier names and numbers.
    IdentifierMap m_identifierMap;
    // Mapping between values and constant numbers.
    JSValueMap m_jsValueMap;
    // Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible
    // work-around for the fact that JSValueMap can't handle "empty" values.
    unsigned m_emptyJSValueIndex;
    
    Instruction* m_currentInstruction;
};

#define NEXT_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    continue

#define LAST_OPCODE(name) \
    m_currentIndex += OPCODE_LENGTH(name); \
    return shouldContinueParsing
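
// NEXT_OPCODE and LAST_OPCODE serve parseBlock()'s instruction dispatch:
// NEXT_OPCODE advances m_currentIndex past the current instruction and
// continues the parsing loop, while LAST_OPCODE advances and then returns
// shouldContinueParsing, ending the current basic block.
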
void ByteCodeParser::handleCall(Interpreter* interpreter, Instruction* currentInstruction, NodeType op, CodeSpecializationKind kind)
{
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
    
    Node* callTarget = get(currentInstruction[1].u.operand);
    
    CallLinkStatus callLinkStatus;
    
    if (m_graph.isConstant(callTarget))
        callLinkStatus = CallLinkStatus(m_graph.valueOfJSConstant(callTarget)).setIsProved(true);
    else {
        callLinkStatus = CallLinkStatus::computeFor(m_inlineStackTop->m_profiledBlock, m_currentIndex);
        callLinkStatus.setHasBadFunctionExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadFunction));
        callLinkStatus.setHasBadCacheExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));
        callLinkStatus.setHasBadExecutableExitSite(m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadExecutable));
    }
    
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("For call at bc#", m_currentIndex, ": ", callLinkStatus, "\n");
#endif
    
    if (!callLinkStatus.canOptimize()) {
        // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically
        // that we cannot optimize them.
        
        addCall(interpreter, currentInstruction, op);
        return;
    }
    
    int argumentCountIncludingThis = currentInstruction[2].u.operand;
    int registerOffset = currentInstruction[3].u.operand;
    
    // Do we have a result?
    bool usesResult = false;
    int resultOperand = 0; // make compiler happy
    unsigned nextOffset = m_currentIndex + OPCODE_LENGTH(op_call);
    Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call);
    SpeculatedType prediction = SpecNone;
    if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
        resultOperand = putInstruction[1].u.operand;
        usesResult = true;
        m_currentProfilingIndex = nextOffset;
        prediction = getPrediction();
        nextOffset += OPCODE_LENGTH(op_call_put_result);
    }
    
    if (InternalFunction* function = callLinkStatus.internalFunction()) {
        if (handleConstantInternalFunction(usesResult, resultOperand, function, registerOffset, argumentCountIncludingThis, prediction, kind)) {
            // This phantoming has to be *after* the code for the intrinsic, to signify that
            // the inputs must be kept alive whatever exits the intrinsic may do.
            addToGraph(Phantom, callTarget);
            emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind);
            return;
        }
        
        // Can only handle this using the generic call handler.
        addCall(interpreter, currentInstruction, op);
        return;
    }
    
    Intrinsic intrinsic = callLinkStatus.intrinsicFor(kind);
    if (intrinsic != NoIntrinsic) {
        emitFunctionChecks(callLinkStatus, callTarget, registerOffset, kind);
        
        if (handleIntrinsic(usesResult, resultOperand, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) {
            // This phantoming has to be *after* the code for the intrinsic, to signify that
            // the inputs must be kept alive whatever exits the intrinsic may do.
            addToGraph(Phantom, callTarget);
            emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind);
            if (m_graph.m_compilation)
                m_graph.m_compilation->noticeInlinedCall();
            return;
        }
    } else if (handleInlining(usesResult, callTarget, resultOperand, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, kind)) {
        if (m_graph.m_compilation)
            m_graph.m_compilation->noticeInlinedCall();
        return;
    }
    
    addCall(interpreter, currentInstruction, op);
}
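
// handleCall()'s decision tree, in order of preference: a provably-constant
// InternalFunction goes through handleConstantInternalFunction(); a known
// intrinsic (e.g. a Math function such as the min/max handled by
// handleMinMax() below) goes through handleIntrinsic() after
// emitFunctionChecks(); otherwise we attempt full inlining; and if all of
// that fails we plant a generic call with addCall().
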
void ByteCodeParser::emitFunctionChecks(const CallLinkStatus& callLinkStatus, Node* callTarget, int registerOffset, CodeSpecializationKind kind)
{
    Node* thisArgument;
    if (kind == CodeForCall)
        thisArgument = get(registerOffset + argumentToOperand(0));
    else
        thisArgument = 0;
    
    if (callLinkStatus.isProved()) {
        addToGraph(Phantom, callTarget, thisArgument);
        return;
    }
    
    ASSERT(callLinkStatus.canOptimize());
    
    if (JSFunction* function = callLinkStatus.function())
        addToGraph(CheckFunction, OpInfo(function), callTarget, thisArgument);
    else {
        ASSERT(callLinkStatus.structure());
        ASSERT(callLinkStatus.executable());
        
        addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(callLinkStatus.structure())), callTarget);
        addToGraph(CheckExecutable, OpInfo(callLinkStatus.executable()), callTarget, thisArgument);
    }
}

void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind)
{
    for (int i = kind == CodeForCall ? 0 : 1; i < argumentCountIncludingThis; ++i)
        addToGraph(Phantom, get(registerOffset + argumentToOperand(i)));
}

1255 bool ByteCodeParser::handleInlining(bool usesResult
, Node
* callTargetNode
, int resultOperand
, const CallLinkStatus
& callLinkStatus
, int registerOffset
, int argumentCountIncludingThis
, unsigned nextOffset
, CodeSpecializationKind kind
) 
1257     // First, the really simple checks: do we have an actual JS function? 
1258     if (!callLinkStatus
.executable()) 
1260     if (callLinkStatus
.executable()->isHostFunction()) 
1263     FunctionExecutable
* executable 
= jsCast
<FunctionExecutable
*>(callLinkStatus
.executable()); 
1265     // Does the number of arguments we're passing match the arity of the target? We currently 
1266     // inline only if the number of arguments passed is greater than or equal to the number 
1267     // arguments expected. 
1268     if (static_cast<int>(executable
->parameterCount()) + 1 > argumentCountIncludingThis
) 
1271     // Have we exceeded inline stack depth, or are we trying to inline a recursive call? 
1272     // If either of these are detected, then don't inline. 
1274     for (InlineStackEntry
* entry 
= m_inlineStackTop
; entry
; entry 
= entry
->m_caller
) { 
1276         if (depth 
>= Options::maximumInliningDepth()) 
1277             return false; // Depth exceeded. 
1279         if (entry
->executable() == executable
) 
1280             return false; // Recursion detected. 
1283     // Do we have a code block, and does the code block's size match the heuristics/requirements for 
1284     // being an inline candidate? We might not have a code block if code was thrown away or if we 
1285     // simply hadn't actually made this call yet. We could still theoretically attempt to inline it 
1286     // if we had a static proof of what was being called; this might happen for example if you call a 
1287     // global function, where watchpointing gives us static information. Overall, it's a rare case 
1288     // because we expect that any hot callees would have already been compiled. 
1289     CodeBlock
* codeBlock 
= executable
->baselineCodeBlockFor(kind
); 
1292     if (!canInlineFunctionFor(codeBlock
, kind
, callLinkStatus
.isClosureCall())) 
1295 #if DFG_ENABLE(DEBUG_VERBOSE) 
1296     dataLogF("Inlining executable %p.\n", executable
); 
1299     // Now we know without a doubt that we are committed to inlining. So begin the process 
1300     // by checking the callee (if necessary) and making sure that arguments and the callee 
1302     emitFunctionChecks(callLinkStatus
, callTargetNode
, registerOffset
, kind
); 
1304     // FIXME: Don't flush constants! 
1306     int inlineCallFrameStart 
= m_inlineStackTop
->remapOperand(registerOffset
) - JSStack::CallFrameHeaderSize
; 
1308     // Make sure that the area used by the call frame is reserved. 
1309     for (int arg 
= inlineCallFrameStart 
+ JSStack::CallFrameHeaderSize 
+ codeBlock
->m_numVars
; arg
-- > inlineCallFrameStart
;) 
1310         m_preservedVars
.set(arg
); 
1312     // Make sure that we have enough locals. 
1313     unsigned newNumLocals 
= inlineCallFrameStart 
+ JSStack::CallFrameHeaderSize 
+ codeBlock
->m_numCalleeRegisters
; 
1314     if (newNumLocals 
> m_numLocals
) { 
1315         m_numLocals 
= newNumLocals
; 
1316         for (size_t i 
= 0; i 
< m_graph
.m_blocks
.size(); ++i
) 
1317             m_graph
.m_blocks
[i
]->ensureLocals(newNumLocals
); 
1320     size_t argumentPositionStart 
= m_graph
.m_argumentPositions
.size(); 
1322     InlineStackEntry 
inlineStackEntry( 
1323         this, codeBlock
, codeBlock
, m_graph
.m_blocks
.size() - 1, 
1324         callLinkStatus
.function(), (VirtualRegister
)m_inlineStackTop
->remapOperand( 
1325             usesResult 
? resultOperand 
: InvalidVirtualRegister
), 
1326         (VirtualRegister
)inlineCallFrameStart
, argumentCountIncludingThis
, kind
); 
1328     // This is where the actual inlining really happens. 
1329     unsigned oldIndex 
= m_currentIndex
; 
1330     unsigned oldProfilingIndex 
= m_currentProfilingIndex
; 
1332     m_currentProfilingIndex 
= 0; 
1334     addToGraph(InlineStart
, OpInfo(argumentPositionStart
)); 
1335     if (callLinkStatus
.isClosureCall()) { 
1336         addToGraph(SetCallee
, callTargetNode
); 
1337         addToGraph(SetMyScope
, addToGraph(GetScope
, callTargetNode
)); 
1342     m_currentIndex 
= oldIndex
; 
1343     m_currentProfilingIndex 
= oldProfilingIndex
; 
1345     // If the inlined code created some new basic blocks, then we have linking to do. 
1346     if (inlineStackEntry
.m_callsiteBlockHead 
!= m_graph
.m_blocks
.size() - 1) { 
1348         ASSERT(!inlineStackEntry
.m_unlinkedBlocks
.isEmpty()); 
1349         if (inlineStackEntry
.m_callsiteBlockHeadNeedsLinking
) 
1350             linkBlock(m_graph
.m_blocks
[inlineStackEntry
.m_callsiteBlockHead
].get(), inlineStackEntry
.m_blockLinkingTargets
); 
1352             ASSERT(m_graph
.m_blocks
[inlineStackEntry
.m_callsiteBlockHead
]->isLinked
); 
1354         // It's possible that the callsite block head is not owned by the caller. 
1355         if (!inlineStackEntry
.m_caller
->m_unlinkedBlocks
.isEmpty()) { 
1356             // It's definitely owned by the caller, because the caller created new blocks. 
1357             // Assert that this all adds up. 
1358             ASSERT(inlineStackEntry
.m_caller
->m_unlinkedBlocks
.last().m_blockIndex 
== inlineStackEntry
.m_callsiteBlockHead
); 
1359             ASSERT(inlineStackEntry
.m_caller
->m_unlinkedBlocks
.last().m_needsNormalLinking
); 
1360             inlineStackEntry
.m_caller
->m_unlinkedBlocks
.last().m_needsNormalLinking 
= false; 
1362             // It's definitely not owned by the caller. Tell the caller that he does not 
1363             // need to link his callsite block head, because we did it for him. 
1364             ASSERT(inlineStackEntry
.m_caller
->m_callsiteBlockHeadNeedsLinking
); 
1365             ASSERT(inlineStackEntry
.m_caller
->m_callsiteBlockHead 
== inlineStackEntry
.m_callsiteBlockHead
); 
1366             inlineStackEntry
.m_caller
->m_callsiteBlockHeadNeedsLinking 
= false; 
1369         linkBlocks(inlineStackEntry
.m_unlinkedBlocks
, inlineStackEntry
.m_blockLinkingTargets
); 
1371         ASSERT(inlineStackEntry
.m_unlinkedBlocks
.isEmpty()); 

    BasicBlock* lastBlock = m_graph.m_blocks.last().get();
    // If there was a return, but no early returns, then we're done. We allow parsing of
    // the caller to continue in whatever basic block we're in right now.
    if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) {
        ASSERT(lastBlock->isEmpty() || !lastBlock->last()->isTerminal());

        // If we created new blocks then the last block needs linking, but in the
        // caller. It doesn't need to be linked to, but it needs outgoing links.
        if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) {
#if DFG_ENABLE(DEBUG_VERBOSE)
            dataLogF("Reascribing bytecode index of block %p from bc#%u to bc#%u (inline return case).\n", lastBlock, lastBlock->bytecodeBegin, m_currentIndex);
#endif
            // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter
            // for release builds because this block will never serve as a potential target
            // in the linker's binary search.
            lastBlock->bytecodeBegin = m_currentIndex;
            m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size() - 1));
        }

        m_currentBlock = m_graph.m_blocks.last().get();

#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("Done inlining executable %p, continuing code generation at epilogue.\n", executable);
#endif
        return true;
    }

    // If we get to this point then all blocks must end in some sort of terminals.
    ASSERT(lastBlock->last()->isTerminal());

    // Link the early returns to the basic block we're about to create.
    for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) {
        if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking)
            continue;
        BasicBlock* block = m_graph.m_blocks[inlineStackEntry.m_unlinkedBlocks[i].m_blockIndex].get();
        ASSERT(!block->isLinked);
        Node* node = block->last();
        ASSERT(node->op() == Jump);
        ASSERT(node->takenBlockIndex() == NoBlock);
        node->setTakenBlockIndex(m_graph.m_blocks.size());
        inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false;
#if !ASSERT_DISABLED
        block->isLinked = true;
#endif
    }

    // Need to create a new basic block for the continuation at the caller.
    OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(nextOffset, m_numArguments, m_numLocals));
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("Creating inline epilogue basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(inlineCallFrame()));
#endif
    m_currentBlock = block.get();
    ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_caller->m_blockLinkingTargets.last()]->bytecodeBegin < nextOffset);
    m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
    m_inlineStackTop->m_caller->m_blockLinkingTargets.append(m_graph.m_blocks.size());
    m_graph.m_blocks.append(block.release());
    prepareToParseBlock();

    // At this point we return and continue to generate code for the caller, but
    // in the new basic block.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("Done inlining executable %p, continuing code generation in new block.\n", executable);
#endif
    return true;
}

void ByteCodeParser::setIntrinsicResult(bool usesResult, int resultOperand, Node* node)
{
    if (!usesResult)
        return;
    set(resultOperand, node);
}

bool ByteCodeParser::handleMinMax(bool usesResult, int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis)
{
    if (argumentCountIncludingThis == 1) { // Math.min()
        setIntrinsicResult(usesResult, resultOperand, constantNaN());
        return true;
    }

    if (argumentCountIncludingThis == 2) { // Math.min(x)
        Node* result = get(registerOffset + argumentToOperand(1));
        addToGraph(Phantom, Edge(result, NumberUse));
        setIntrinsicResult(usesResult, resultOperand, result);
        return true;
    }

    if (argumentCountIncludingThis == 3) { // Math.min(x, y)
        setIntrinsicResult(usesResult, resultOperand, addToGraph(op, get(registerOffset + argumentToOperand(1)), get(registerOffset + argumentToOperand(2))));
        return true;
    }

    // Don't handle >=3 arguments for now.
    return false;
}
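
// Note on the one-argument case above: Math.min(x) is just x as a number, but
// the Phantom with a NumberUse edge forces a number speculation, so e.g.
// Math.min({ valueOf: f }) exits to the baseline JIT rather than silently
// skipping the observable ToNumber conversion.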

// FIXME: We dead-code-eliminate unused Math intrinsics, but that's invalid because
// they need to perform the ToNumber conversion, which can have side-effects.
bool ByteCodeParser::handleIntrinsic(bool usesResult, int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction)
{
    switch (intrinsic) {
    case AbsIntrinsic: {
        if (argumentCountIncludingThis == 1) { // Math.abs()
            setIntrinsicResult(usesResult, resultOperand, constantNaN());
            return true;
        }

        if (!MacroAssembler::supportsFloatingPointAbs())
            return false;

        Node* node = addToGraph(ArithAbs, get(registerOffset + argumentToOperand(1)));
        if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow))
            node->mergeFlags(NodeMayOverflow);
        setIntrinsicResult(usesResult, resultOperand, node);
        return true;
    }

    case MinIntrinsic:
        return handleMinMax(usesResult, resultOperand, ArithMin, registerOffset, argumentCountIncludingThis);

    case MaxIntrinsic:
        return handleMinMax(usesResult, resultOperand, ArithMax, registerOffset, argumentCountIncludingThis);

    case SqrtIntrinsic: {
        if (argumentCountIncludingThis == 1) { // Math.sqrt()
            setIntrinsicResult(usesResult, resultOperand, constantNaN());
            return true;
        }

        if (!MacroAssembler::supportsFloatingPointSqrt())
            return false;

        setIntrinsicResult(usesResult, resultOperand, addToGraph(ArithSqrt, get(registerOffset + argumentToOperand(1))));
        return true;
    }

    case ArrayPushIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        ArrayMode arrayMode = getArrayMode(m_currentInstruction[5].u.arrayProfile);
        if (!arrayMode.isJSArray())
            return false;
        switch (arrayMode.type()) {
        case Array::Undecided:
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::ArrayStorage: {
            Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
            if (usesResult)
                set(resultOperand, arrayPush);
            return true;
        }

        default:
            return false;
        }
    }

    case ArrayPopIntrinsic: {
        if (argumentCountIncludingThis != 1)
            return false;

        ArrayMode arrayMode = getArrayMode(m_currentInstruction[5].u.arrayProfile);
        if (!arrayMode.isJSArray())
            return false;
        switch (arrayMode.type()) {
        case Array::Int32:
        case Array::Double:
        case Array::Contiguous:
        case Array::ArrayStorage: {
            Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(registerOffset + argumentToOperand(0)));
            if (usesResult)
                set(resultOperand, arrayPop);
            return true;
        }

        default:
            return false;
        }
    }

    case CharCodeAtIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        int thisOperand = registerOffset + argumentToOperand(0);
        int indexOperand = registerOffset + argumentToOperand(1);
        Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), getToInt32(indexOperand));

        if (usesResult)
            set(resultOperand, charCode);
        return true;
    }

    case CharAtIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        int thisOperand = registerOffset + argumentToOperand(0);
        int indexOperand = registerOffset + argumentToOperand(1);
        Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), getToInt32(indexOperand));

        if (usesResult)
            set(resultOperand, charCode);
        return true;
    }

    case FromCharCodeIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        int indexOperand = registerOffset + argumentToOperand(1);
        Node* charCode = addToGraph(StringFromCharCode, getToInt32(indexOperand));

        if (usesResult)
            set(resultOperand, charCode);

        return true;
    }

    case RegExpExecIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
        if (usesResult)
            set(resultOperand, regExpExec);

        return true;
    }

    case RegExpTestIntrinsic: {
        if (argumentCountIncludingThis != 2)
            return false;

        Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(registerOffset + argumentToOperand(0)), get(registerOffset + argumentToOperand(1)));
        if (usesResult)
            set(resultOperand, regExpExec);

        return true;
    }

    case IMulIntrinsic: {
        if (argumentCountIncludingThis != 3)
            return false;
        int leftOperand = registerOffset + argumentToOperand(1);
        int rightOperand = registerOffset + argumentToOperand(2);
        Node* left = getToInt32(leftOperand);
        Node* right = getToInt32(rightOperand);
        setIntrinsicResult(usesResult, resultOperand, addToGraph(ArithIMul, left, right));
        return true;
    }

    default:
        return false;
    }
}
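
// Math.imul truncates both operands to int32 and multiplies modulo 2^32;
// e.g. Math.imul(0x7fffffff, 2) === -2, which is exactly what the getToInt32
// nodes plus ArithIMul produce here.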

bool ByteCodeParser::handleConstantInternalFunction(
    bool usesResult, int resultOperand, InternalFunction* function, int registerOffset,
    int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind)
{
    // If we ever find that we have a lot of internal functions that we specialize for,
    // then we should probably have some sort of hashtable dispatch, or maybe even
    // dispatch straight through the MethodTable of the InternalFunction. But for now,
    // it seems that this case is hit infrequently enough, and the number of functions
    // we know about is small enough, that having just a linear cascade of if statements
    // is good enough.

    UNUSED_PARAM(prediction); // Remove this once we do more things.

    if (function->classInfo() == &ArrayConstructor::s_info) {
        if (argumentCountIncludingThis == 2) {
            setIntrinsicResult(
                usesResult, resultOperand,
                addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(registerOffset + argumentToOperand(1))));
            return true;
        }

        for (int i = 1; i < argumentCountIncludingThis; ++i)
            addVarArgChild(get(registerOffset + argumentToOperand(i)));
        setIntrinsicResult(
            usesResult, resultOperand,
            addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0)));
        return true;
    } else if (function->classInfo() == &StringConstructor::s_info) {
        Node* result;

        if (argumentCountIncludingThis <= 1)
            result = cellConstant(m_vm->smallStrings.emptyString());
        else
            result = addToGraph(ToString, get(registerOffset + argumentToOperand(1)));

        if (kind == CodeForConstruct)
            result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result);

        setIntrinsicResult(usesResult, resultOperand, result);
        return true;
    }

    return false;
}
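
// e.g. String(42) yields the primitive string "42", while new String(42)
// must allocate a wrapper object -- hence the extra NewStringObject node in
// the CodeForConstruct path.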

Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset)
{
    Node* propertyStorage;
    if (isInlineOffset(offset))
        propertyStorage = base;
    else
        propertyStorage = addToGraph(GetButterfly, base);
    // FIXME: It would be far more efficient for load elimination (and safer from
    // an OSR standpoint) if GetByOffset also referenced the object we were loading
    // from, and if we could load eliminate a GetByOffset even if the butterfly
    // had changed. That would be a great success.
    Node* getByOffset = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage);

    StorageAccessData storageAccessData;
    storageAccessData.offset = indexRelativeToBase(offset);
    storageAccessData.identifierNumber = identifierNumber;
    m_graph.m_storageAccessData.append(storageAccessData);

    return getByOffset;
}
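
// Two storage flavors are handled above: inline offsets live directly in the
// object cell, so the "storage" is the base itself; out-of-line offsets live
// in the butterfly, which must first be loaded with GetButterfly.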

void ByteCodeParser::handleGetByOffset(
    int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
    PropertyOffset offset)
{
    set(destinationOperand, handleGetByOffset(prediction, base, identifierNumber, offset));
}

void ByteCodeParser::handleGetById(
    int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber,
    const GetByIdStatus& getByIdStatus)
{
    if (!getByIdStatus.isSimple()
        || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
        || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCache)) {
        set(destinationOperand,
            addToGraph(
                getByIdStatus.makesCalls() ? GetByIdFlush : GetById,
                OpInfo(identifierNumber), OpInfo(prediction), base));
        return;
    }

    ASSERT(getByIdStatus.structureSet().size());

    // The implementation of GetByOffset does not know to terminate speculative
    // execution if it doesn't have a prediction, so we do it manually.
    if (prediction == SpecNone)
        addToGraph(ForceOSRExit);
    else if (m_graph.m_compilation)
        m_graph.m_compilation->noticeInlinedGetById();

    Node* originalBaseForBaselineJIT = base;

    addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base);

    if (!getByIdStatus.chain().isEmpty()) {
        Structure* currentStructure = getByIdStatus.structureSet().singletonStructure();
        JSObject* currentObject = 0;
        for (unsigned i = 0; i < getByIdStatus.chain().size(); ++i) {
            currentObject = asObject(currentStructure->prototypeForLookup(m_inlineStackTop->m_codeBlock));
            currentStructure = getByIdStatus.chain()[i];
            base = addStructureTransitionCheck(currentObject, currentStructure);
        }
    }

    // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to
    // ensure that the base of the original get_by_id is kept alive until we're done with
    // all of the speculations. We only insert the Phantom if there had been a CheckStructure
    // on something other than the base following the CheckStructure on base, or if the
    // access was compiled to a WeakJSConstant specific value, in which case we might not
    // have any explicit use of the base at all.
    if (getByIdStatus.specificValue() || originalBaseForBaselineJIT != base)
        addToGraph(Phantom, originalBaseForBaselineJIT);

    if (getByIdStatus.specificValue()) {
        ASSERT(getByIdStatus.specificValue().isCell());

        set(destinationOperand, cellConstant(getByIdStatus.specificValue().asCell()));
        return;
    }

    handleGetByOffset(
        destinationOperand, prediction, base, identifierNumber, getByIdStatus.offset());
}
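
// Example: for "o.f" found on o's prototype, the chain walk above plants a
// transition check per prototype and leaves base pointing at the prototype
// object that actually holds f; the Phantom then keeps the original base
// alive so OSR exit still sees it.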

void ByteCodeParser::prepareToParseBlock()
{
    for (unsigned i = 0; i < m_constants.size(); ++i)
        m_constants[i] = ConstantRecord();
    m_cellConstantNodes.clear();
}

Node* ByteCodeParser::getScope(bool skipTop, unsigned skipCount)
{
    Node* localBase;
    if (inlineCallFrame() && !inlineCallFrame()->isClosureCall()) {
        ASSERT(inlineCallFrame()->callee);
        localBase = cellConstant(inlineCallFrame()->callee->scope());
    } else
        localBase = addToGraph(GetMyScope);
    if (skipTop) {
        ASSERT(!inlineCallFrame());
        localBase = addToGraph(SkipTopScope, localBase);
    }
    for (unsigned n = skipCount; n--;)
        localBase = addToGraph(SkipScope, localBase);
    return localBase;
}
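
// Roughly: start from the callee's scope (a cell constant when inlined into a
// non-closure call site), optionally skip the top scope, then follow skipCount
// SkipScope links -- e.g. a variable captured two activations up yields two
// SkipScope nodes.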

bool ByteCodeParser::parseResolveOperations(SpeculatedType prediction, unsigned identifier, ResolveOperations* resolveOperations, PutToBaseOperation* putToBaseOperation, Node** base, Node** value)
{
    if (resolveOperations->isEmpty()) {
        addToGraph(ForceOSRExit);
        return false;
    }
    JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();
    unsigned skipCount = 0;
    bool skipTop = false;
    bool skippedScopes = false;
    bool setBase = false;
    ResolveOperation* pc = resolveOperations->data();
    Node* localBase = 0;
    bool resolvingBase = true;
    while (resolvingBase) {
        switch (pc->m_operation) {
        case ResolveOperation::ReturnGlobalObjectAsBase:
            *base = cellConstant(globalObject);
            ASSERT(!value);
            return true;

        case ResolveOperation::SetBaseToGlobal:
            *base = cellConstant(globalObject);
            setBase = true;
            resolvingBase = false;
            ++pc;
            break;

        case ResolveOperation::SetBaseToUndefined:
            *base = constantUndefined();
            setBase = true;
            resolvingBase = false;
            ++pc;
            break;

        case ResolveOperation::SetBaseToScope:
            localBase = getScope(skipTop, skipCount);
            *base = localBase;
            setBase = true;

            resolvingBase = false;

            // Reset the scope skipping as we've already loaded it
            skippedScopes = false;
            ++pc;
            break;

        case ResolveOperation::ReturnScopeAsBase:
            *base = getScope(skipTop, skipCount);
            ASSERT(!value);
            return true;

        case ResolveOperation::SkipTopScopeNode:
            ASSERT(!inlineCallFrame());
            skipTop = true;
            skippedScopes = true;
            ++pc;
            break;

        case ResolveOperation::SkipScopes:
            skipCount += pc->m_scopesToSkip;
            skippedScopes = true;
            ++pc;
            break;

        case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope:
            return false;

        case ResolveOperation::Fail:
            return false;

        default:
            resolvingBase = false;
        }
    }
    if (skippedScopes)
        localBase = getScope(skipTop, skipCount);

    if (base && !setBase)
        *base = localBase;

    ASSERT(value);
    ResolveOperation* resolveValueOperation = pc;
    switch (resolveValueOperation->m_operation) {
    case ResolveOperation::GetAndReturnGlobalProperty: {
        ResolveGlobalStatus status = ResolveGlobalStatus::computeFor(m_inlineStackTop->m_profiledBlock, m_currentIndex, resolveValueOperation, m_codeBlock->identifier(identifier));
        if (status.isSimple()) {
            ASSERT(status.structure());

            Node* globalObjectNode = addStructureTransitionCheck(globalObject, status.structure());

            if (status.specificValue()) {
                ASSERT(status.specificValue().isCell());
                *value = cellConstant(status.specificValue().asCell());
            } else
                *value = handleGetByOffset(prediction, globalObjectNode, identifier, status.offset());
            return true;
        }

        Node* resolve = addToGraph(ResolveGlobal, OpInfo(m_graph.m_resolveGlobalData.size()), OpInfo(prediction));
        m_graph.m_resolveGlobalData.append(ResolveGlobalData());
        ResolveGlobalData& data = m_graph.m_resolveGlobalData.last();
        data.identifierNumber = identifier;
        data.resolveOperations = resolveOperations;
        data.putToBaseOperation = putToBaseOperation;
        data.resolvePropertyIndex = resolveValueOperation - resolveOperations->data();
        *value = resolve;
        return true;
    }
    case ResolveOperation::GetAndReturnGlobalVar: {
        *value = addToGraph(
            GetGlobalVar,
            OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)),
            OpInfo(prediction));
        return true;
    }
    case ResolveOperation::GetAndReturnGlobalVarWatchable: {
        SpeculatedType prediction = getPrediction();

        JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject();

        Identifier ident = m_codeBlock->identifier(identifier);
        SymbolTableEntry entry = globalObject->symbolTable()->get(ident.impl());
        if (!entry.couldBeWatched()) {
            *value = addToGraph(GetGlobalVar, OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)), OpInfo(prediction));
            return true;
        }

        // The watchpoint is still intact! This means that we will get notified if the
        // current value in the global variable changes. So, we can inline that value.
        // Moreover, currently we can assume that this value is a JSFunction*, which
        // implies that it's a cell. This simplifies things, since in general we'd have
        // to use a JSConstant for non-cells and a WeakJSConstant for cells. So instead
        // of having both cases we just assert that the value is a cell.

        // NB. If it wasn't for CSE, GlobalVarWatchpoint would have no need for the
        // register pointer. But CSE tracks effects on global variables by comparing
        // register pointers. Because CSE executes multiple times while the backend
        // executes once, we use the following performance trade-off:
        // - The node refers directly to the register pointer to make CSE super cheap.
        // - To perform backend code generation, the node only contains the identifier
        //   number, from which it is possible to get (via a few average-time O(1)
        //   lookups) to the WatchpointSet.

        addToGraph(GlobalVarWatchpoint, OpInfo(globalObject->assertRegisterIsInThisObject(pc->m_registerAddress)), OpInfo(identifier));

        JSValue specificValue = globalObject->registerAt(entry.getIndex()).get();
        ASSERT(specificValue.isCell());
        *value = cellConstant(specificValue.asCell());
        return true;
    }
    case ResolveOperation::GetAndReturnScopedVar: {
        Node* getScopeRegisters = addToGraph(GetScopeRegisters, localBase);
        *value = addToGraph(GetScopedVar, OpInfo(resolveValueOperation->m_offset), OpInfo(prediction), getScopeRegisters);
        return true;
    }
    default:
        break;
    }
    return false;
}
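
// In effect, for a global like "function foo() {}" that has never been
// reassigned, the watchable path above plants a GlobalVarWatchpoint and folds
// loads of foo into a weak cell constant; a later store to foo fires the
// watchpoint and invalidates code compiled under that assumption.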

bool ByteCodeParser::parseBlock(unsigned limit)
{
    bool shouldContinueParsing = true;

    Interpreter* interpreter = m_vm->interpreter;
    Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin();
    unsigned blockBegin = m_currentIndex;

    // If we are the first basic block, introduce markers for arguments. This allows
    // us to track if a use of an argument may use the actual argument passed, as
    // opposed to using a value we set explicitly.
    if (m_currentBlock == m_graph.m_blocks[0].get() && !inlineCallFrame()) {
        m_graph.m_arguments.resize(m_numArguments);
        for (unsigned argument = 0; argument < m_numArguments; ++argument) {
            VariableAccessData* variable = newVariableAccessData(
                argumentToOperand(argument), m_codeBlock->isCaptured(argumentToOperand(argument)));
            variable->mergeStructureCheckHoistingFailed(
                m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache));

            Node* setArgument = addToGraph(SetArgument, OpInfo(variable));
            m_graph.m_arguments[argument] = setArgument;
            m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument);
        }
    }

    while (true) {
        m_currentProfilingIndex = m_currentIndex;

        // Don't extend over jump destinations.
        if (m_currentIndex == limit) {
            // Ordinarily we want to plant a jump. But refuse to do this if the block is
            // empty. This is a special case for inlining, which might otherwise create
            // some empty blocks in some cases. When parseBlock() returns with an empty
            // block, it will get repurposed instead of creating a new one. Note that this
            // logic relies on every bytecode resulting in one or more nodes, which would
            // be true anyway except for op_loop_hint, which emits a Phantom to force this
            // to be true.
            if (!m_currentBlock->isEmpty())
                addToGraph(Jump, OpInfo(m_currentIndex));
            else {
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLogF("Refusing to plant jump at limit %u because block %p is empty.\n", limit, m_currentBlock);
#endif
            }
            return shouldContinueParsing;
        }

        // Switch on the current bytecode opcode.
        Instruction* currentInstruction = instructionsBegin + m_currentIndex;
        m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls.
        OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode);

        if (m_graph.m_compilation && opcodeID != op_call_put_result) {
            addToGraph(CountExecution, OpInfo(m_graph.m_compilation->executionCounterFor(
                Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin()))));
        }

        switch (opcodeID) {

        // === Function entry opcodes ===

        case op_enter:
            // Initialize all locals to undefined.
            for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i)
                set(i, constantUndefined(), SetOnEntry);
            NEXT_OPCODE(op_enter);

        case op_convert_this: {
            Node* op1 = getThis();
            if (op1->op() != ConvertThis) {
                ValueProfile* profile =
                    m_inlineStackTop->m_profiledBlock->valueProfileForBytecodeOffset(m_currentProfilingIndex);
                profile->computeUpdatedPrediction();
#if DFG_ENABLE(DEBUG_VERBOSE)
                dataLogF("[bc#%u]: profile %p: ", m_currentProfilingIndex, profile);
                profile->dump(WTF::dataFile());
                dataLogF("\n");
#endif
                if (profile->m_singletonValueIsTop
                    || !profile->m_singletonValue
                    || !profile->m_singletonValue.isCell()
                    || profile->m_singletonValue.asCell()->classInfo() != &Structure::s_info)
                    setThis(addToGraph(ConvertThis, op1));
                else {
                    addToGraph(
                        CheckStructure,
                        OpInfo(m_graph.addStructureSet(jsCast<Structure*>(profile->m_singletonValue.asCell()))),
                        op1);
                }
            }
            NEXT_OPCODE(op_convert_this);
        }
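
        // In effect: when profiling shows |this| has always been an object with
        // a single Structure, the CheckStructure above suffices and the explicit
        // ConvertThis (which would have to box primitive |this| values) is
        // skipped.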

        case op_create_this: {
            int calleeOperand = currentInstruction[2].u.operand;
            Node* callee = get(calleeOperand);
            bool alreadyEmitted = false;
            if (callee->op() == WeakJSConstant) {
                JSCell* cell = callee->weakConstant();
                ASSERT(cell->inherits(&JSFunction::s_info));

                JSFunction* function = jsCast<JSFunction*>(cell);
                ObjectAllocationProfile* allocationProfile = function->tryGetAllocationProfile();
                if (allocationProfile) {
                    addToGraph(AllocationProfileWatchpoint, OpInfo(function));
                    // The callee is still live up to this point.
                    addToGraph(Phantom, callee);
                    set(currentInstruction[1].u.operand,
                        addToGraph(NewObject, OpInfo(allocationProfile->structure())));
                    alreadyEmitted = true;
                }
            }
            if (!alreadyEmitted)
                set(currentInstruction[1].u.operand,
                    addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee));
            NEXT_OPCODE(op_create_this);
        }

        case op_new_object: {
            set(currentInstruction[1].u.operand,
                addToGraph(NewObject,
                    OpInfo(currentInstruction[3].u.objectAllocationProfile->structure())));
            NEXT_OPCODE(op_new_object);
        }

        case op_new_array: {
            int startOperand = currentInstruction[2].u.operand;
            int numOperands = currentInstruction[3].u.operand;
            ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
            for (int operandIdx = startOperand; operandIdx < startOperand + numOperands; ++operandIdx)
                addVarArgChild(get(operandIdx));
            set(currentInstruction[1].u.operand, addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0)));
            NEXT_OPCODE(op_new_array);
        }

        case op_new_array_with_size: {
            int lengthOperand = currentInstruction[2].u.operand;
            ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile;
            set(currentInstruction[1].u.operand, addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(lengthOperand)));
            NEXT_OPCODE(op_new_array_with_size);
        }

        case op_new_array_buffer: {
            int startConstant = currentInstruction[2].u.operand;
            int numConstants = currentInstruction[3].u.operand;
            ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile;
            NewArrayBufferData data;
            data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant];
            data.numConstants = numConstants;
            data.indexingType = profile->selectIndexingType();

            // If this statement has never executed, we'll have the wrong indexing type in the profile.
            for (int i = 0; i < numConstants; ++i) {
                data.indexingType =
                    leastUpperBoundOfIndexingTypeAndValue(
                        data.indexingType,
                        m_codeBlock->constantBuffer(data.startConstant)[i]);
            }

            m_graph.m_newArrayBufferData.append(data);
            set(currentInstruction[1].u.operand, addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last())));
            NEXT_OPCODE(op_new_array_buffer);
        }

        case op_new_regexp: {
            set(currentInstruction[1].u.operand, addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand)));
            NEXT_OPCODE(op_new_regexp);
        }

        case op_get_callee: {
            ValueProfile* profile = currentInstruction[2].u.profile;
            profile->computeUpdatedPrediction();
            if (profile->m_singletonValueIsTop
                || !profile->m_singletonValue
                || !profile->m_singletonValue.isCell())
                set(currentInstruction[1].u.operand, get(JSStack::Callee));
            else {
                ASSERT(profile->m_singletonValue.asCell()->inherits(&JSFunction::s_info));
                Node* actualCallee = get(JSStack::Callee);
                addToGraph(CheckFunction, OpInfo(profile->m_singletonValue.asCell()), actualCallee);
                set(currentInstruction[1].u.operand, addToGraph(WeakJSConstant, OpInfo(profile->m_singletonValue.asCell())));
            }
            NEXT_OPCODE(op_get_callee);
        }

        // === Bitwise operations ===

        case op_bitand: {
            Node* op1 = getToInt32(currentInstruction[2].u.operand);
            Node* op2 = getToInt32(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2));
            NEXT_OPCODE(op_bitand);
        }

        case op_bitor: {
            Node* op1 = getToInt32(currentInstruction[2].u.operand);
            Node* op2 = getToInt32(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2));
            NEXT_OPCODE(op_bitor);
        }

        case op_bitxor: {
            Node* op1 = getToInt32(currentInstruction[2].u.operand);
            Node* op2 = getToInt32(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2));
            NEXT_OPCODE(op_bitxor);
        }

        case op_rshift: {
            Node* op1 = getToInt32(currentInstruction[2].u.operand);
            Node* op2 = getToInt32(currentInstruction[3].u.operand);
            Node* result;
            // Optimize out shifts by zero.
            if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
                result = op1;
            else
                result = addToGraph(BitRShift, op1, op2);
            set(currentInstruction[1].u.operand, result);
            NEXT_OPCODE(op_rshift);
        }

        case op_lshift: {
            Node* op1 = getToInt32(currentInstruction[2].u.operand);
            Node* op2 = getToInt32(currentInstruction[3].u.operand);
            Node* result;
            // Optimize out shifts by zero.
            if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f))
                result = op1;
            else
                result = addToGraph(BitLShift, op1, op2);
            set(currentInstruction[1].u.operand, result);
            NEXT_OPCODE(op_lshift);
        }

        case op_urshift: {
            Node* op1 = getToInt32(currentInstruction[2].u.operand);
            Node* op2 = getToInt32(currentInstruction[3].u.operand);
            Node* result;
            // The result of a zero-extending right shift is treated as an unsigned value.
            // This means that if the top bit is set, the result is not in the int32 range,
            // and as such must be stored as a double. If the shift amount is a constant,
            // we may be able to optimize.
            if (isInt32Constant(op2)) {
                // If we know we are shifting by a non-zero amount, then since the operation
                // zero fills we know the top bit of the result must be zero, and as such the
                // result must be within the int32 range. Conversely, if this is a shift by
                // zero, then the result may be changed by the conversion to unsigned, but it
                // is not necessary to perform the shift!
                if (valueOfInt32Constant(op2) & 0x1f)
                    result = addToGraph(BitURShift, op1, op2);
                else
                    result = makeSafe(addToGraph(UInt32ToNumber, op1));
            } else {
                // Cannot optimize at this stage; shift & potentially rebox as a double.
                result = addToGraph(BitURShift, op1, op2);
                result = makeSafe(addToGraph(UInt32ToNumber, result));
            }
            set(currentInstruction[1].u.operand, result);
            NEXT_OPCODE(op_urshift);
        }
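
        // Worked example: (x >>> 1) always clears the top bit, so the result
        // fits in int32 and no conversion is needed; but (-1 >>> 0) is
        // 4294967295, which only fits in a double -- hence the UInt32ToNumber
        // in the shift-by-zero and non-constant paths.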

        // === Increment/Decrement opcodes ===

        case op_inc: {
            unsigned srcDst = currentInstruction[1].u.operand;
            Node* op = get(srcDst);
            set(srcDst, makeSafe(addToGraph(ArithAdd, op, one())));
            NEXT_OPCODE(op_inc);
        }

        case op_dec: {
            unsigned srcDst = currentInstruction[1].u.operand;
            Node* op = get(srcDst);
            set(srcDst, makeSafe(addToGraph(ArithSub, op, one())));
            NEXT_OPCODE(op_dec);
        }

        // === Arithmetic operations ===

        case op_add: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (op1->hasNumberResult() && op2->hasNumberResult())
                set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithAdd, op1, op2)));
            else
                set(currentInstruction[1].u.operand, makeSafe(addToGraph(ValueAdd, op1, op2)));
            NEXT_OPCODE(op_add);
        }
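
        // In effect, an add whose inputs are known numbers (e.g. i + 1)
        // compiles to ArithAdd, while an add like a + b where either side might
        // be a string or object compiles to ValueAdd, which also covers
        // concatenation and ToPrimitive behavior.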

        case op_sub: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithSub, op1, op2)));
            NEXT_OPCODE(op_sub);
        }

        case op_negate: {
            Node* op1 = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithNegate, op1)));
            NEXT_OPCODE(op_negate);
        }

        case op_mul: {
            // Multiply requires that the inputs are not truncated, unfortunately.
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMul, op1, op2)));
            NEXT_OPCODE(op_mul);
        }

        case op_mod: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeSafe(addToGraph(ArithMod, op1, op2)));
            NEXT_OPCODE(op_mod);
        }

        case op_div: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, makeDivSafe(addToGraph(ArithDiv, op1, op2)));
            NEXT_OPCODE(op_div);
        }

        // === Misc operations ===

#if ENABLE(DEBUG_WITH_BREAKPOINT)
        case op_debug:
            addToGraph(Breakpoint);
            NEXT_OPCODE(op_debug);
#endif
        case op_mov: {
            Node* op = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, op);
            NEXT_OPCODE(op_mov);
        }

        case op_check_has_instance:
            addToGraph(CheckHasInstance, get(currentInstruction[3].u.operand));
            NEXT_OPCODE(op_check_has_instance);

        case op_instanceof: {
            Node* value = get(currentInstruction[2].u.operand);
            Node* prototype = get(currentInstruction[3].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(InstanceOf, value, prototype));
            NEXT_OPCODE(op_instanceof);
        }

        case op_is_undefined: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsUndefined, value));
            NEXT_OPCODE(op_is_undefined);
        }

        case op_is_boolean: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsBoolean, value));
            NEXT_OPCODE(op_is_boolean);
        }

        case op_is_number: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsNumber, value));
            NEXT_OPCODE(op_is_number);
        }

        case op_is_string: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsString, value));
            NEXT_OPCODE(op_is_string);
        }

        case op_is_object: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsObject, value));
            NEXT_OPCODE(op_is_object);
        }

        case op_is_function: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(IsFunction, value));
            NEXT_OPCODE(op_is_function);
        }

        case op_not: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value));
            NEXT_OPCODE(op_not);
        }

        case op_to_primitive: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(ToPrimitive, value));
            NEXT_OPCODE(op_to_primitive);
        }

        case op_strcat: {
            int startOperand = currentInstruction[2].u.operand;
            int numOperands = currentInstruction[3].u.operand;
#if CPU(X86)
            // X86 doesn't have enough registers to compile MakeRope with three arguments.
            // Rather than try to be clever, we just make MakeRope dumber on this processor.
            const unsigned maxRopeArguments = 2;
#else
            const unsigned maxRopeArguments = 3;
#endif
            OwnArrayPtr<Node*> toStringNodes = adoptArrayPtr(new Node*[numOperands]);
            for (int i = 0; i < numOperands; i++)
                toStringNodes[i] = addToGraph(ToString, get(startOperand + i));

            for (int i = 0; i < numOperands; i++)
                addToGraph(Phantom, toStringNodes[i]);

            Node* operands[AdjacencyList::Size];
            unsigned indexInOperands = 0;
            for (unsigned i = 0; i < AdjacencyList::Size; ++i)
                operands[i] = 0;
            for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) {
                if (indexInOperands == maxRopeArguments) {
                    operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]);
                    for (unsigned i = 1; i < AdjacencyList::Size; ++i)
                        operands[i] = 0;
                    indexInOperands = 1;
                }

                ASSERT(indexInOperands < AdjacencyList::Size);
                ASSERT(indexInOperands < maxRopeArguments);
                operands[indexInOperands++] = toStringNodes[operandIdx];
            }
            set(currentInstruction[1].u.operand,
                addToGraph(MakeRope, operands[0], operands[1], operands[2]));
            NEXT_OPCODE(op_strcat);
        }
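
        // Example with maxRopeArguments == 3: "a" + "b" + "c" + "d" + "e" first
        // emits MakeRope(a, b, c), then reuses that rope as operands[0] and
        // emits the final MakeRope(rope, d, e).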

        case op_less: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue a = valueOfJSConstant(op1);
                JSValue b = valueOfJSConstant(op2);
                if (a.isNumber() && b.isNumber()) {
                    set(currentInstruction[1].u.operand,
                        getJSConstantForValue(jsBoolean(a.asNumber() < b.asNumber())));
                    NEXT_OPCODE(op_less);
                }
            }
            set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2));
            NEXT_OPCODE(op_less);
        }

        case op_lesseq: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue a = valueOfJSConstant(op1);
                JSValue b = valueOfJSConstant(op2);
                if (a.isNumber() && b.isNumber()) {
                    set(currentInstruction[1].u.operand,
                        getJSConstantForValue(jsBoolean(a.asNumber() <= b.asNumber())));
                    NEXT_OPCODE(op_lesseq);
                }
            }
            set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2));
            NEXT_OPCODE(op_lesseq);
        }

        case op_greater: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue a = valueOfJSConstant(op1);
                JSValue b = valueOfJSConstant(op2);
                if (a.isNumber() && b.isNumber()) {
                    set(currentInstruction[1].u.operand,
                        getJSConstantForValue(jsBoolean(a.asNumber() > b.asNumber())));
                    NEXT_OPCODE(op_greater);
                }
            }
            set(currentInstruction[1].u.operand, addToGraph(CompareGreater, op1, op2));
            NEXT_OPCODE(op_greater);
        }

        case op_greatereq: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue a = valueOfJSConstant(op1);
                JSValue b = valueOfJSConstant(op2);
                if (a.isNumber() && b.isNumber()) {
                    set(currentInstruction[1].u.operand,
                        getJSConstantForValue(jsBoolean(a.asNumber() >= b.asNumber())));
                    NEXT_OPCODE(op_greatereq);
                }
            }
            set(currentInstruction[1].u.operand, addToGraph(CompareGreaterEq, op1, op2));
            NEXT_OPCODE(op_greatereq);
        }

        case op_eq: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue a = valueOfJSConstant(op1);
                JSValue b = valueOfJSConstant(op2);
                set(currentInstruction[1].u.operand,
                    getJSConstantForValue(jsBoolean(JSValue::equal(m_codeBlock->globalObject()->globalExec(), a, b))));
                NEXT_OPCODE(op_eq);
            }
            set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2));
            NEXT_OPCODE(op_eq);
        }

        case op_eq_null: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(CompareEqConstant, value, constantNull()));
            NEXT_OPCODE(op_eq_null);
        }

        case op_stricteq: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue a = valueOfJSConstant(op1);
                JSValue b = valueOfJSConstant(op2);
                set(currentInstruction[1].u.operand,
                    getJSConstantForValue(jsBoolean(JSValue::strictEqual(m_codeBlock->globalObject()->globalExec(), a, b))));
                NEXT_OPCODE(op_stricteq);
            }
            if (isConstantForCompareStrictEq(op1))
                set(currentInstruction[1].u.operand, addToGraph(CompareStrictEqConstant, op2, op1));
            else if (isConstantForCompareStrictEq(op2))
                set(currentInstruction[1].u.operand, addToGraph(CompareStrictEqConstant, op1, op2));
            else
                set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2));
            NEXT_OPCODE(op_stricteq);
        }
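
        // When one operand is a constant accepted by isConstantForCompareStrictEq
        // (presumably null, undefined, and similar immediates), the comparison is
        // emitted as CompareStrictEqConstant with the constant as the second
        // child, which the backend can test more cheaply than the generic
        // CompareStrictEq.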

        case op_neq: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue a = valueOfJSConstant(op1);
                JSValue b = valueOfJSConstant(op2);
                set(currentInstruction[1].u.operand,
                    getJSConstantForValue(jsBoolean(!JSValue::equal(m_codeBlock->globalObject()->globalExec(), a, b))));
                NEXT_OPCODE(op_neq);
            }
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2)));
            NEXT_OPCODE(op_neq);
        }

        case op_neq_null: {
            Node* value = get(currentInstruction[2].u.operand);
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, constantNull())));
            NEXT_OPCODE(op_neq_null);
        }

        case op_nstricteq: {
            Node* op1 = get(currentInstruction[2].u.operand);
            Node* op2 = get(currentInstruction[3].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue a = valueOfJSConstant(op1);
                JSValue b = valueOfJSConstant(op2);
                set(currentInstruction[1].u.operand,
                    getJSConstantForValue(jsBoolean(!JSValue::strictEqual(m_codeBlock->globalObject()->globalExec(), a, b))));
                NEXT_OPCODE(op_nstricteq);
            }
            Node* invertedResult;
            if (isConstantForCompareStrictEq(op1))
                invertedResult = addToGraph(CompareStrictEqConstant, op2, op1);
            else if (isConstantForCompareStrictEq(op2))
                invertedResult = addToGraph(CompareStrictEqConstant, op1, op2);
            else
                invertedResult = addToGraph(CompareStrictEq, op1, op2);
            set(currentInstruction[1].u.operand, addToGraph(LogicalNot, invertedResult));
            NEXT_OPCODE(op_nstricteq);
        }

        // === Property access operations ===

        case op_get_by_val: {
            SpeculatedType prediction = getPrediction();

            Node* base = get(currentInstruction[2].u.operand);
            ArrayMode arrayMode = getArrayModeAndEmitChecks(currentInstruction[4].u.arrayProfile, Array::Read, base);
            Node* property = get(currentInstruction[3].u.operand);
            Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property);
            set(currentInstruction[1].u.operand, getByVal);

            NEXT_OPCODE(op_get_by_val);
        }
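
        // getArrayModeAndEmitChecks consults the array profile to choose a
        // shape (e.g. Int32, Double, Contiguous, ArrayStorage) and plants
        // whatever checks make the speculative GetByVal above safe; a
        // mispredicted shape triggers an OSR exit rather than a wrong load.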

        case op_put_by_val: {
            Node* base = get(currentInstruction[1].u.operand);

            ArrayMode arrayMode = getArrayModeAndEmitChecks(currentInstruction[4].u.arrayProfile, Array::Write, base);

            Node* property = get(currentInstruction[2].u.operand);
            Node* value = get(currentInstruction[3].u.operand);

            addVarArgChild(base);
            addVarArgChild(property);
            addVarArgChild(value);
            addVarArgChild(0); // Leave room for property storage.
            addToGraph(Node::VarArg, PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0));

            NEXT_OPCODE(op_put_by_val);
        }

        case op_get_by_id:
        case op_get_by_id_out_of_line:
        case op_get_array_length: {
            SpeculatedType prediction = getPrediction();

            Node* base = get(currentInstruction[2].u.operand);
            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];

            Identifier identifier = m_codeBlock->identifier(identifierNumber);
            GetByIdStatus getByIdStatus = GetByIdStatus::computeFor(
                m_inlineStackTop->m_profiledBlock, m_currentIndex, identifier);

            handleGetById(
                currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus);

            NEXT_OPCODE(op_get_by_id);
        }

        case op_put_by_id:
        case op_put_by_id_out_of_line:
        case op_put_by_id_transition_direct:
        case op_put_by_id_transition_normal:
        case op_put_by_id_transition_direct_out_of_line:
        case op_put_by_id_transition_normal_out_of_line: {
            Node* value = get(currentInstruction[3].u.operand);
            Node* base = get(currentInstruction[1].u.operand);
            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
            bool direct = currentInstruction[8].u.operand;

            PutByIdStatus putByIdStatus = PutByIdStatus::computeFor(
                m_inlineStackTop->m_profiledBlock,
                m_currentIndex,
                m_codeBlock->identifier(identifierNumber));
            bool canCountAsInlined = true;
            if (!putByIdStatus.isSet()) {
                addToGraph(ForceOSRExit);
                canCountAsInlined = false;
            }

            bool hasExitSite =
                m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache)
                || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadWeakConstantCache);

            if (!hasExitSite && putByIdStatus.isSimpleReplace()) {
                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
                Node* propertyStorage;
                if (isInlineOffset(putByIdStatus.offset()))
                    propertyStorage = base;
                else
                    propertyStorage = addToGraph(GetButterfly, base);
                addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value);

                StorageAccessData storageAccessData;
                storageAccessData.offset = indexRelativeToBase(putByIdStatus.offset());
                storageAccessData.identifierNumber = identifierNumber;
                m_graph.m_storageAccessData.append(storageAccessData);
            } else if (!hasExitSite
                       && putByIdStatus.isSimpleTransition()
                       && structureChainIsStillValid(
                           direct,
                           putByIdStatus.oldStructure(),
                           putByIdStatus.structureChain())) {

                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base);
                if (!direct) {
                    if (!putByIdStatus.oldStructure()->storedPrototype().isNull()) {
                        addStructureTransitionCheck(
                            putByIdStatus.oldStructure()->storedPrototype().asCell());
                    }

                    for (WriteBarrier<Structure>* it = putByIdStatus.structureChain()->head(); *it; ++it) {
                        JSValue prototype = (*it)->storedPrototype();
                        if (prototype.isNull())
                            continue;
                        ASSERT(prototype.isCell());
                        addStructureTransitionCheck(prototype.asCell());
                    }
                }
                ASSERT(putByIdStatus.oldStructure()->transitionWatchpointSetHasBeenInvalidated());

                Node* propertyStorage;
                StructureTransitionData* transitionData =
                    m_graph.addStructureTransitionData(
                        StructureTransitionData(
                            putByIdStatus.oldStructure(),
                            putByIdStatus.newStructure()));

                if (putByIdStatus.oldStructure()->outOfLineCapacity()
                    != putByIdStatus.newStructure()->outOfLineCapacity()) {

                    // If we're growing the property storage then it must be because we're
                    // storing into the out-of-line storage.
                    ASSERT(!isInlineOffset(putByIdStatus.offset()));

                    if (!putByIdStatus.oldStructure()->outOfLineCapacity()) {
                        propertyStorage = addToGraph(
                            AllocatePropertyStorage, OpInfo(transitionData), base);
                    } else {
                        propertyStorage = addToGraph(
                            ReallocatePropertyStorage, OpInfo(transitionData),
                            base, addToGraph(GetButterfly, base));
                    }
                } else {
                    if (isInlineOffset(putByIdStatus.offset()))
                        propertyStorage = base;
                    else
                        propertyStorage = addToGraph(GetButterfly, base);
                }

                addToGraph(PutStructure, OpInfo(transitionData), base);

                addToGraph(
                    PutByOffset,
                    OpInfo(m_graph.m_storageAccessData.size()),
                    propertyStorage,
                    base,
                    value);

                StorageAccessData storageAccessData;
                storageAccessData.offset = indexRelativeToBase(putByIdStatus.offset());
                storageAccessData.identifierNumber = identifierNumber;
                m_graph.m_storageAccessData.append(storageAccessData);
            } else {
                if (direct)
                    addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value);
                else
                    addToGraph(PutById, OpInfo(identifierNumber), base, value);
                canCountAsInlined = false;
            }

            if (canCountAsInlined && m_graph.m_compilation)
                m_graph.m_compilation->noticeInlinedPutById();

            NEXT_OPCODE(op_put_by_id);
        }
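
        // Worked example: for "o.f = v" where o's Structure lacks f and the
        // transition is cacheable, the code above emits CheckStructure,
        // possibly Allocate/ReallocatePropertyStorage to grow the butterfly,
        // PutStructure to install the new Structure, and a raw PutByOffset --
        // avoiding the generic PutById entirely.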
        case op_init_global_const_nop: {
            NEXT_OPCODE(op_init_global_const_nop);
        }

        case op_init_global_const: {
            Node* value = get(currentInstruction[2].u.operand);
            addToGraph(
                PutGlobalVar,
                OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
                value);
            NEXT_OPCODE(op_init_global_const);
        }

        case op_init_global_const_check: {
            Node* value = get(currentInstruction[2].u.operand);
            CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
            JSGlobalObject* globalObject = codeBlock->globalObject();
            unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[4].u.operand];
            Identifier identifier = m_codeBlock->identifier(identifierNumber);
            SymbolTableEntry entry = globalObject->symbolTable()->get(identifier.impl());
            if (!entry.couldBeWatched()) {
                addToGraph(
                    PutGlobalVar,
                    OpInfo(globalObject->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
                    value);
                NEXT_OPCODE(op_init_global_const_check);
            }
            addToGraph(
                PutGlobalVarCheck,
                OpInfo(codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)),
                OpInfo(identifierNumber),
                value);
            NEXT_OPCODE(op_init_global_const_check);
        }
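
        // Note: op_init_global_const_check only needs the checked PutGlobalVarCheck
        // form while the symbol-table entry could still be watched; once watching
        // is impossible, a plain PutGlobalVar suffices.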
        // === Block terminators. ===

        case op_jmp: {
            unsigned relativeOffset = currentInstruction[1].u.operand;
            addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
            LAST_OPCODE(op_jmp);
        }
        case op_jtrue: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            Node* condition = get(currentInstruction[1].u.operand);
            if (canFold(condition)) {
                TriState state = valueOfJSConstant(condition).pureToBoolean();
                if (state == TrueTriState) {
                    addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
                    LAST_OPCODE(op_jtrue);
                } else if (state == FalseTriState) {
                    // Emit a placeholder for this bytecode operation but otherwise
                    // just fall through.
                    addToGraph(Phantom);
                    NEXT_OPCODE(op_jtrue);
                }
            }
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition);
            LAST_OPCODE(op_jtrue);
        }
        case op_jfalse: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            Node* condition = get(currentInstruction[1].u.operand);
            if (canFold(condition)) {
                TriState state = valueOfJSConstant(condition).pureToBoolean();
                if (state == FalseTriState) {
                    addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
                    LAST_OPCODE(op_jfalse);
                } else if (state == TrueTriState) {
                    // Emit a placeholder for this bytecode operation but otherwise
                    // just fall through.
                    addToGraph(Phantom);
                    NEXT_OPCODE(op_jfalse);
                }
            }
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jfalse);
        }
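
        // op_jtrue and op_jfalse are mirror images: when the condition is a
        // foldable constant the parser plants either an unconditional Jump (branch
        // taken) or a Phantom placeholder (fall through); otherwise it plants a
        // two-target Branch whose taken/not-taken OpInfos are simply swapped
        // between the two opcodes.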
        case op_jeq_null: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            Node* value = get(currentInstruction[1].u.operand);
            Node* condition = addToGraph(CompareEqConstant, value, constantNull());
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition);
            LAST_OPCODE(op_jeq_null);
        }
        case op_jneq_null: {
            unsigned relativeOffset = currentInstruction[2].u.operand;
            Node* value = get(currentInstruction[1].u.operand);
            Node* condition = addToGraph(CompareEqConstant, value, constantNull());
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jneq_null);
        }
        case op_jless: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            Node* op1 = get(currentInstruction[1].u.operand);
            Node* op2 = get(currentInstruction[2].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue aValue = valueOfJSConstant(op1);
                JSValue bValue = valueOfJSConstant(op2);
                if (aValue.isNumber() && bValue.isNumber()) {
                    double a = aValue.asNumber();
                    double b = bValue.asNumber();
                    if (a < b) {
                        addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
                        LAST_OPCODE(op_jless);
                    } else {
                        // Emit a placeholder for this bytecode operation but otherwise
                        // just fall through.
                        addToGraph(Phantom);
                        NEXT_OPCODE(op_jless);
                    }
                }
            }
            Node* condition = addToGraph(CompareLess, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition);
            LAST_OPCODE(op_jless);
        }
        case op_jlesseq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            Node* op1 = get(currentInstruction[1].u.operand);
            Node* op2 = get(currentInstruction[2].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue aValue = valueOfJSConstant(op1);
                JSValue bValue = valueOfJSConstant(op2);
                if (aValue.isNumber() && bValue.isNumber()) {
                    double a = aValue.asNumber();
                    double b = bValue.asNumber();
                    if (a <= b) {
                        addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
                        LAST_OPCODE(op_jlesseq);
                    } else {
                        // Emit a placeholder for this bytecode operation but otherwise
                        // just fall through.
                        addToGraph(Phantom);
                        NEXT_OPCODE(op_jlesseq);
                    }
                }
            }
            Node* condition = addToGraph(CompareLessEq, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition);
            LAST_OPCODE(op_jlesseq);
        }
        case op_jgreater: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            Node* op1 = get(currentInstruction[1].u.operand);
            Node* op2 = get(currentInstruction[2].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue aValue = valueOfJSConstant(op1);
                JSValue bValue = valueOfJSConstant(op2);
                if (aValue.isNumber() && bValue.isNumber()) {
                    double a = aValue.asNumber();
                    double b = bValue.asNumber();
                    if (a > b) {
                        addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
                        LAST_OPCODE(op_jgreater);
                    } else {
                        // Emit a placeholder for this bytecode operation but otherwise
                        // just fall through.
                        addToGraph(Phantom);
                        NEXT_OPCODE(op_jgreater);
                    }
                }
            }
            Node* condition = addToGraph(CompareGreater, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreater)), condition);
            LAST_OPCODE(op_jgreater);
        }
        case op_jgreatereq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            Node* op1 = get(currentInstruction[1].u.operand);
            Node* op2 = get(currentInstruction[2].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue aValue = valueOfJSConstant(op1);
                JSValue bValue = valueOfJSConstant(op2);
                if (aValue.isNumber() && bValue.isNumber()) {
                    double a = aValue.asNumber();
                    double b = bValue.asNumber();
                    if (a >= b) {
                        addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
                        LAST_OPCODE(op_jgreatereq);
                    } else {
                        // Emit a placeholder for this bytecode operation but otherwise
                        // just fall through.
                        addToGraph(Phantom);
                        NEXT_OPCODE(op_jgreatereq);
                    }
                }
            }
            Node* condition = addToGraph(CompareGreaterEq, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jgreatereq)), condition);
            LAST_OPCODE(op_jgreatereq);
        }
        case op_jnless: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            Node* op1 = get(currentInstruction[1].u.operand);
            Node* op2 = get(currentInstruction[2].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue aValue = valueOfJSConstant(op1);
                JSValue bValue = valueOfJSConstant(op2);
                if (aValue.isNumber() && bValue.isNumber()) {
                    double a = aValue.asNumber();
                    double b = bValue.asNumber();
                    if (a < b) {
                        // Emit a placeholder for this bytecode operation but otherwise
                        // just fall through.
                        addToGraph(Phantom);
                        NEXT_OPCODE(op_jnless);
                    } else {
                        addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
                        LAST_OPCODE(op_jnless);
                    }
                }
            }
            Node* condition = addToGraph(CompareLess, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jnless);
        }
        case op_jnlesseq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            Node* op1 = get(currentInstruction[1].u.operand);
            Node* op2 = get(currentInstruction[2].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue aValue = valueOfJSConstant(op1);
                JSValue bValue = valueOfJSConstant(op2);
                if (aValue.isNumber() && bValue.isNumber()) {
                    double a = aValue.asNumber();
                    double b = bValue.asNumber();
                    if (a <= b) {
                        // Emit a placeholder for this bytecode operation but otherwise
                        // just fall through.
                        addToGraph(Phantom);
                        NEXT_OPCODE(op_jnlesseq);
                    } else {
                        addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
                        LAST_OPCODE(op_jnlesseq);
                    }
                }
            }
            Node* condition = addToGraph(CompareLessEq, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jnlesseq);
        }
        case op_jngreater: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            Node* op1 = get(currentInstruction[1].u.operand);
            Node* op2 = get(currentInstruction[2].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue aValue = valueOfJSConstant(op1);
                JSValue bValue = valueOfJSConstant(op2);
                if (aValue.isNumber() && bValue.isNumber()) {
                    double a = aValue.asNumber();
                    double b = bValue.asNumber();
                    if (a > b) {
                        // Emit a placeholder for this bytecode operation but otherwise
                        // just fall through.
                        addToGraph(Phantom);
                        NEXT_OPCODE(op_jngreater);
                    } else {
                        addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
                        LAST_OPCODE(op_jngreater);
                    }
                }
            }
            Node* condition = addToGraph(CompareGreater, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreater)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jngreater);
        }
        case op_jngreatereq: {
            unsigned relativeOffset = currentInstruction[3].u.operand;
            Node* op1 = get(currentInstruction[1].u.operand);
            Node* op2 = get(currentInstruction[2].u.operand);
            if (canFold(op1) && canFold(op2)) {
                JSValue aValue = valueOfJSConstant(op1);
                JSValue bValue = valueOfJSConstant(op2);
                if (aValue.isNumber() && bValue.isNumber()) {
                    double a = aValue.asNumber();
                    double b = bValue.asNumber();
                    if (a >= b) {
                        // Emit a placeholder for this bytecode operation but otherwise
                        // just fall through.
                        addToGraph(Phantom);
                        NEXT_OPCODE(op_jngreatereq);
                    } else {
                        addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset));
                        LAST_OPCODE(op_jngreatereq);
                    }
                }
            }
            Node* condition = addToGraph(CompareGreaterEq, op1, op2);
            addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jngreatereq)), OpInfo(m_currentIndex + relativeOffset), condition);
            LAST_OPCODE(op_jngreatereq);
        }
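
        // All eight compare-and-jump opcodes above follow one pattern: fold the
        // comparison when both operands are numeric constants, otherwise emit a
        // Compare* node feeding a Branch. The negated forms (op_jnless and friends)
        // simply swap which outcome jumps and which falls through.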
        case op_ret:
            flushArgumentsAndCapturedVariables();
            if (inlineCallFrame()) {
                if (m_inlineStackTop->m_returnValue != InvalidVirtualRegister)
                    setDirect(m_inlineStackTop->m_returnValue, get(currentInstruction[1].u.operand));
                m_inlineStackTop->m_didReturn = true;
                if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) {
                    // If we're returning from the first block, then we're done parsing.
                    ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.m_blocks.size() - 1);
                    shouldContinueParsing = false;
                    LAST_OPCODE(op_ret);
                } else {
                    // If inlining created blocks, and we're doing a return, then we need some
                    // special linking.
                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
                    m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false;
                }
                if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) {
                    ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size());
                    addToGraph(Jump, OpInfo(NoBlock));
                    m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true;
                    m_inlineStackTop->m_didEarlyReturn = true;
                }
                LAST_OPCODE(op_ret);
            }
            addToGraph(Return, get(currentInstruction[1].u.operand));
            LAST_OPCODE(op_ret);
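
        // For an inlined frame, op_ret degenerates into a move of the result into
        // the caller's return-value register plus, possibly, an early-return Jump
        // whose target gets linked up later; only the machine code block emits a
        // real Return node.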
        case op_end:
            flushArgumentsAndCapturedVariables();
            ASSERT(!inlineCallFrame());
            addToGraph(Return, get(currentInstruction[1].u.operand));
            LAST_OPCODE(op_end);
        case op_throw:
            flushAllArgumentsAndCapturedVariablesInInlineStack();
            addToGraph(Throw, get(currentInstruction[1].u.operand));
            LAST_OPCODE(op_throw);

        case op_throw_static_error:
            flushAllArgumentsAndCapturedVariablesInInlineStack();
            addToGraph(ThrowReferenceError);
            LAST_OPCODE(op_throw_static_error);
        case op_call:
            handleCall(interpreter, currentInstruction, Call, CodeForCall);
            NEXT_OPCODE(op_call);

        case op_construct:
            handleCall(interpreter, currentInstruction, Construct, CodeForConstruct);
            NEXT_OPCODE(op_construct);
        case op_call_varargs: {
            ASSERT(inlineCallFrame());
            ASSERT(currentInstruction[3].u.operand == m_inlineStackTop->m_codeBlock->argumentsRegister());
            ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments());
            // It would be cool to funnel this into handleCall() so that it can handle
            // inlining. But currently that won't be profitable anyway, since none of the
            // uses of call_varargs will be inlineable. So we set this up manually and
            // without inline/intrinsic detection.

            Instruction* putInstruction = currentInstruction + OPCODE_LENGTH(op_call_varargs);

            SpeculatedType prediction = SpecNone;
            if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result) {
                m_currentProfilingIndex = m_currentIndex + OPCODE_LENGTH(op_call_varargs);
                prediction = getPrediction();
            }

            addToGraph(CheckArgumentsNotCreated);

            unsigned argCount = inlineCallFrame()->arguments.size();
            if (JSStack::CallFrameHeaderSize + argCount > m_parameterSlots)
                m_parameterSlots = JSStack::CallFrameHeaderSize + argCount;

            addVarArgChild(get(currentInstruction[1].u.operand)); // callee
            addVarArgChild(get(currentInstruction[2].u.operand)); // this
            for (unsigned argument = 1; argument < argCount; ++argument)
                addVarArgChild(get(argumentToOperand(argument)));

            Node* call = addToGraph(Node::VarArg, Call, OpInfo(0), OpInfo(prediction));
            if (interpreter->getOpcodeID(putInstruction->u.opcode) == op_call_put_result)
                set(putInstruction[1].u.operand, call);

            NEXT_OPCODE(op_call_varargs);
        }
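
        // op_call_varargs is only parsed inside an inlined frame whose arguments
        // were never materialized, so the argument list can be forwarded one
        // vararg child at a time instead of going through handleCall().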
        case op_call_put_result:
            NEXT_OPCODE(op_call_put_result);
        case op_jneq_ptr:
            // Statically speculate for now. It makes sense to let speculate-only jneq_ptr
            // support simmer for a while before making it more general, since it's
            // already gnarly enough as it is.
            ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer));
            addToGraph(
                CheckFunction,
                OpInfo(actualPointerFor(m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)),
                get(currentInstruction[1].u.operand));
            addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr)));
            LAST_OPCODE(op_jneq_ptr);
        case op_get_scoped_var: {
            SpeculatedType prediction = getPrediction();
            int dst = currentInstruction[1].u.operand;
            int slot = currentInstruction[2].u.operand;
            int depth = currentInstruction[3].u.operand;
            bool hasTopScope = m_codeBlock->codeType() == FunctionCode && m_inlineStackTop->m_codeBlock->needsFullScopeChain();
            ASSERT(!hasTopScope || depth >= 1);
            Node* scope = getScope(hasTopScope, depth - hasTopScope);
            Node* getScopeRegisters = addToGraph(GetScopeRegisters, scope);
            Node* getScopedVar = addToGraph(GetScopedVar, OpInfo(slot), OpInfo(prediction), getScopeRegisters);
            set(dst, getScopedVar);
            NEXT_OPCODE(op_get_scoped_var);
        }
        case op_put_scoped_var: {
            int slot = currentInstruction[1].u.operand;
            int depth = currentInstruction[2].u.operand;
            int source = currentInstruction[3].u.operand;
            bool hasTopScope = m_codeBlock->codeType() == FunctionCode && m_inlineStackTop->m_codeBlock->needsFullScopeChain();
            ASSERT(!hasTopScope || depth >= 1);
            Node* scope = getScope(hasTopScope, depth - hasTopScope);
            Node* scopeRegisters = addToGraph(GetScopeRegisters, scope);
            addToGraph(PutScopedVar, OpInfo(slot), scope, scopeRegisters, get(source));
            NEXT_OPCODE(op_put_scoped_var);
        }
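
        // For both scoped-var ops, the bytecode's scope depth counts the top scope
        // of a full-scope-chain function; subtracting hasTopScope converts it into
        // the number of scope hops getScope() actually needs to walk.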
        case op_resolve:
        case op_resolve_global_property:
        case op_resolve_global_var:
        case op_resolve_scoped_var:
        case op_resolve_scoped_var_on_top_scope:
        case op_resolve_scoped_var_with_top_scope_check: {
            SpeculatedType prediction = getPrediction();

            unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
            ResolveOperations* operations = currentInstruction[3].u.resolveOperations;
            Node* value = 0;
            if (parseResolveOperations(prediction, identifier, operations, 0, 0, &value)) {
                set(currentInstruction[1].u.operand, value);
                NEXT_OPCODE(op_resolve);
            }

            Node* resolve = addToGraph(Resolve, OpInfo(m_graph.m_resolveOperationsData.size()), OpInfo(prediction));
            m_graph.m_resolveOperationsData.append(ResolveOperationData());
            ResolveOperationData& data = m_graph.m_resolveOperationsData.last();
            data.identifierNumber = identifier;
            data.resolveOperations = operations;

            set(currentInstruction[1].u.operand, resolve);

            NEXT_OPCODE(op_resolve);
        }
        case op_put_to_base_variable:
        case op_put_to_base: {
            unsigned base = currentInstruction[1].u.operand;
            unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
            unsigned value = currentInstruction[3].u.operand;
            PutToBaseOperation* putToBase = currentInstruction[4].u.putToBaseOperation;

            if (putToBase->m_isDynamic) {
                addToGraph(PutById, OpInfo(identifier), get(base), get(value));
                NEXT_OPCODE(op_put_to_base);
            }

            switch (putToBase->m_kind) {
            case PutToBaseOperation::Uninitialised:
                addToGraph(ForceOSRExit);
                addToGraph(Phantom, get(base));
                addToGraph(Phantom, get(value));
                break;

            case PutToBaseOperation::GlobalVariablePutChecked: {
                CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;
                JSGlobalObject* globalObject = codeBlock->globalObject();
                SymbolTableEntry entry = globalObject->symbolTable()->get(m_codeBlock->identifier(identifier).impl());
                if (entry.couldBeWatched()) {
                    addToGraph(PutGlobalVarCheck,
                               OpInfo(codeBlock->globalObject()->assertRegisterIsInThisObject(putToBase->m_registerAddress)),
                               OpInfo(identifier),
                               get(value));
                    break;
                }
            }
            case PutToBaseOperation::GlobalVariablePut:
                addToGraph(PutGlobalVar,
                           OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(putToBase->m_registerAddress)),
                           get(value));
                break;
            case PutToBaseOperation::VariablePut: {
                Node* scope = get(base);
                Node* scopeRegisters = addToGraph(GetScopeRegisters, scope);
                addToGraph(PutScopedVar, OpInfo(putToBase->m_offset), scope, scopeRegisters, get(value));
                break;
            }
            case PutToBaseOperation::GlobalPropertyPut: {
                if (!putToBase->m_structure) {
                    addToGraph(ForceOSRExit);
                    addToGraph(Phantom, get(base));
                    addToGraph(Phantom, get(value));
                    NEXT_OPCODE(op_put_to_base);
                }
                Node* baseNode = get(base);
                addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putToBase->m_structure.get())), baseNode);
                Node* propertyStorage;
                if (isInlineOffset(putToBase->m_offset))
                    propertyStorage = baseNode;
                else
                    propertyStorage = addToGraph(GetButterfly, baseNode);
                addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, baseNode, get(value));

                StorageAccessData storageAccessData;
                storageAccessData.offset = indexRelativeToBase(putToBase->m_offset);
                storageAccessData.identifierNumber = identifier;
                m_graph.m_storageAccessData.append(storageAccessData);
                break;
            }
            case PutToBaseOperation::Readonly:
            case PutToBaseOperation::Generic:
                addToGraph(PutById, OpInfo(identifier), get(base), get(value));
            }
            NEXT_OPCODE(op_put_to_base);
        }
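
        // op_put_to_base fans out on the profiled kind of the base: global
        // variable puts (checked or not), scoped variable puts, and
        // structure-checked global property puts each get specialized nodes,
        // while anything dynamic, readonly, or generic falls back to PutById.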
        case op_resolve_base_to_global:
        case op_resolve_base_to_global_dynamic:
        case op_resolve_base_to_scope:
        case op_resolve_base_to_scope_with_top_scope_check:
        case op_resolve_base: {
            SpeculatedType prediction = getPrediction();

            unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand];
            ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
            PutToBaseOperation* putToBaseOperation = currentInstruction[5].u.putToBaseOperation;

            Node* base = 0;
            if (parseResolveOperations(prediction, identifier, operations, 0, &base, 0)) {
                set(currentInstruction[1].u.operand, base);
                NEXT_OPCODE(op_resolve_base);
            }

            Node* resolve = addToGraph(currentInstruction[3].u.operand ? ResolveBaseStrictPut : ResolveBase, OpInfo(m_graph.m_resolveOperationsData.size()), OpInfo(prediction));
            m_graph.m_resolveOperationsData.append(ResolveOperationData());
            ResolveOperationData& data = m_graph.m_resolveOperationsData.last();
            data.identifierNumber = identifier;
            data.resolveOperations = operations;
            data.putToBaseOperation = putToBaseOperation;

            set(currentInstruction[1].u.operand, resolve);

            NEXT_OPCODE(op_resolve_base);
        }
        case op_resolve_with_base: {
            SpeculatedType prediction = getPrediction();
            unsigned baseDst = currentInstruction[1].u.operand;
            unsigned valueDst = currentInstruction[2].u.operand;
            unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
            ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
            PutToBaseOperation* putToBaseOperation = currentInstruction[5].u.putToBaseOperation;

            Node* base = 0;
            Node* value = 0;
            if (parseResolveOperations(prediction, identifier, operations, putToBaseOperation, &base, &value))
                setPair(baseDst, base, valueDst, value);
            else {
                addToGraph(ForceOSRExit);
                setPair(baseDst, addToGraph(GarbageValue), valueDst, addToGraph(GarbageValue));
            }

            NEXT_OPCODE(op_resolve_with_base);
        }
        case op_resolve_with_this: {
            SpeculatedType prediction = getPrediction();
            unsigned baseDst = currentInstruction[1].u.operand;
            unsigned valueDst = currentInstruction[2].u.operand;
            unsigned identifier = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand];
            ResolveOperations* operations = currentInstruction[4].u.resolveOperations;

            Node* base = 0;
            Node* value = 0;
            if (parseResolveOperations(prediction, identifier, operations, 0, &base, &value))
                setPair(baseDst, base, valueDst, value);
            else {
                addToGraph(ForceOSRExit);
                setPair(baseDst, addToGraph(GarbageValue), valueDst, addToGraph(GarbageValue));
            }

            NEXT_OPCODE(op_resolve_with_this);
        }
        case op_loop_hint: {
            // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG
            // OSR can only happen at basic block boundaries. Assert that these two statements
            // are compatible.
            RELEASE_ASSERT(m_currentIndex == blockBegin);

            // We never do OSR into an inlined code block. That could not happen, since OSR
            // looks up the code block that is the replacement for the baseline JIT code
            // block. Hence, machine code block = true code block = not inline code block.
            if (!m_inlineStackTop->m_caller)
                m_currentBlock->isOSRTarget = true;

            if (m_vm->watchdog.isEnabled())
                addToGraph(CheckWatchdogTimer);
            else {
                // Emit a phantom node to ensure that there is a placeholder
                // node for this bytecode op.
                addToGraph(Phantom);
            }

            NEXT_OPCODE(op_loop_hint);
        }
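
        // Loop hints are where the baseline JIT jumps into DFG code, which is why
        // the block-boundary assertion above must hold and why the node emitted
        // here (CheckWatchdogTimer or Phantom) keeps this bytecode offset mapped.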
        case op_init_lazy_reg: {
            set(currentInstruction[1].u.operand, getJSConstantForValue(JSValue()));
            NEXT_OPCODE(op_init_lazy_reg);
        }
        case op_create_activation: {
            set(currentInstruction[1].u.operand, addToGraph(CreateActivation, get(currentInstruction[1].u.operand)));
            NEXT_OPCODE(op_create_activation);
        }
        case op_create_arguments: {
            m_graph.m_hasArguments = true;
            Node* createArguments = addToGraph(CreateArguments, get(currentInstruction[1].u.operand));
            set(currentInstruction[1].u.operand, createArguments);
            set(unmodifiedArgumentsRegister(currentInstruction[1].u.operand), createArguments);
            NEXT_OPCODE(op_create_arguments);
        }
        case op_tear_off_activation: {
            addToGraph(TearOffActivation, get(currentInstruction[1].u.operand));
            NEXT_OPCODE(op_tear_off_activation);
        }

        case op_tear_off_arguments: {
            m_graph.m_hasArguments = true;
            addToGraph(TearOffArguments, get(unmodifiedArgumentsRegister(currentInstruction[1].u.operand)), get(currentInstruction[2].u.operand));
            NEXT_OPCODE(op_tear_off_arguments);
        }
        case op_get_arguments_length: {
            m_graph.m_hasArguments = true;
            set(currentInstruction[1].u.operand, addToGraph(GetMyArgumentsLengthSafe));
            NEXT_OPCODE(op_get_arguments_length);
        }

        case op_get_argument_by_val: {
            m_graph.m_hasArguments = true;
            set(currentInstruction[1].u.operand,
                addToGraph(
                    GetMyArgumentByValSafe, OpInfo(0), OpInfo(getPrediction()),
                    get(currentInstruction[3].u.operand)));
            NEXT_OPCODE(op_get_argument_by_val);
        }
        case op_new_func: {
            if (!currentInstruction[3].u.operand) {
                set(currentInstruction[1].u.operand,
                    addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand)));
            } else {
                set(currentInstruction[1].u.operand,
                    addToGraph(
                        NewFunction,
                        OpInfo(currentInstruction[2].u.operand),
                        get(currentInstruction[1].u.operand)));
            }
            NEXT_OPCODE(op_new_func);
        }

        case op_new_func_exp: {
            set(currentInstruction[1].u.operand,
                addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand)));
            NEXT_OPCODE(op_new_func_exp);
        }
        case op_typeof: {
            set(currentInstruction[1].u.operand,
                addToGraph(TypeOf, get(currentInstruction[2].u.operand)));
            NEXT_OPCODE(op_typeof);
        }

        case op_to_number: {
            set(currentInstruction[1].u.operand,
                addToGraph(Identity, Edge(get(currentInstruction[2].u.operand), NumberUse)));
            NEXT_OPCODE(op_to_number);
        }

        default:
            // Parse failed! This should not happen because the capabilities checker
            // should have caught it.
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
}
void ByteCodeParser::linkBlock(BasicBlock* block, Vector<BlockIndex>& possibleTargets)
{
    ASSERT(!block->isLinked);
    ASSERT(!block->isEmpty());
    Node* node = block->last();
    ASSERT(node->isTerminal());

    switch (node->op()) {
    case Jump:
        node->setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node->takenBytecodeOffsetDuringParsing()));
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("Linked basic block %p to %p, #%u.\n", block, m_graph.m_blocks[node->takenBlockIndex()].get(), node->takenBlockIndex());
#endif
        break;

    case Branch:
        node->setTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node->takenBytecodeOffsetDuringParsing()));
        node->setNotTakenBlockIndex(m_graph.blockIndexForBytecodeOffset(possibleTargets, node->notTakenBytecodeOffsetDuringParsing()));
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("Linked basic block %p to %p, #%u and %p, #%u.\n", block, m_graph.m_blocks[node->takenBlockIndex()].get(), node->takenBlockIndex(), m_graph.m_blocks[node->notTakenBlockIndex()].get(), node->notTakenBlockIndex());
#endif
        break;

    default:
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("Marking basic block %p as linked.\n", block);
#endif
        break;
    }

#if !ASSERT_DISABLED
    block->isLinked = true;
#endif
}
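
// Parsing records branch targets as bytecode offsets; linkBlock() runs afterwards
// to translate those offsets into block indices once all blocks exist.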
void ByteCodeParser::linkBlocks(Vector<UnlinkedBlock>& unlinkedBlocks, Vector<BlockIndex>& possibleTargets)
{
    for (size_t i = 0; i < unlinkedBlocks.size(); ++i) {
        if (unlinkedBlocks[i].m_needsNormalLinking) {
            linkBlock(m_graph.m_blocks[unlinkedBlocks[i].m_blockIndex].get(), possibleTargets);
            unlinkedBlocks[i].m_needsNormalLinking = false;
        }
    }
}
void ByteCodeParser::buildOperandMapsIfNecessary()
{
    if (m_haveBuiltOperandMaps)
        return;

    for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i)
        m_identifierMap.add(m_codeBlock->identifier(i).impl(), i);
    for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i) {
        JSValue value = m_codeBlock->getConstant(i + FirstConstantRegisterIndex);
        if (!value)
            m_emptyJSValueIndex = i + FirstConstantRegisterIndex;
        else
            m_jsValueMap.add(JSValue::encode(value), i + FirstConstantRegisterIndex);
    }

    m_haveBuiltOperandMaps = true;
}
ByteCodeParser::InlineStackEntry::InlineStackEntry(
    ByteCodeParser* byteCodeParser,
    CodeBlock* codeBlock,
    CodeBlock* profiledBlock,
    BlockIndex callsiteBlockHead,
    JSFunction* callee, // Null if this is a closure call.
    VirtualRegister returnValueVR,
    VirtualRegister inlineCallFrameStart,
    int argumentCountIncludingThis,
    CodeSpecializationKind kind)
    : m_byteCodeParser(byteCodeParser)
    , m_codeBlock(codeBlock)
    , m_profiledBlock(profiledBlock)
    , m_exitProfile(profiledBlock->exitProfile())
    , m_callsiteBlockHead(callsiteBlockHead)
    , m_returnValue(returnValueVR)
    , m_lazyOperands(profiledBlock->lazyOperandValueProfiles())
    , m_didReturn(false)
    , m_didEarlyReturn(false)
    , m_caller(byteCodeParser->m_inlineStackTop)
{
    m_argumentPositions.resize(argumentCountIncludingThis);
    for (int i = 0; i < argumentCountIncludingThis; ++i) {
        byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition());
        ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last();
        m_argumentPositions[i] = argumentPosition;
    }

    // Track the code-block-global exit sites.
    if (m_exitProfile.hasExitSite(ArgumentsEscaped)) {
        byteCodeParser->m_graph.m_executablesWhoseArgumentsEscaped.add(
            codeBlock->ownerExecutable());
    }

    if (m_caller) {
        // Inline case.
        ASSERT(codeBlock != byteCodeParser->m_codeBlock);
        ASSERT(inlineCallFrameStart != InvalidVirtualRegister);
        ASSERT(callsiteBlockHead != NoBlock);

        InlineCallFrame inlineCallFrame;
        inlineCallFrame.executable.set(*byteCodeParser->m_vm, byteCodeParser->m_codeBlock->ownerExecutable(), codeBlock->ownerExecutable());
        inlineCallFrame.stackOffset = inlineCallFrameStart + JSStack::CallFrameHeaderSize;
        if (callee)
            inlineCallFrame.callee.set(*byteCodeParser->m_vm, byteCodeParser->m_codeBlock->ownerExecutable(), callee);
        inlineCallFrame.caller = byteCodeParser->currentCodeOrigin();
        inlineCallFrame.arguments.resize(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet.
        inlineCallFrame.isCall = isCall(kind);

        if (inlineCallFrame.caller.inlineCallFrame)
            inlineCallFrame.capturedVars = inlineCallFrame.caller.inlineCallFrame->capturedVars;
        else {
            for (int i = byteCodeParser->m_codeBlock->m_numVars; i--;) {
                if (byteCodeParser->m_codeBlock->isCaptured(i))
                    inlineCallFrame.capturedVars.set(i);
            }
        }

        for (int i = argumentCountIncludingThis; i--;) {
            if (codeBlock->isCaptured(argumentToOperand(i)))
                inlineCallFrame.capturedVars.set(argumentToOperand(i) + inlineCallFrame.stackOffset);
        }
        for (size_t i = codeBlock->m_numVars; i--;) {
            if (codeBlock->isCaptured(i))
                inlineCallFrame.capturedVars.set(i + inlineCallFrame.stackOffset);
        }

#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLogF("Current captured variables: ");
        inlineCallFrame.capturedVars.dump(WTF::dataFile());
        dataLogF("\n");
#endif

        byteCodeParser->m_codeBlock->inlineCallFrames().append(inlineCallFrame);
        m_inlineCallFrame = &byteCodeParser->m_codeBlock->inlineCallFrames().last();

        byteCodeParser->buildOperandMapsIfNecessary();

        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
        m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
        m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());

        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) {
            StringImpl* rep = codeBlock->identifier(i).impl();
            IdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_codeBlock->numberOfIdentifiers());
            if (result.isNewEntry)
                byteCodeParser->m_codeBlock->addIdentifier(Identifier(byteCodeParser->m_vm, rep));
            m_identifierRemap[i] = result.iterator->value;
        }
        for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) {
            JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex);
            if (!value) {
                if (byteCodeParser->m_emptyJSValueIndex == UINT_MAX) {
                    byteCodeParser->m_emptyJSValueIndex = byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex;
                    byteCodeParser->m_codeBlock->addConstant(JSValue());
                    byteCodeParser->m_constants.append(ConstantRecord());
                }
                m_constantRemap[i] = byteCodeParser->m_emptyJSValueIndex;
                continue;
            }
            JSValueMap::AddResult result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex);
            if (result.isNewEntry) {
                byteCodeParser->m_codeBlock->addConstant(value);
                byteCodeParser->m_constants.append(ConstantRecord());
            }
            m_constantRemap[i] = result.iterator->value;
        }
        for (unsigned i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) {
            // If we inline the same code block multiple times, we don't want to needlessly
            // duplicate its constant buffers.
            HashMap<ConstantBufferKey, unsigned>::iterator iter =
                byteCodeParser->m_constantBufferCache.find(ConstantBufferKey(codeBlock, i));
            if (iter != byteCodeParser->m_constantBufferCache.end()) {
                m_constantBufferRemap[i] = iter->value;
                continue;
            }
            Vector<JSValue>& buffer = codeBlock->constantBufferAsVector(i);
            unsigned newIndex = byteCodeParser->m_codeBlock->addConstantBuffer(buffer);
            m_constantBufferRemap[i] = newIndex;
            byteCodeParser->m_constantBufferCache.add(ConstantBufferKey(codeBlock, i), newIndex);
        }
        m_callsiteBlockHeadNeedsLinking = true;
    } else {
        // Machine code block case.
        ASSERT(codeBlock == byteCodeParser->m_codeBlock);
        ASSERT(!callee);
        ASSERT(returnValueVR == InvalidVirtualRegister);
        ASSERT(inlineCallFrameStart == InvalidVirtualRegister);
        ASSERT(callsiteBlockHead == NoBlock);

        m_inlineCallFrame = 0;

        m_identifierRemap.resize(codeBlock->numberOfIdentifiers());
        m_constantRemap.resize(codeBlock->numberOfConstantRegisters());
        m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers());
        for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i)
            m_identifierRemap[i] = i;
        for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i)
            m_constantRemap[i] = i + FirstConstantRegisterIndex;
        for (size_t i = 0; i < codeBlock->numberOfConstantBuffers(); ++i)
            m_constantBufferRemap[i] = i;
        m_callsiteBlockHeadNeedsLinking = false;
    }

    for (size_t i = 0; i < m_constantRemap.size(); ++i)
        ASSERT(m_constantRemap[i] >= static_cast<unsigned>(FirstConstantRegisterIndex));

    byteCodeParser->m_inlineStackTop = this;
}
void ByteCodeParser::parseCodeBlock()
{
    CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock;

    if (m_graph.m_compilation) {
        m_graph.m_compilation->addProfiledBytecodes(
            *m_vm->m_perBytecodeProfiler, m_inlineStackTop->m_profiledBlock);
    }

    bool shouldDumpBytecode = Options::dumpBytecodeAtDFGTime();
#if DFG_ENABLE(DEBUG_VERBOSE)
    shouldDumpBytecode |= true;
#endif
    if (shouldDumpBytecode) {
        dataLog("Parsing ", *codeBlock);
        if (inlineCallFrame()) {
            dataLog(
                " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT),
                " ", inlineCallFrame()->caller);
        }
        dataLog(
            ": captureCount = ", codeBlock->symbolTable() ? codeBlock->symbolTable()->captureCount() : 0,
            ", needsFullScopeChain = ", codeBlock->needsFullScopeChain(),
            ", needsActivation = ", codeBlock->ownerExecutable()->needsActivation(),
            ", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n");
        codeBlock->baselineVersion()->dumpBytecode();
    }

    Vector<unsigned, 32> jumpTargets;
    computePreciseJumpTargets(codeBlock, jumpTargets);
    if (Options::dumpBytecodeAtDFGTime()) {
        dataLog("Jump targets: ");
        CommaPrinter comma;
        for (unsigned i = 0; i < jumpTargets.size(); ++i)
            dataLog(comma, jumpTargets[i]);
        dataLog("\n");
    }

    for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= jumpTargets.size(); ++jumpTargetIndex) {
        // The maximum bytecode offset to go into the current basicblock is either the next jump target, or the end of the instructions.
        unsigned limit = jumpTargetIndex < jumpTargets.size() ? jumpTargets[jumpTargetIndex] : codeBlock->instructions().size();
#if DFG_ENABLE(DEBUG_VERBOSE)
        dataLog(
            "Parsing bytecode with limit ", pointerDump(inlineCallFrame()),
            " bc#", limit, " at inline depth ",
            CodeOrigin::inlineDepthForCallFrame(inlineCallFrame()), ".\n");
#endif
        ASSERT(m_currentIndex < limit);

        // Loop until we reach the current limit (i.e. next jump target).
        do {
            if (!m_currentBlock) {
                // Check if we can use the last block.
                if (!m_graph.m_blocks.isEmpty() && m_graph.m_blocks.last()->isEmpty()) {
                    // This must be a block belonging to us.
                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex == m_graph.m_blocks.size() - 1);
                    // Either the block is linkable or it isn't. If it's linkable then it's the last
                    // block in the blockLinkingTargets list. If it's not then the last block will
                    // have a lower bytecode index that the one we're about to give to this block.
                    if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin != m_currentIndex) {
                        // Make the block linkable.
                        ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_blockLinkingTargets.last()]->bytecodeBegin < m_currentIndex);
                        m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size() - 1);
                    }
                    // Change its bytecode begin and continue.
                    m_currentBlock = m_graph.m_blocks.last().get();
#if DFG_ENABLE(DEBUG_VERBOSE)
                    dataLogF("Reascribing bytecode index of block %p from bc#%u to bc#%u (peephole case).\n", m_currentBlock, m_currentBlock->bytecodeBegin, m_currentIndex);
#endif
                    m_currentBlock->bytecodeBegin = m_currentIndex;
                } else {
                    OwnPtr<BasicBlock> block = adoptPtr(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals));
#if DFG_ENABLE(DEBUG_VERBOSE)
                    dataLogF("Creating basic block %p, #%zu for %p bc#%u at inline depth %u.\n", block.get(), m_graph.m_blocks.size(), m_inlineStackTop->executable(), m_currentIndex, CodeOrigin::inlineDepthForCallFrame(inlineCallFrame()));
#endif
                    m_currentBlock = block.get();
                    // This assertion checks two things:
                    // 1) If the bytecodeBegin is greater than currentIndex, then something has gone
                    //    horribly wrong. So, we're probably generating incorrect code.
                    // 2) If the bytecodeBegin is equal to the currentIndex, then we failed to do
                    //    a peephole coalescing of this block in the if statement above. So, we're
                    //    generating suboptimal code and leaving more work for the CFG simplifier.
                    ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_graph.m_blocks[m_inlineStackTop->m_unlinkedBlocks.last().m_blockIndex]->bytecodeBegin < m_currentIndex);
                    m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.m_blocks.size()));
                    m_inlineStackTop->m_blockLinkingTargets.append(m_graph.m_blocks.size());
                    // The first block is definitely an OSR target.
                    if (!m_graph.m_blocks.size())
                        block->isOSRTarget = true;
                    m_graph.m_blocks.append(block.release());
                    prepareToParseBlock();
                }
            }

            bool shouldContinueParsing = parseBlock(limit);

            // We should not have gone beyond the limit.
            ASSERT(m_currentIndex <= limit);

            // We should have planted a terminal, or we just gave up because
            // we realized that the jump target information is imprecise, or we
            // are at the end of an inline function, or we realized that we
            // should stop parsing because there was a return in the first
            // basic block.
            ASSERT(m_currentBlock->isEmpty() || m_currentBlock->last()->isTerminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()) || !shouldContinueParsing);

            if (!shouldContinueParsing)
                return;

            m_currentBlock = 0;
        } while (m_currentIndex < limit);
    }

    // Should have reached the end of the instructions.
    ASSERT(m_currentIndex == codeBlock->instructions().size());
}
bool ByteCodeParser::parse()
{
    // Set during construction.
    ASSERT(!m_currentIndex);

#if DFG_ENABLE(ALL_VARIABLES_CAPTURED)
    // We should be pretending that the code has an activation.
    ASSERT(m_graph.needsActivation());
#endif

    InlineStackEntry inlineStackEntry(
        this, m_codeBlock, m_profiledBlock, NoBlock, 0, InvalidVirtualRegister, InvalidVirtualRegister,
        m_codeBlock->numParameters(), CodeForCall);

    parseCodeBlock();

    linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets);
    m_graph.determineReachability();

    ASSERT(m_preservedVars.size());
    size_t numberOfLocals = 0;
    for (size_t i = m_preservedVars.size(); i--;) {
        if (m_preservedVars.quickGet(i)) {
            numberOfLocals = i + 1;
            break;
        }
    }

    for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) {
        BasicBlock* block = m_graph.m_blocks[blockIndex].get();
        if (!block->isReachable) {
            m_graph.m_blocks[blockIndex].clear();
            continue;
        }

        block->variablesAtHead.ensureLocals(numberOfLocals);
        block->variablesAtTail.ensureLocals(numberOfLocals);
    }

    m_graph.m_preservedVars = m_preservedVars;
    m_graph.m_localVars = m_numLocals;
    m_graph.m_parameterSlots = m_parameterSlots;

    return true;
}
bool parse(ExecState*, Graph& graph)
{
    SamplingRegion samplingRegion("DFG Parsing");
#if DFG_DEBUG_LOCAL_DISBALE
    UNUSED_PARAM(graph);
    return true;
#else
    return ByteCodeParser(graph).parse();
#endif
}

} } // namespace JSC::DFG