X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/14957cd040308e3eeec43d26bae5d76da13fcd85..ef99ff287df9046eb88937225e0554eabb00e33c:/dfg/DFGByteCodeParser.cpp

diff --git a/dfg/DFGByteCodeParser.cpp b/dfg/DFGByteCodeParser.cpp
index 7c333de..7067480 100644
--- a/dfg/DFGByteCodeParser.cpp
+++ b/dfg/DFGByteCodeParser.cpp
@@ -1,5 +1,5 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ /*
+ * Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -28,315 +28,594 @@
 #if ENABLE(DFG_JIT)
 
-#include "DFGAliasTracker.h"
-#include "DFGScoreBoard.h"
+#include "ArrayConstructor.h"
+#include "CallLinkStatus.h"
 #include "CodeBlock.h"
+#include "CodeBlockWithJITType.h"
+#include "DFGArrayMode.h"
+#include "DFGCapabilities.h"
+#include "DFGJITCode.h"
+#include "GetByIdStatus.h"
+#include "Heap.h"
+#include "JSActivation.h"
+#include "JSCInlines.h"
+#include "PreciseJumpTargets.h"
+#include "PutByIdStatus.h"
+#include "StackAlignment.h"
+#include "StringConstructor.h"
+#include
+#include
+#include
+#include
 
 namespace JSC { namespace DFG {
 
-#if ENABLE(DFG_JIT_RESTRICTIONS)
-// FIXME: Temporarily disable arithmetic, until we fix associated performance regressions.
-#define ARITHMETIC_OP() m_parseFailed = true
-#else
-#define ARITHMETIC_OP() ((void)0)
-#endif
+class ConstantBufferKey {
+public:
+    ConstantBufferKey()
+        : m_codeBlock(0)
+        , m_index(0)
+    {
+    }
+
+    ConstantBufferKey(WTF::HashTableDeletedValueType)
+        : m_codeBlock(0)
+        , m_index(1)
+    {
+    }
+
+    ConstantBufferKey(CodeBlock* codeBlock, unsigned index)
+        : m_codeBlock(codeBlock)
+        , m_index(index)
+    {
+    }
+
+    bool operator==(const ConstantBufferKey& other) const
+    {
+        return m_codeBlock == other.m_codeBlock
+            && m_index == other.m_index;
+    }
+
+    unsigned hash() const
+    {
+        return WTF::PtrHash<CodeBlock*>::hash(m_codeBlock) ^ m_index;
+    }
+
+    bool isHashTableDeletedValue() const
+    {
+        return !m_codeBlock && m_index;
+    }
+
+    CodeBlock* codeBlock() const { return m_codeBlock; }
+    unsigned index() const { return m_index; }
+
+private:
+    CodeBlock* m_codeBlock;
+    unsigned m_index;
+};
+
+struct ConstantBufferKeyHash {
+    static unsigned hash(const ConstantBufferKey& key) { return key.hash(); }
+    static bool equal(const ConstantBufferKey& a, const ConstantBufferKey& b)
+    {
+        return a == b;
+    }
+
+    static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} } // namespace JSC::DFG
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::DFG::ConstantBufferKey> {
+    typedef JSC::DFG::ConstantBufferKeyHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::DFG::ConstantBufferKey> : SimpleClassHashTraits<JSC::DFG::ConstantBufferKey> { };
+
+} // namespace WTF
+
+namespace JSC { namespace DFG {
 
 // === ByteCodeParser ===
 //
 // This class is used to compile the dataflow graph from a CodeBlock.
class ByteCodeParser { public: - ByteCodeParser(JSGlobalData* globalData, CodeBlock* codeBlock, Graph& graph) - : m_globalData(globalData) - , m_codeBlock(codeBlock) + ByteCodeParser(Graph& graph) + : m_vm(&graph.m_vm) + , m_codeBlock(graph.m_codeBlock) + , m_profiledBlock(graph.m_profiledBlock) , m_graph(graph) + , m_currentBlock(0) , m_currentIndex(0) - , m_parseFailed(false) , m_constantUndefined(UINT_MAX) , m_constantNull(UINT_MAX) + , m_constantNaN(UINT_MAX) , m_constant1(UINT_MAX) - , m_constants(codeBlock->numberOfConstantRegisters()) - , m_numArguments(codeBlock->m_numParameters) - , m_numLocals(codeBlock->m_numCalleeRegisters) - , m_preservedVars(codeBlock->m_numVars) + , m_constants(m_codeBlock->numberOfConstantRegisters()) + , m_numArguments(m_codeBlock->numParameters()) + , m_numLocals(m_codeBlock->m_numCalleeRegisters) + , m_parameterSlots(0) + , m_numPassedVarArgs(0) + , m_inlineStackTop(0) + , m_haveBuiltOperandMaps(false) + , m_emptyJSValueIndex(UINT_MAX) + , m_currentInstruction(0) { + ASSERT(m_profiledBlock); } - + // Parse a full CodeBlock of bytecode. bool parse(); - + private: + struct InlineStackEntry; + + // Just parse from m_currentIndex to the end of the current CodeBlock. + void parseCodeBlock(); + + void ensureLocals(unsigned newNumLocals) + { + if (newNumLocals <= m_numLocals) + return; + m_numLocals = newNumLocals; + for (size_t i = 0; i < m_graph.numBlocks(); ++i) + m_graph.block(i)->ensureLocals(newNumLocals); + } + + // Helper for min and max. + bool handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis); + + // Handle calls. This resolves issues surrounding inlining and intrinsics. + void handleCall(int result, NodeType op, CodeSpecializationKind, unsigned instructionSize, int callee, int argCount, int registerOffset); + void handleCall(Instruction* pc, NodeType op, CodeSpecializationKind); + void emitFunctionChecks(const CallLinkStatus&, Node* callTarget, int registerOffset, CodeSpecializationKind); + void emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind); + // Handle inlining. Return true if it succeeded, false if we need to plant a call. + bool handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus&, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind); + // Handle intrinsic functions. Return true if it succeeded, false if we need to plant a call. 
+ bool handleIntrinsic(int resultOperand, Intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction); + bool handleTypedArrayConstructor(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, TypedArrayType); + bool handleConstantInternalFunction(int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind); + Node* handlePutByOffset(Node* base, unsigned identifier, PropertyOffset, Node* value); + Node* handleGetByOffset(SpeculatedType, Node* base, unsigned identifierNumber, PropertyOffset); + void handleGetByOffset( + int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber, + PropertyOffset); + void handleGetById( + int destinationOperand, SpeculatedType, Node* base, unsigned identifierNumber, + const GetByIdStatus&); + void emitPutById( + Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, bool isDirect); + void handlePutById( + Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus&, + bool isDirect); + Node* emitPrototypeChecks(Structure*, IntendedStructureChain*); + + Node* getScope(bool skipTop, unsigned skipCount); + + // Prepare to parse a block. + void prepareToParseBlock(); // Parse a single basic block of bytecode instructions. bool parseBlock(unsigned limit); - // Setup predecessor links in the graph's BasicBlocks. - void setupPredecessors(); - // Link GetLocal & SetLocal nodes, to ensure live values are generated. - enum PhiStackType { - LocalPhiStack, - ArgumentPhiStack - }; - template - void processPhiStack(); - // Add spill locations to nodes. - void allocateVirtualRegisters(); - + // Link block successors. + void linkBlock(BasicBlock*, Vector& possibleTargets); + void linkBlocks(Vector& unlinkedBlocks, Vector& possibleTargets); + + VariableAccessData* newVariableAccessData(VirtualRegister operand, bool isCaptured) + { + ASSERT(!operand.isConstant()); + + m_graph.m_variableAccessData.append(VariableAccessData(operand, isCaptured)); + return &m_graph.m_variableAccessData.last(); + } + // Get/Set the operands/result of a bytecode instruction. - NodeIndex get(int operand) + Node* getDirect(VirtualRegister operand) { // Is this a constant? - if (operand >= FirstConstantRegisterIndex) { - unsigned constant = operand - FirstConstantRegisterIndex; + if (operand.isConstant()) { + unsigned constant = operand.toConstantIndex(); ASSERT(constant < m_constants.size()); return getJSConstant(constant); } // Is this an argument? - if (operandIsArgument(operand)) + if (operand.isArgument()) return getArgument(operand); // Must be a local. - return getLocal((unsigned)operand); + return getLocal(operand); } - void set(int operand, NodeIndex value, PredictedType prediction = PredictNone) - { - m_graph.predict(operand, prediction); - // Is this an argument? 
- if (operandIsArgument(operand)) { - setArgument(operand, value); - return; + Node* get(VirtualRegister operand) + { + if (inlineCallFrame()) { + if (!inlineCallFrame()->isClosureCall) { + JSFunction* callee = inlineCallFrame()->calleeConstant(); + if (operand.offset() == JSStack::Callee) + return cellConstant(callee); + if (operand.offset() == JSStack::ScopeChain) + return cellConstant(callee->scope()); + } + } else if (operand.offset() == JSStack::Callee) + return addToGraph(GetCallee); + else if (operand.offset() == JSStack::ScopeChain) + return addToGraph(GetMyScope); + + return getDirect(m_inlineStackTop->remapOperand(operand)); + } + + enum SetMode { + // A normal set which follows a two-phase commit that spans code origins. During + // the current code origin it issues a MovHint, and at the start of the next + // code origin there will be a SetLocal. If the local needs flushing, the second + // SetLocal will be preceded with a Flush. + NormalSet, + + // A set where the SetLocal happens immediately and there is still a Flush. This + // is relevant when assigning to a local in tricky situations for the delayed + // SetLocal logic but where we know that we have not performed any side effects + // within this code origin. This is a safe replacement for NormalSet anytime we + // know that we have not yet performed side effects in this code origin. + ImmediateSetWithFlush, + + // A set where the SetLocal happens immediately and we do not Flush it even if + // this is a local that is marked as needing it. This is relevant when + // initializing locals at the top of a function. + ImmediateNakedSet + }; + Node* setDirect(VirtualRegister operand, Node* value, SetMode setMode = NormalSet) + { + addToGraph(MovHint, OpInfo(operand.offset()), value); + + DelayedSetLocal delayed = DelayedSetLocal(operand, value); + + if (setMode == NormalSet) { + m_setLocalQueue.append(delayed); + return 0; } + + return delayed.execute(this, setMode); + } - // Must be a local. - setLocal((unsigned)operand, value); + Node* set(VirtualRegister operand, Node* value, SetMode setMode = NormalSet) + { + return setDirect(m_inlineStackTop->remapOperand(operand), value, setMode); + } + + Node* injectLazyOperandSpeculation(Node* node) + { + ASSERT(node->op() == GetLocal); + ASSERT(node->origin.semantic.bytecodeIndex == m_currentIndex); + ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); + LazyOperandValueProfileKey key(m_currentIndex, node->local()); + SpeculatedType prediction = m_inlineStackTop->m_lazyOperands.prediction(locker, key); + node->variableAccessData()->predict(prediction); + return node; } // Used in implementing get/set, above, where the operand is a local variable. - NodeIndex getLocal(unsigned operand) + Node* getLocal(VirtualRegister operand) { - NodeIndex nodeIndex = m_currentBlock->m_locals[operand].value; - - if (nodeIndex != NoNode) { - Node& node = m_graph[nodeIndex]; - if (node.op == GetLocal) - return nodeIndex; - ASSERT(node.op == SetLocal); - return node.child1; + unsigned local = operand.toLocal(); + + if (local < m_localWatchpoints.size()) { + if (VariableWatchpointSet* set = m_localWatchpoints[local]) { + if (JSValue value = set->inferredValue()) { + addToGraph(FunctionReentryWatchpoint, OpInfo(m_codeBlock->symbolTable())); + addToGraph(VariableWatchpoint, OpInfo(set)); + // Note: this is very special from an OSR exit standpoint. We wouldn't be + // able to do this for most locals, but it works here because we're dealing + // with a flushed local. 
For most locals we would need to issue a GetLocal + // here and ensure that we have uses in DFG IR wherever there would have + // been uses in bytecode. Clearly this optimization does not do this. But + // that's fine, because we don't need to track liveness for captured + // locals, and this optimization only kicks in for captured locals. + return inferredConstant(value); + } + } } - // Check for reads of temporaries from prior blocks, - // expand m_preservedVars to cover these. - m_preservedVars = std::max(m_preservedVars, operand + 1); - - NodeIndex phi = addToGraph(Phi); - m_localPhiStack.append(PhiStackEntry(m_currentBlock, phi, operand)); - nodeIndex = addToGraph(GetLocal, OpInfo(operand), phi); - m_currentBlock->m_locals[operand].value = nodeIndex; - return nodeIndex; + Node* node = m_currentBlock->variablesAtTail.local(local); + bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame()); + + // This has two goals: 1) link together variable access datas, and 2) + // try to avoid creating redundant GetLocals. (1) is required for + // correctness - no other phase will ensure that block-local variable + // access data unification is done correctly. (2) is purely opportunistic + // and is meant as an compile-time optimization only. + + VariableAccessData* variable; + + if (node) { + variable = node->variableAccessData(); + variable->mergeIsCaptured(isCaptured); + + if (!isCaptured) { + switch (node->op()) { + case GetLocal: + return node; + case SetLocal: + return node->child1().node(); + default: + break; + } + } + } else + variable = newVariableAccessData(operand, isCaptured); + + node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable))); + m_currentBlock->variablesAtTail.local(local) = node; + return node; } - void setLocal(unsigned operand, NodeIndex value) + + Node* setLocal(VirtualRegister operand, Node* value, SetMode setMode = NormalSet) { - m_currentBlock->m_locals[operand].value = addToGraph(SetLocal, OpInfo(operand), value); + unsigned local = operand.toLocal(); + bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame()); + + if (setMode != ImmediateNakedSet) { + ArgumentPosition* argumentPosition = findArgumentPositionForLocal(operand); + if (isCaptured || argumentPosition) + flushDirect(operand, argumentPosition); + } + + VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured); + variableAccessData->mergeStructureCheckHoistingFailed( + m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache) + || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint)); + variableAccessData->mergeCheckArrayHoistingFailed( + m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)); + Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value); + m_currentBlock->variablesAtTail.local(local) = node; + return node; } // Used in implementing get/set, above, where the operand is an argument. 
- NodeIndex getArgument(unsigned operand) + Node* getArgument(VirtualRegister operand) { - unsigned argument = operand + m_codeBlock->m_numParameters + RegisterFile::CallFrameHeaderSize; + unsigned argument = operand.toArgument(); ASSERT(argument < m_numArguments); + + Node* node = m_currentBlock->variablesAtTail.argument(argument); + bool isCaptured = m_codeBlock->isCaptured(operand); - NodeIndex nodeIndex = m_currentBlock->m_arguments[argument].value; - - if (nodeIndex != NoNode) { - Node& node = m_graph[nodeIndex]; - if (node.op == GetLocal) - return nodeIndex; - ASSERT(node.op == SetLocal); - return node.child1; - } - - NodeIndex phi = addToGraph(Phi); - m_argumentPhiStack.append(PhiStackEntry(m_currentBlock, phi, argument)); - nodeIndex = addToGraph(GetLocal, OpInfo(operand), phi); - m_currentBlock->m_arguments[argument].value = nodeIndex; - return nodeIndex; + VariableAccessData* variable; + + if (node) { + variable = node->variableAccessData(); + variable->mergeIsCaptured(isCaptured); + + switch (node->op()) { + case GetLocal: + return node; + case SetLocal: + return node->child1().node(); + default: + break; + } + } else + variable = newVariableAccessData(operand, isCaptured); + + node = injectLazyOperandSpeculation(addToGraph(GetLocal, OpInfo(variable))); + m_currentBlock->variablesAtTail.argument(argument) = node; + return node; } - void setArgument(int operand, NodeIndex value) + Node* setArgument(VirtualRegister operand, Node* value, SetMode setMode = NormalSet) { - unsigned argument = operand + m_codeBlock->m_numParameters + RegisterFile::CallFrameHeaderSize; + unsigned argument = operand.toArgument(); ASSERT(argument < m_numArguments); + + bool isCaptured = m_codeBlock->isCaptured(operand); - m_currentBlock->m_arguments[argument].value = addToGraph(SetLocal, OpInfo(operand), value); - } + VariableAccessData* variableAccessData = newVariableAccessData(operand, isCaptured); - // Get an operand, and perform a ToInt32/ToNumber conversion on it. - NodeIndex getToInt32(int operand) + // Always flush arguments, except for 'this'. If 'this' is created by us, + // then make sure that it's never unboxed. + if (argument) { + if (setMode != ImmediateNakedSet) + flushDirect(operand); + } else if (m_codeBlock->specializationKind() == CodeForConstruct) + variableAccessData->mergeShouldNeverUnbox(true); + + variableAccessData->mergeStructureCheckHoistingFailed( + m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache) + || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint)); + variableAccessData->mergeCheckArrayHoistingFailed( + m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)); + Node* node = addToGraph(SetLocal, OpInfo(variableAccessData), value); + m_currentBlock->variablesAtTail.argument(argument) = node; + return node; + } + + ArgumentPosition* findArgumentPositionForArgument(int argument) { - // Avoid wastefully adding a JSConstant node to the graph, only to - // replace it with a Int32Constant (which is what would happen if - // we called 'toInt32(get(operand))' in this case). 
- if (operand >= FirstConstantRegisterIndex) { - JSValue v = m_codeBlock->getConstant(operand); - if (v.isInt32()) - return getInt32Constant(v.asInt32(), operand - FirstConstantRegisterIndex); - } - return toInt32(get(operand)); + InlineStackEntry* stack = m_inlineStackTop; + while (stack->m_inlineCallFrame) + stack = stack->m_caller; + return stack->m_argumentPositions[argument]; } - NodeIndex getToNumber(int operand) + + ArgumentPosition* findArgumentPositionForLocal(VirtualRegister operand) { - // Avoid wastefully adding a JSConstant node to the graph, only to - // replace it with a DoubleConstant (which is what would happen if - // we called 'toNumber(get(operand))' in this case). - if (operand >= FirstConstantRegisterIndex) { - JSValue v = m_codeBlock->getConstant(operand); - if (v.isNumber()) - return getDoubleConstant(v.uncheckedGetNumber(), operand - FirstConstantRegisterIndex); + for (InlineStackEntry* stack = m_inlineStackTop; ; stack = stack->m_caller) { + InlineCallFrame* inlineCallFrame = stack->m_inlineCallFrame; + if (!inlineCallFrame) + break; + if (operand.offset() < static_cast(inlineCallFrame->stackOffset + JSStack::CallFrameHeaderSize)) + continue; + if (operand.offset() == inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset()) + continue; + if (operand.offset() >= static_cast(inlineCallFrame->stackOffset + CallFrame::thisArgumentOffset() + inlineCallFrame->arguments.size())) + continue; + int argument = VirtualRegister(operand.offset() - inlineCallFrame->stackOffset).toArgument(); + return stack->m_argumentPositions[argument]; } - return toNumber(get(operand)); + return 0; } - - // Perform an ES5 ToInt32 operation - returns a node of type NodeResultInt32. - NodeIndex toInt32(NodeIndex index) + + ArgumentPosition* findArgumentPosition(VirtualRegister operand) { - Node& node = m_graph[index]; - - if (node.hasInt32Result()) - return index; - - if (node.hasDoubleResult()) { - if (node.op == DoubleConstant) - return getInt32Constant(JSC::toInt32(valueOfDoubleConstant(index)), node.constantNumber()); - // 'NumberToInt32(Int32ToNumber(X))' == X, and 'NumberToInt32(UInt32ToNumber(X)) == X' - if (node.op == Int32ToNumber || node.op == UInt32ToNumber) - return node.child1; - - // We unique NumberToInt32 nodes in a map to prevent duplicate conversions. - pair result = m_numberToInt32Nodes.add(index, NoNode); - // Either we added a new value, or the existing value in the map is non-zero. - ASSERT(result.second == (result.first->second == NoNode)); - if (result.second) - result.first->second = addToGraph(NumberToInt32, index); - return result.first->second; - } - - // Check for numeric constants boxed as JSValues. - if (node.op == JSConstant) { - JSValue v = valueOfJSConstant(index); - if (v.isInt32()) - return getInt32Constant(v.asInt32(), node.constantNumber()); - if (v.isNumber()) - return getInt32Constant(JSC::toInt32(v.uncheckedGetNumber()), node.constantNumber()); - } - - return addToGraph(ValueToInt32, index); + if (operand.isArgument()) + return findArgumentPositionForArgument(operand.toArgument()); + return findArgumentPositionForLocal(operand); } - // Perform an ES5 ToNumber operation - returns a node of type NodeResultDouble. 
- NodeIndex toNumber(NodeIndex index) + void addConstant(JSValue value) { - Node& node = m_graph[index]; - - if (node.hasDoubleResult()) - return index; - - if (node.hasInt32Result()) { - if (node.op == Int32Constant) - return getDoubleConstant(valueOfInt32Constant(index), node.constantNumber()); - - // We unique Int32ToNumber nodes in a map to prevent duplicate conversions. - pair result = m_int32ToNumberNodes.add(index, NoNode); - // Either we added a new value, or the existing value in the map is non-zero. - ASSERT(result.second == (result.first->second == NoNode)); - if (result.second) - result.first->second = addToGraph(Int32ToNumber, index); - return result.first->second; - } - - if (node.op == JSConstant) { - JSValue v = valueOfJSConstant(index); - if (v.isNumber()) - return getDoubleConstant(v.uncheckedGetNumber(), node.constantNumber()); + unsigned constantIndex = m_codeBlock->addConstantLazily(); + initializeLazyWriteBarrierForConstant( + m_graph.m_plan.writeBarriers, + m_codeBlock->constants()[constantIndex], + m_codeBlock, + constantIndex, + m_codeBlock->ownerExecutable(), + value); + } + + void flush(VirtualRegister operand) + { + flushDirect(m_inlineStackTop->remapOperand(operand)); + } + + void flushDirect(VirtualRegister operand) + { + flushDirect(operand, findArgumentPosition(operand)); + } + + void flushDirect(VirtualRegister operand, ArgumentPosition* argumentPosition) + { + bool isCaptured = m_codeBlock->isCaptured(operand, inlineCallFrame()); + + ASSERT(!operand.isConstant()); + + Node* node = m_currentBlock->variablesAtTail.operand(operand); + + VariableAccessData* variable; + + if (node) { + variable = node->variableAccessData(); + variable->mergeIsCaptured(isCaptured); + } else + variable = newVariableAccessData(operand, isCaptured); + + node = addToGraph(Flush, OpInfo(variable)); + m_currentBlock->variablesAtTail.operand(operand) = node; + if (argumentPosition) + argumentPosition->addVariable(variable); + } + + void flush(InlineStackEntry* inlineStackEntry) + { + int numArguments; + if (InlineCallFrame* inlineCallFrame = inlineStackEntry->m_inlineCallFrame) { + numArguments = inlineCallFrame->arguments.size(); + if (inlineCallFrame->isClosureCall) { + flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::Callee))); + flushDirect(inlineStackEntry->remapOperand(VirtualRegister(JSStack::ScopeChain))); + } + } else + numArguments = inlineStackEntry->m_codeBlock->numParameters(); + for (unsigned argument = numArguments; argument-- > 1;) + flushDirect(inlineStackEntry->remapOperand(virtualRegisterForArgument(argument))); + for (int local = 0; local < inlineStackEntry->m_codeBlock->m_numVars; ++local) { + if (!inlineStackEntry->m_codeBlock->isCaptured(virtualRegisterForLocal(local))) + continue; + flushDirect(inlineStackEntry->remapOperand(virtualRegisterForLocal(local))); } - - return addToGraph(ValueToNumber, index); } + void flushForTerminal() + { + for (InlineStackEntry* inlineStackEntry = m_inlineStackTop; inlineStackEntry; inlineStackEntry = inlineStackEntry->m_caller) + flush(inlineStackEntry); + } - // Used in implementing get, above, where the operand is a constant. 
- NodeIndex getInt32Constant(int32_t value, unsigned constant) + void flushForReturn() { - NodeIndex index = m_constants[constant].asInt32; - if (index != NoNode) - return index; - NodeIndex resultIndex = addToGraph(Int32Constant, OpInfo(constant)); - m_graph[resultIndex].setInt32Constant(value); - m_constants[constant].asInt32 = resultIndex; - return resultIndex; + flush(m_inlineStackTop); } - NodeIndex getDoubleConstant(double value, unsigned constant) + + void flushIfTerminal(SwitchData& data) + { + if (data.fallThrough.bytecodeIndex() > m_currentIndex) + return; + + for (unsigned i = data.cases.size(); i--;) { + if (data.cases[i].target.bytecodeIndex() > m_currentIndex) + return; + } + + flushForTerminal(); + } + + // NOTE: Only use this to construct constants that arise from non-speculative + // constant folding. I.e. creating constants using this if we had constant + // field inference would be a bad idea, since the bytecode parser's folding + // doesn't handle liveness preservation. + Node* getJSConstantForValue(JSValue constantValue) { - NodeIndex index = m_constants[constant].asNumeric; - if (index != NoNode) - return index; - NodeIndex resultIndex = addToGraph(DoubleConstant, OpInfo(constant)); - m_graph[resultIndex].setDoubleConstant(value); - m_constants[constant].asNumeric = resultIndex; - return resultIndex; + unsigned constantIndex; + if (!m_codeBlock->findConstant(constantValue, constantIndex)) { + addConstant(constantValue); + m_constants.append(ConstantRecord()); + } + + ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters()); + + return getJSConstant(constantIndex); } - NodeIndex getJSConstant(unsigned constant) + + Node* getJSConstant(unsigned constant) { - NodeIndex index = m_constants[constant].asJSValue; - if (index != NoNode) - return index; + Node* node = m_constants[constant].asJSValue; + if (node) + return node; - NodeIndex resultIndex = addToGraph(JSConstant, OpInfo(constant)); - m_constants[constant].asJSValue = resultIndex; - return resultIndex; + Node* result = addToGraph(JSConstant, OpInfo(constant)); + m_constants[constant].asJSValue = result; + return result; } // Helper functions to get/set the this value. - NodeIndex getThis() + Node* getThis() { - return getArgument(m_codeBlock->thisRegister()); + return get(m_inlineStackTop->m_codeBlock->thisRegister()); } - void setThis(NodeIndex value) + + void setThis(Node* value) { - setArgument(m_codeBlock->thisRegister(), value); + set(m_inlineStackTop->m_codeBlock->thisRegister(), value); } // Convenience methods for checking nodes for constants. - bool isInt32Constant(NodeIndex index) - { - return m_graph[index].op == Int32Constant; - } - bool isDoubleConstant(NodeIndex index) + bool isJSConstant(Node* node) { - return m_graph[index].op == DoubleConstant; + return node->op() == JSConstant; } - bool isJSConstant(NodeIndex index) + bool isInt32Constant(Node* node) { - return m_graph[index].op == JSConstant; + return isJSConstant(node) && valueOfJSConstant(node).isInt32(); } - // Convenience methods for getting constant values. 
- int32_t valueOfInt32Constant(NodeIndex index) - { - ASSERT(isInt32Constant(index)); - return m_graph[index].int32Constant(); - } - double valueOfDoubleConstant(NodeIndex index) + JSValue valueOfJSConstant(Node* node) { - ASSERT(isDoubleConstant(index)); - return m_graph[index].numericConstant(); + ASSERT(isJSConstant(node)); + return m_codeBlock->getConstant(FirstConstantRegisterIndex + node->constantNumber()); } - JSValue valueOfJSConstant(NodeIndex index) + int32_t valueOfInt32Constant(Node* node) { - ASSERT(isJSConstant(index)); - return m_codeBlock->getConstant(FirstConstantRegisterIndex + m_graph[index].constantNumber()); + ASSERT(isInt32Constant(node)); + return valueOfJSConstant(node).asInt32(); } - + // This method returns a JSConstant with the value 'undefined'. - NodeIndex constantUndefined() + Node* constantUndefined() { // Has m_constantUndefined been set up yet? if (m_constantUndefined == UINT_MAX) { @@ -350,7 +629,7 @@ private: // Add undefined to the CodeBlock's constants, and add a corresponding slot in m_constants. ASSERT(m_constants.size() == numberOfConstants); - m_codeBlock->addConstant(jsUndefined()); + addConstant(jsUndefined()); m_constants.append(ConstantRecord()); ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters()); } @@ -361,7 +640,7 @@ private: } // This method returns a JSConstant with the value 'null'. - NodeIndex constantNull() + Node* constantNull() { // Has m_constantNull been set up yet? if (m_constantNull == UINT_MAX) { @@ -375,7 +654,7 @@ private: // Add null to the CodeBlock's constants, and add a corresponding slot in m_constants. ASSERT(m_constants.size() == numberOfConstants); - m_codeBlock->addConstant(jsNull()); + addConstant(jsNull()); m_constants.append(ConstantRecord()); ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters()); } @@ -386,7 +665,7 @@ private: } // This method returns a DoubleConstant with the value 1. - NodeIndex one() + Node* one() { // Has m_constant1 been set up yet? if (m_constant1 == UINT_MAX) { @@ -395,12 +674,12 @@ private: for (m_constant1 = 0; m_constant1 < numberOfConstants; ++m_constant1) { JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1); if (testMe.isInt32() && testMe.asInt32() == 1) - return getDoubleConstant(1, m_constant1); + return getJSConstant(m_constant1); } // Add the value 1 to the CodeBlock's constants, and add a corresponding slot in m_constants. ASSERT(m_constants.size() == numberOfConstants); - m_codeBlock->addConstant(jsNumber(1)); + addConstant(jsNumber(1)); m_constants.append(ConstantRecord()); ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters()); } @@ -408,789 +687,3042 @@ private: // m_constant1 must refer to an entry in the CodeBlock's constant pool that has the integer value 1. ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).isInt32()); ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constant1).asInt32() == 1); - return getDoubleConstant(1, m_constant1); + return getJSConstant(m_constant1); } + + // This method returns a DoubleConstant with the value NaN. + Node* constantNaN() + { + JSValue nan = jsNaN(); + + // Has m_constantNaN been set up yet? + if (m_constantNaN == UINT_MAX) { + // Search the constant pool for the value NaN, if we find it, we can just reuse this! 
+ unsigned numberOfConstants = m_codeBlock->numberOfConstantRegisters(); + for (m_constantNaN = 0; m_constantNaN < numberOfConstants; ++m_constantNaN) { + JSValue testMe = m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN); + if (JSValue::encode(testMe) == JSValue::encode(nan)) + return getJSConstant(m_constantNaN); + } + // Add the value nan to the CodeBlock's constants, and add a corresponding slot in m_constants. + ASSERT(m_constants.size() == numberOfConstants); + addConstant(nan); + m_constants.append(ConstantRecord()); + ASSERT(m_constants.size() == m_codeBlock->numberOfConstantRegisters()); + } - // These methods create a node and add it to the graph. If nodes of this type are - // 'mustGenerate' then the node will implicitly be ref'ed to ensure generation. - NodeIndex addToGraph(NodeType op, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode) + // m_constantNaN must refer to an entry in the CodeBlock's constant pool that has the value nan. + ASSERT(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).isDouble()); + ASSERT(std::isnan(m_codeBlock->getConstant(FirstConstantRegisterIndex + m_constantNaN).asDouble())); + return getJSConstant(m_constantNaN); + } + + Node* cellConstant(JSCell* cell) + { + HashMap::AddResult result = m_cellConstantNodes.add(cell, nullptr); + if (result.isNewEntry) { + ASSERT(!Heap::isZombified(cell)); + result.iterator->value = addToGraph(WeakJSConstant, OpInfo(cell)); + } + + return result.iterator->value; + } + + Node* inferredConstant(JSValue value) + { + if (value.isCell()) + return cellConstant(value.asCell()); + return getJSConstantForValue(value); + } + + InlineCallFrame* inlineCallFrame() { - NodeIndex resultIndex = (NodeIndex)m_graph.size(); - m_graph.append(Node(op, m_currentIndex, child1, child2, child3)); + return m_inlineStackTop->m_inlineCallFrame; + } - if (op & NodeMustGenerate) - m_graph.ref(resultIndex); - return resultIndex; + CodeOrigin currentCodeOrigin() + { + return CodeOrigin(m_currentIndex, inlineCallFrame()); + } + + BranchData* branchData(unsigned taken, unsigned notTaken) + { + // We assume that branches originating from bytecode always have a fall-through. We + // use this assumption to avoid checking for the creation of terminal blocks. 
+ ASSERT((taken > m_currentIndex) || (notTaken > m_currentIndex)); + BranchData* data = m_graph.m_branchData.add(); + *data = BranchData::withBytecodeIndices(taken, notTaken); + return data; + } + + Node* addToGraph(NodeType op, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0) + { + Node* result = m_graph.addNode( + SpecNone, op, NodeOrigin(currentCodeOrigin()), Edge(child1), Edge(child2), + Edge(child3)); + ASSERT(op != Phi); + m_currentBlock->append(result); + return result; + } + Node* addToGraph(NodeType op, Edge child1, Edge child2 = Edge(), Edge child3 = Edge()) + { + Node* result = m_graph.addNode( + SpecNone, op, NodeOrigin(currentCodeOrigin()), child1, child2, child3); + ASSERT(op != Phi); + m_currentBlock->append(result); + return result; + } + Node* addToGraph(NodeType op, OpInfo info, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0) + { + Node* result = m_graph.addNode( + SpecNone, op, NodeOrigin(currentCodeOrigin()), info, Edge(child1), Edge(child2), + Edge(child3)); + ASSERT(op != Phi); + m_currentBlock->append(result); + return result; + } + Node* addToGraph(NodeType op, OpInfo info1, OpInfo info2, Node* child1 = 0, Node* child2 = 0, Node* child3 = 0) + { + Node* result = m_graph.addNode( + SpecNone, op, NodeOrigin(currentCodeOrigin()), info1, info2, + Edge(child1), Edge(child2), Edge(child3)); + ASSERT(op != Phi); + m_currentBlock->append(result); + return result; + } + + Node* addToGraph(Node::VarArgTag, NodeType op, OpInfo info1, OpInfo info2) + { + Node* result = m_graph.addNode( + SpecNone, Node::VarArg, op, NodeOrigin(currentCodeOrigin()), info1, info2, + m_graph.m_varArgChildren.size() - m_numPassedVarArgs, m_numPassedVarArgs); + ASSERT(op != Phi); + m_currentBlock->append(result); + + m_numPassedVarArgs = 0; + + return result; + } + + void addVarArgChild(Node* child) + { + m_graph.m_varArgChildren.append(Edge(child)); + m_numPassedVarArgs++; + } + + Node* addCall(int result, NodeType op, int callee, int argCount, int registerOffset) + { + SpeculatedType prediction = getPrediction(); + + addVarArgChild(get(VirtualRegister(callee))); + size_t parameterSlots = JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + argCount; + if (parameterSlots > m_parameterSlots) + m_parameterSlots = parameterSlots; + + int dummyThisArgument = op == Call ? 0 : 1; + for (int i = 0 + dummyThisArgument; i < argCount; ++i) + addVarArgChild(get(virtualRegisterForArgument(i, registerOffset))); + + Node* call = addToGraph(Node::VarArg, op, OpInfo(0), OpInfo(prediction)); + set(VirtualRegister(result), call); + return call; + } + + Node* cellConstantWithStructureCheck(JSCell* object, Structure* structure) + { + Node* objectNode = cellConstant(object); + addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(structure)), objectNode); + return objectNode; + } + + Node* cellConstantWithStructureCheck(JSCell* object) + { + return cellConstantWithStructureCheck(object, object->structure()); + } + + SpeculatedType getPredictionWithoutOSRExit(unsigned bytecodeIndex) + { + ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); + return m_inlineStackTop->m_profiledBlock->valueProfilePredictionForBytecodeOffset(locker, bytecodeIndex); + } + + SpeculatedType getPrediction(unsigned bytecodeIndex) + { + SpeculatedType prediction = getPredictionWithoutOSRExit(bytecodeIndex); + + if (prediction == SpecNone) { + // We have no information about what values this node generates. Give up + // on executing this code, since we're likely to do more damage than good. 
+ addToGraph(ForceOSRExit); + } + + return prediction; + } + + SpeculatedType getPredictionWithoutOSRExit() + { + return getPredictionWithoutOSRExit(m_currentIndex); + } + + SpeculatedType getPrediction() + { + return getPrediction(m_currentIndex); + } + + ArrayMode getArrayMode(ArrayProfile* profile, Array::Action action) + { + ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); + profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock); + return ArrayMode::fromObserved(locker, profile, action, false); + } + + ArrayMode getArrayMode(ArrayProfile* profile) + { + return getArrayMode(profile, Array::Read); + } + + ArrayMode getArrayModeConsideringSlowPath(ArrayProfile* profile, Array::Action action) + { + ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); + + profile->computeUpdatedPrediction(locker, m_inlineStackTop->m_profiledBlock); + + bool makeSafe = + m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex) + || profile->outOfBounds(locker); + + ArrayMode result = ArrayMode::fromObserved(locker, profile, action, makeSafe); + + return result; + } + + Node* makeSafe(Node* node) + { + if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) + node->mergeFlags(NodeMayOverflowInDFG); + if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) + node->mergeFlags(NodeMayNegZeroInDFG); + + if (!isX86() && node->op() == ArithMod) + return node; + + if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex)) + return node; + + switch (node->op()) { + case UInt32ToNumber: + case ArithAdd: + case ArithSub: + case ValueAdd: + case ArithMod: // for ArithMod "MayOverflow" means we tried to divide by zero, or we saw double. + node->mergeFlags(NodeMayOverflowInBaseline); + break; + + case ArithNegate: + // Currently we can't tell the difference between a negation overflowing + // (i.e. -(1 << 31)) or generating negative zero (i.e. -0). If it took slow + // path then we assume that it did both of those things. + node->mergeFlags(NodeMayOverflowInBaseline); + node->mergeFlags(NodeMayNegZeroInBaseline); + break; + + case ArithMul: + // FIXME: We should detect cases where we only overflowed but never created + // negative zero. + // https://bugs.webkit.org/show_bug.cgi?id=132470 + if (m_inlineStackTop->m_profiledBlock->likelyToTakeDeepestSlowCase(m_currentIndex) + || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) + node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline); + else if (m_inlineStackTop->m_profiledBlock->likelyToTakeSlowCase(m_currentIndex) + || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) + node->mergeFlags(NodeMayNegZeroInBaseline); + break; + + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } + + return node; + } + + Node* makeDivSafe(Node* node) + { + ASSERT(node->op() == ArithDiv); + + if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) + node->mergeFlags(NodeMayOverflowInDFG); + if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) + node->mergeFlags(NodeMayNegZeroInDFG); + + // The main slow case counter for op_div in the old JIT counts only when + // the operands are not numbers. We don't care about that since we already + // have speculations in place that take care of that separately. We only + // care about when the outcome of the division is not an integer, which + // is what the special fast case counter tells us. 
+ + if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex)) + return node; + + // FIXME: It might be possible to make this more granular. + node->mergeFlags(NodeMayOverflowInBaseline | NodeMayNegZeroInBaseline); + + return node; + } + + bool structureChainIsStillValid(bool direct, Structure* previousStructure, StructureChain* chain) + { + if (direct) + return true; + + if (!previousStructure->storedPrototype().isNull() && previousStructure->storedPrototype().asCell()->structure() != chain->head()->get()) + return false; + + for (WriteBarrier* it = chain->head(); *it; ++it) { + if (!(*it)->storedPrototype().isNull() && (*it)->storedPrototype().asCell()->structure() != it[1].get()) + return false; + } + + return true; + } + + void buildOperandMapsIfNecessary(); + + VM* m_vm; + CodeBlock* m_codeBlock; + CodeBlock* m_profiledBlock; + Graph& m_graph; + + // The current block being generated. + BasicBlock* m_currentBlock; + // The bytecode index of the current instruction being generated. + unsigned m_currentIndex; + + // We use these values during code generation, and to avoid the need for + // special handling we make sure they are available as constants in the + // CodeBlock's constant pool. These variables are initialized to + // UINT_MAX, and lazily updated to hold an index into the CodeBlock's + // constant pool, as necessary. + unsigned m_constantUndefined; + unsigned m_constantNull; + unsigned m_constantNaN; + unsigned m_constant1; + HashMap m_cellConstants; + HashMap m_cellConstantNodes; + + // A constant in the constant pool may be represented by more than one + // node in the graph, depending on the context in which it is being used. + struct ConstantRecord { + ConstantRecord() + : asInt32(0) + , asNumeric(0) + , asJSValue(0) + { + } + + Node* asInt32; + Node* asNumeric; + Node* asJSValue; + }; + + // Track the index of the node whose result is the current value for every + // register value in the bytecode - argument, local, and temporary. + Vector m_constants; + + // The number of arguments passed to the function. + unsigned m_numArguments; + // The number of locals (vars + temporaries) used in the function. + unsigned m_numLocals; + // The number of slots (in units of sizeof(Register)) that we need to + // preallocate for arguments to outgoing calls from this frame. This + // number includes the CallFrame slots that we initialize for the callee + // (but not the callee-initialized CallerFrame and ReturnPC slots). + // This number is 0 if and only if this function is a leaf. + unsigned m_parameterSlots; + // The number of var args passed to the next var arg node. + unsigned m_numPassedVarArgs; + + HashMap m_constantBufferCache; + + Vector m_localWatchpoints; + + struct InlineStackEntry { + ByteCodeParser* m_byteCodeParser; + + CodeBlock* m_codeBlock; + CodeBlock* m_profiledBlock; + InlineCallFrame* m_inlineCallFrame; + + ScriptExecutable* executable() { return m_codeBlock->ownerExecutable(); } + + QueryableExitProfile m_exitProfile; + + // Remapping of identifier and constant numbers from the code block being + // inlined (inline callee) to the code block that we're inlining into + // (the machine code block, which is the transitive, though not necessarily + // direct, caller). + Vector m_identifierRemap; + Vector m_constantRemap; + Vector m_constantBufferRemap; + Vector m_switchRemap; + + // Blocks introduced by this code block, which need successor linking. 
+ // May include up to one basic block that includes the continuation after + // the callsite in the caller. These must be appended in the order that they + // are created, but their bytecodeBegin values need not be in order as they + // are ignored. + Vector m_unlinkedBlocks; + + // Potential block linking targets. Must be sorted by bytecodeBegin, and + // cannot have two blocks that have the same bytecodeBegin. For this very + // reason, this is not equivalent to + Vector m_blockLinkingTargets; + + // If the callsite's basic block was split into two, then this will be + // the head of the callsite block. It needs its successors linked to the + // m_unlinkedBlocks, but not the other way around: there's no way for + // any blocks in m_unlinkedBlocks to jump back into this block. + BasicBlock* m_callsiteBlockHead; + + // Does the callsite block head need linking? This is typically true + // but will be false for the machine code block's inline stack entry + // (since that one is not inlined) and for cases where an inline callee + // did the linking for us. + bool m_callsiteBlockHeadNeedsLinking; + + VirtualRegister m_returnValue; + + // Speculations about variable types collected from the profiled code block, + // which are based on OSR exit profiles that past DFG compilatins of this + // code block had gathered. + LazyOperandValueProfileParser m_lazyOperands; + + CallLinkInfoMap m_callLinkInfos; + StubInfoMap m_stubInfos; + + // Did we see any returns? We need to handle the (uncommon but necessary) + // case where a procedure that does not return was inlined. + bool m_didReturn; + + // Did we have any early returns? + bool m_didEarlyReturn; + + // Pointers to the argument position trackers for this slice of code. + Vector m_argumentPositions; + + InlineStackEntry* m_caller; + + InlineStackEntry( + ByteCodeParser*, + CodeBlock*, + CodeBlock* profiledBlock, + BasicBlock* callsiteBlockHead, + JSFunction* callee, // Null if this is a closure call. + VirtualRegister returnValueVR, + VirtualRegister inlineCallFrameStart, + int argumentCountIncludingThis, + CodeSpecializationKind); + + ~InlineStackEntry() + { + m_byteCodeParser->m_inlineStackTop = m_caller; + } + + VirtualRegister remapOperand(VirtualRegister operand) const + { + if (!m_inlineCallFrame) + return operand; + + if (operand.isConstant()) { + VirtualRegister result = VirtualRegister(m_constantRemap[operand.toConstantIndex()]); + ASSERT(result.isConstant()); + return result; + } + + return VirtualRegister(operand.offset() + m_inlineCallFrame->stackOffset); + } + }; + + InlineStackEntry* m_inlineStackTop; + + struct DelayedSetLocal { + VirtualRegister m_operand; + Node* m_value; + + DelayedSetLocal() { } + DelayedSetLocal(VirtualRegister operand, Node* value) + : m_operand(operand) + , m_value(value) + { + } + + Node* execute(ByteCodeParser* parser, SetMode setMode = NormalSet) + { + if (m_operand.isArgument()) + return parser->setArgument(m_operand, m_value, setMode); + return parser->setLocal(m_operand, m_value, setMode); + } + }; + + Vector m_setLocalQueue; + + // Have we built operand maps? We initialize them lazily, and only when doing + // inlining. + bool m_haveBuiltOperandMaps; + // Mapping between identifier names and numbers. + BorrowedIdentifierMap m_identifierMap; + // Mapping between values and constant numbers. + JSValueMap m_jsValueMap; + // Index of the empty value, or UINT_MAX if there is no mapping. This is a horrible + // work-around for the fact that JSValueMap can't handle "empty" values. 
+ unsigned m_emptyJSValueIndex; + + CodeBlock* m_dfgCodeBlock; + CallLinkStatus::ContextMap m_callContextMap; + StubInfoMap m_dfgStubInfos; + + Instruction* m_currentInstruction; +}; + +#define NEXT_OPCODE(name) \ + m_currentIndex += OPCODE_LENGTH(name); \ + continue + +#define LAST_OPCODE(name) \ + m_currentIndex += OPCODE_LENGTH(name); \ + return shouldContinueParsing + +void ByteCodeParser::handleCall(Instruction* pc, NodeType op, CodeSpecializationKind kind) +{ + ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct)); + handleCall( + pc[1].u.operand, op, kind, OPCODE_LENGTH(op_call), + pc[2].u.operand, pc[3].u.operand, -pc[4].u.operand); +} + +void ByteCodeParser::handleCall( + int result, NodeType op, CodeSpecializationKind kind, unsigned instructionSize, + int callee, int argumentCountIncludingThis, int registerOffset) +{ + ASSERT(registerOffset <= 0); + + Node* callTarget = get(VirtualRegister(callee)); + + CallLinkStatus callLinkStatus; + + if (m_graph.isConstant(callTarget)) { + callLinkStatus = CallLinkStatus( + m_graph.valueOfJSConstant(callTarget)).setIsProved(true); + } else { + callLinkStatus = CallLinkStatus::computeFor( + m_inlineStackTop->m_profiledBlock, currentCodeOrigin(), + m_inlineStackTop->m_callLinkInfos, m_callContextMap); + } + + if (!callLinkStatus.canOptimize()) { + // Oddly, this conflates calls that haven't executed with calls that behaved sufficiently polymorphically + // that we cannot optimize them. + + addCall(result, op, callee, argumentCountIncludingThis, registerOffset); + return; + } + + unsigned nextOffset = m_currentIndex + instructionSize; + SpeculatedType prediction = getPrediction(); + + if (InternalFunction* function = callLinkStatus.internalFunction()) { + if (handleConstantInternalFunction(result, function, registerOffset, argumentCountIncludingThis, prediction, kind)) { + // This phantoming has to be *after* the code for the intrinsic, to signify that + // the inputs must be kept alive whatever exits the intrinsic may do. + addToGraph(Phantom, callTarget); + emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind); + return; + } + + // Can only handle this using the generic call handler. + addCall(result, op, callee, argumentCountIncludingThis, registerOffset); + return; + } + + Intrinsic intrinsic = callLinkStatus.intrinsicFor(kind); + if (intrinsic != NoIntrinsic) { + emitFunctionChecks(callLinkStatus, callTarget, registerOffset, kind); + + if (handleIntrinsic(result, intrinsic, registerOffset, argumentCountIncludingThis, prediction)) { + // This phantoming has to be *after* the code for the intrinsic, to signify that + // the inputs must be kept alive whatever exits the intrinsic may do. 
+ addToGraph(Phantom, callTarget); + emitArgumentPhantoms(registerOffset, argumentCountIncludingThis, kind); + if (m_graph.compilation()) + m_graph.compilation()->noticeInlinedCall(); + return; + } + } else if (handleInlining(callTarget, result, callLinkStatus, registerOffset, argumentCountIncludingThis, nextOffset, kind)) { + if (m_graph.compilation()) + m_graph.compilation()->noticeInlinedCall(); + return; + } + + addCall(result, op, callee, argumentCountIncludingThis, registerOffset); +} + +void ByteCodeParser::emitFunctionChecks(const CallLinkStatus& callLinkStatus, Node* callTarget, int registerOffset, CodeSpecializationKind kind) +{ + Node* thisArgument; + if (kind == CodeForCall) + thisArgument = get(virtualRegisterForArgument(0, registerOffset)); + else + thisArgument = 0; + + if (callLinkStatus.isProved()) { + addToGraph(Phantom, callTarget, thisArgument); + return; + } + + ASSERT(callLinkStatus.canOptimize()); + + if (JSFunction* function = callLinkStatus.function()) + addToGraph(CheckFunction, OpInfo(function), callTarget, thisArgument); + else { + ASSERT(callLinkStatus.structure()); + ASSERT(callLinkStatus.executable()); + + addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(callLinkStatus.structure())), callTarget); + addToGraph(CheckExecutable, OpInfo(callLinkStatus.executable()), callTarget, thisArgument); + } +} + +void ByteCodeParser::emitArgumentPhantoms(int registerOffset, int argumentCountIncludingThis, CodeSpecializationKind kind) +{ + for (int i = kind == CodeForCall ? 0 : 1; i < argumentCountIncludingThis; ++i) + addToGraph(Phantom, get(virtualRegisterForArgument(i, registerOffset))); +} + +bool ByteCodeParser::handleInlining(Node* callTargetNode, int resultOperand, const CallLinkStatus& callLinkStatus, int registerOffset, int argumentCountIncludingThis, unsigned nextOffset, CodeSpecializationKind kind) +{ + static const bool verbose = false; + + if (verbose) + dataLog("Considering inlining ", callLinkStatus, " into ", currentCodeOrigin(), "\n"); + + // First, the really simple checks: do we have an actual JS function? + if (!callLinkStatus.executable()) { + if (verbose) + dataLog(" Failing because there is no executable.\n"); + return false; + } + if (callLinkStatus.executable()->isHostFunction()) { + if (verbose) + dataLog(" Failing because it's a host function.\n"); + return false; + } + + FunctionExecutable* executable = jsCast(callLinkStatus.executable()); + + // Does the number of arguments we're passing match the arity of the target? We currently + // inline only if the number of arguments passed is greater than or equal to the number + // arguments expected. + if (static_cast(executable->parameterCount()) + 1 > argumentCountIncludingThis) { + if (verbose) + dataLog(" Failing because of arity mismatch.\n"); + return false; + } + + // Do we have a code block, and does the code block's size match the heuristics/requirements for + // being an inline candidate? We might not have a code block if code was thrown away or if we + // simply hadn't actually made this call yet. We could still theoretically attempt to inline it + // if we had a static proof of what was being called; this might happen for example if you call a + // global function, where watchpointing gives us static information. Overall, it's a rare case + // because we expect that any hot callees would have already been compiled. 
+ CodeBlock* codeBlock = executable->baselineCodeBlockFor(kind); + if (!codeBlock) { + if (verbose) + dataLog(" Failing because no code block available.\n"); + return false; + } + CapabilityLevel capabilityLevel = inlineFunctionForCapabilityLevel( + codeBlock, kind, callLinkStatus.isClosureCall()); + if (!canInline(capabilityLevel)) { + if (verbose) + dataLog(" Failing because the function is not inlineable.\n"); + return false; + } + + // Check if the caller is already too large. We do this check here because that's just + // where we happen to also have the callee's code block, and we want that for the + // purpose of unsetting SABI. + if (!isSmallEnoughToInlineCodeInto(m_codeBlock)) { + codeBlock->m_shouldAlwaysBeInlined = false; + if (verbose) + dataLog(" Failing because the caller is too large.\n"); + return false; + } + + // FIXME: this should be better at predicting how much bloat we will introduce by inlining + // this function. + // https://bugs.webkit.org/show_bug.cgi?id=127627 + + // Have we exceeded inline stack depth, or are we trying to inline a recursive call to + // too many levels? If either of these are detected, then don't inline. We adjust our + // heuristics if we are dealing with a function that cannot otherwise be compiled. + + unsigned depth = 0; + unsigned recursion = 0; + + for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) { + ++depth; + if (depth >= Options::maximumInliningDepth()) { + if (verbose) + dataLog(" Failing because depth exceeded.\n"); + return false; + } + + if (entry->executable() == executable) { + ++recursion; + if (recursion >= Options::maximumInliningRecursion()) { + if (verbose) + dataLog(" Failing because recursion detected.\n"); + return false; + } + } + } + + if (verbose) + dataLog(" Committing to inlining.\n"); + + // Now we know without a doubt that we are committed to inlining. So begin the process + // by checking the callee (if necessary) and making sure that arguments and the callee + // are flushed. + emitFunctionChecks(callLinkStatus, callTargetNode, registerOffset, kind); + + // FIXME: Don't flush constants! + + int inlineCallFrameStart = m_inlineStackTop->remapOperand(VirtualRegister(registerOffset)).offset() + JSStack::CallFrameHeaderSize; + + ensureLocals( + VirtualRegister(inlineCallFrameStart).toLocal() + 1 + + JSStack::CallFrameHeaderSize + codeBlock->m_numCalleeRegisters); + + size_t argumentPositionStart = m_graph.m_argumentPositions.size(); + + InlineStackEntry inlineStackEntry( + this, codeBlock, codeBlock, m_graph.lastBlock(), callLinkStatus.function(), + m_inlineStackTop->remapOperand(VirtualRegister(resultOperand)), + (VirtualRegister)inlineCallFrameStart, argumentCountIncludingThis, kind); + + // This is where the actual inlining really happens. 
+ unsigned oldIndex = m_currentIndex; + m_currentIndex = 0; + + InlineVariableData inlineVariableData; + inlineVariableData.inlineCallFrame = m_inlineStackTop->m_inlineCallFrame; + inlineVariableData.argumentPositionStart = argumentPositionStart; + inlineVariableData.calleeVariable = 0; + + RELEASE_ASSERT( + m_inlineStackTop->m_inlineCallFrame->isClosureCall + == callLinkStatus.isClosureCall()); + if (callLinkStatus.isClosureCall()) { + VariableAccessData* calleeVariable = + set(VirtualRegister(JSStack::Callee), callTargetNode, ImmediateNakedSet)->variableAccessData(); + VariableAccessData* scopeVariable = + set(VirtualRegister(JSStack::ScopeChain), addToGraph(GetScope, callTargetNode), ImmediateNakedSet)->variableAccessData(); + + calleeVariable->mergeShouldNeverUnbox(true); + scopeVariable->mergeShouldNeverUnbox(true); + + inlineVariableData.calleeVariable = calleeVariable; + } + + m_graph.m_inlineVariableData.append(inlineVariableData); + + parseCodeBlock(); + + m_currentIndex = oldIndex; + + // If the inlined code created some new basic blocks, then we have linking to do. + if (inlineStackEntry.m_callsiteBlockHead != m_graph.lastBlock()) { + + ASSERT(!inlineStackEntry.m_unlinkedBlocks.isEmpty()); + if (inlineStackEntry.m_callsiteBlockHeadNeedsLinking) + linkBlock(inlineStackEntry.m_callsiteBlockHead, inlineStackEntry.m_blockLinkingTargets); + else + ASSERT(inlineStackEntry.m_callsiteBlockHead->isLinked); + + // It's possible that the callsite block head is not owned by the caller. + if (!inlineStackEntry.m_caller->m_unlinkedBlocks.isEmpty()) { + // It's definitely owned by the caller, because the caller created new blocks. + // Assert that this all adds up. + ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_block == inlineStackEntry.m_callsiteBlockHead); + ASSERT(inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking); + inlineStackEntry.m_caller->m_unlinkedBlocks.last().m_needsNormalLinking = false; + } else { + // It's definitely not owned by the caller. Tell the caller that he does not + // need to link his callsite block head, because we did it for him. + ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking); + ASSERT(inlineStackEntry.m_caller->m_callsiteBlockHead == inlineStackEntry.m_callsiteBlockHead); + inlineStackEntry.m_caller->m_callsiteBlockHeadNeedsLinking = false; + } + + linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets); + } else + ASSERT(inlineStackEntry.m_unlinkedBlocks.isEmpty()); + + BasicBlock* lastBlock = m_graph.lastBlock(); + // If there was a return, but no early returns, then we're done. We allow parsing of + // the caller to continue in whatever basic block we're in right now. + if (!inlineStackEntry.m_didEarlyReturn && inlineStackEntry.m_didReturn) { + ASSERT(lastBlock->isEmpty() || !lastBlock->last()->isTerminal()); + + // If we created new blocks then the last block needs linking, but in the + // caller. It doesn't need to be linked to, but it needs outgoing links. + if (!inlineStackEntry.m_unlinkedBlocks.isEmpty()) { + // For debugging purposes, set the bytecodeBegin. Note that this doesn't matter + // for release builds because this block will never serve as a potential target + // in the linker's binary search. 
+ lastBlock->bytecodeBegin = m_currentIndex; + m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(m_graph.lastBlock())); + } + + m_currentBlock = m_graph.lastBlock(); + return true; + } + + // If we get to this point then all blocks must end in some sort of terminals. + ASSERT(lastBlock->last()->isTerminal()); + + + // Need to create a new basic block for the continuation at the caller. + RefPtr block = adoptRef(new BasicBlock(nextOffset, m_numArguments, m_numLocals, PNaN)); + + // Link the early returns to the basic block we're about to create. + for (size_t i = 0; i < inlineStackEntry.m_unlinkedBlocks.size(); ++i) { + if (!inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking) + continue; + BasicBlock* blockToLink = inlineStackEntry.m_unlinkedBlocks[i].m_block; + ASSERT(!blockToLink->isLinked); + Node* node = blockToLink->last(); + ASSERT(node->op() == Jump); + ASSERT(!node->targetBlock()); + node->targetBlock() = block.get(); + inlineStackEntry.m_unlinkedBlocks[i].m_needsEarlyReturnLinking = false; +#if !ASSERT_DISABLED + blockToLink->isLinked = true; +#endif + } + + m_currentBlock = block.get(); + ASSERT(m_inlineStackTop->m_caller->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_caller->m_blockLinkingTargets.last()->bytecodeBegin < nextOffset); + m_inlineStackTop->m_caller->m_unlinkedBlocks.append(UnlinkedBlock(block.get())); + m_inlineStackTop->m_caller->m_blockLinkingTargets.append(block.get()); + m_graph.appendBlock(block); + prepareToParseBlock(); + + // At this point we return and continue to generate code for the caller, but + // in the new basic block. + return true; +} + +bool ByteCodeParser::handleMinMax(int resultOperand, NodeType op, int registerOffset, int argumentCountIncludingThis) +{ + if (argumentCountIncludingThis == 1) { // Math.min() + set(VirtualRegister(resultOperand), constantNaN()); + return true; + } + + if (argumentCountIncludingThis == 2) { // Math.min(x) + Node* result = get(VirtualRegister(virtualRegisterForArgument(1, registerOffset))); + addToGraph(Phantom, Edge(result, NumberUse)); + set(VirtualRegister(resultOperand), result); + return true; + } + + if (argumentCountIncludingThis == 3) { // Math.min(x, y) + set(VirtualRegister(resultOperand), addToGraph(op, get(virtualRegisterForArgument(1, registerOffset)), get(virtualRegisterForArgument(2, registerOffset)))); + return true; + } + + // Don't handle >=3 arguments for now. 
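
// [Illustrative sketch, not part of this patch] handleMinMax above dispatches purely on
// the argument count, and argumentCountIncludingThis counts the 'this' slot, so
// Math.min(x, y) arrives as a count of 3. The enum and function names below are
// placeholders used only to restate that dispatch:

enum class SketchMinMaxLowering {
    ConstantResult,        // no user arguments: a constant is planted
    ForwardSingleArgument, // min(x)/max(x): force x to a number and forward it
    EmitArithNode,         // min(x, y)/max(x, y): a single ArithMin/ArithMax node
    GiveUpAndCall          // more arguments than that: plant a real call instead
};

SketchMinMaxLowering sketchClassifyMinMaxCall(int argumentCountIncludingThis)
{
    switch (argumentCountIncludingThis) {
    case 1: return SketchMinMaxLowering::ConstantResult;
    case 2: return SketchMinMaxLowering::ForwardSingleArgument;
    case 3: return SketchMinMaxLowering::EmitArithNode;
    default: return SketchMinMaxLowering::GiveUpAndCall;
    }
}
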
+ return false; +} + +bool ByteCodeParser::handleIntrinsic(int resultOperand, Intrinsic intrinsic, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction) +{ + switch (intrinsic) { + case AbsIntrinsic: { + if (argumentCountIncludingThis == 1) { // Math.abs() + set(VirtualRegister(resultOperand), constantNaN()); + return true; + } + + if (!MacroAssembler::supportsFloatingPointAbs()) + return false; + + Node* node = addToGraph(ArithAbs, get(virtualRegisterForArgument(1, registerOffset))); + if (m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow)) + node->mergeFlags(NodeMayOverflowInDFG); + set(VirtualRegister(resultOperand), node); + return true; + } + + case MinIntrinsic: + return handleMinMax(resultOperand, ArithMin, registerOffset, argumentCountIncludingThis); + + case MaxIntrinsic: + return handleMinMax(resultOperand, ArithMax, registerOffset, argumentCountIncludingThis); + + case SqrtIntrinsic: + case CosIntrinsic: + case SinIntrinsic: { + if (argumentCountIncludingThis == 1) { + set(VirtualRegister(resultOperand), constantNaN()); + return true; + } + + switch (intrinsic) { + case SqrtIntrinsic: + if (!MacroAssembler::supportsFloatingPointSqrt()) + return false; + + set(VirtualRegister(resultOperand), addToGraph(ArithSqrt, get(virtualRegisterForArgument(1, registerOffset)))); + return true; + + case CosIntrinsic: + set(VirtualRegister(resultOperand), addToGraph(ArithCos, get(virtualRegisterForArgument(1, registerOffset)))); + return true; + + case SinIntrinsic: + set(VirtualRegister(resultOperand), addToGraph(ArithSin, get(virtualRegisterForArgument(1, registerOffset)))); + return true; + + default: + RELEASE_ASSERT_NOT_REACHED(); + return false; + } + } + + case ArrayPushIntrinsic: { + if (argumentCountIncludingThis != 2) + return false; + + ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile); + if (!arrayMode.isJSArray()) + return false; + switch (arrayMode.type()) { + case Array::Undecided: + case Array::Int32: + case Array::Double: + case Array::Contiguous: + case Array::ArrayStorage: { + Node* arrayPush = addToGraph(ArrayPush, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); + set(VirtualRegister(resultOperand), arrayPush); + + return true; + } + + default: + return false; + } + } + + case ArrayPopIntrinsic: { + if (argumentCountIncludingThis != 1) + return false; + + ArrayMode arrayMode = getArrayMode(m_currentInstruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile); + if (!arrayMode.isJSArray()) + return false; + switch (arrayMode.type()) { + case Array::Int32: + case Array::Double: + case Array::Contiguous: + case Array::ArrayStorage: { + Node* arrayPop = addToGraph(ArrayPop, OpInfo(arrayMode.asWord()), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset))); + set(VirtualRegister(resultOperand), arrayPop); + return true; + } + + default: + return false; + } + } + + case CharCodeAtIntrinsic: { + if (argumentCountIncludingThis != 2) + return false; + + VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset); + VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); + Node* charCode = addToGraph(StringCharCodeAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand)); + + set(VirtualRegister(resultOperand), charCode); + return true; + } + + case CharAtIntrinsic: { + if (argumentCountIncludingThis != 2) + return 
false; + + VirtualRegister thisOperand = virtualRegisterForArgument(0, registerOffset); + VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); + Node* charCode = addToGraph(StringCharAt, OpInfo(ArrayMode(Array::String).asWord()), get(thisOperand), get(indexOperand)); + + set(VirtualRegister(resultOperand), charCode); + return true; + } + case FromCharCodeIntrinsic: { + if (argumentCountIncludingThis != 2) + return false; + + VirtualRegister indexOperand = virtualRegisterForArgument(1, registerOffset); + Node* charCode = addToGraph(StringFromCharCode, get(indexOperand)); + + set(VirtualRegister(resultOperand), charCode); + + return true; + } + + case RegExpExecIntrinsic: { + if (argumentCountIncludingThis != 2) + return false; + + Node* regExpExec = addToGraph(RegExpExec, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); + set(VirtualRegister(resultOperand), regExpExec); + + return true; + } + + case RegExpTestIntrinsic: { + if (argumentCountIncludingThis != 2) + return false; + + Node* regExpExec = addToGraph(RegExpTest, OpInfo(0), OpInfo(prediction), get(virtualRegisterForArgument(0, registerOffset)), get(virtualRegisterForArgument(1, registerOffset))); + set(VirtualRegister(resultOperand), regExpExec); + + return true; + } + + case IMulIntrinsic: { + if (argumentCountIncludingThis != 3) + return false; + VirtualRegister leftOperand = virtualRegisterForArgument(1, registerOffset); + VirtualRegister rightOperand = virtualRegisterForArgument(2, registerOffset); + Node* left = get(leftOperand); + Node* right = get(rightOperand); + set(VirtualRegister(resultOperand), addToGraph(ArithIMul, left, right)); + return true; + } + + case FRoundIntrinsic: { + if (argumentCountIncludingThis != 2) + return false; + VirtualRegister operand = virtualRegisterForArgument(1, registerOffset); + set(VirtualRegister(resultOperand), addToGraph(ArithFRound, get(operand))); + return true; + } + + case DFGTrueIntrinsic: { + set(VirtualRegister(resultOperand), getJSConstantForValue(jsBoolean(true))); + return true; + } + + case OSRExitIntrinsic: { + addToGraph(ForceOSRExit); + set(VirtualRegister(resultOperand), constantUndefined()); + return true; + } + + case IsFinalTierIntrinsic: { + set(VirtualRegister(resultOperand), + getJSConstantForValue(jsBoolean(Options::useFTLJIT() ? 
isFTL(m_graph.m_plan.mode) : true))); + return true; + } + + case SetInt32HeapPredictionIntrinsic: { + for (int i = 1; i < argumentCountIncludingThis; ++i) { + Node* node = get(virtualRegisterForArgument(i, registerOffset)); + if (node->hasHeapPrediction()) + node->setHeapPrediction(SpecInt32); + } + set(VirtualRegister(resultOperand), constantUndefined()); + return true; + } + + case FiatInt52Intrinsic: { + if (argumentCountIncludingThis != 2) + return false; + VirtualRegister operand = virtualRegisterForArgument(1, registerOffset); + if (enableInt52()) + set(VirtualRegister(resultOperand), addToGraph(FiatInt52, get(operand))); + else + set(VirtualRegister(resultOperand), get(operand)); + return true; + } + + default: + return false; + } +} + +bool ByteCodeParser::handleTypedArrayConstructor( + int resultOperand, InternalFunction* function, int registerOffset, + int argumentCountIncludingThis, TypedArrayType type) +{ + if (!isTypedView(type)) + return false; + + if (function->classInfo() != constructorClassInfoForType(type)) + return false; + + if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject()) + return false; + + // We only have an intrinsic for the case where you say: + // + // new FooArray(blah); + // + // Of course, 'blah' could be any of the following: + // + // - Integer, indicating that you want to allocate an array of that length. + // This is the thing we're hoping for, and what we can actually do meaningful + // optimizations for. + // + // - Array buffer, indicating that you want to create a view onto that _entire_ + // buffer. + // + // - Non-buffer object, indicating that you want to create a copy of that + // object by pretending that it quacks like an array. + // + // - Anything else, indicating that you want to have an exception thrown at + // you. + // + // The intrinsic, NewTypedArray, will behave as if it could do any of these + // things up until we do Fixup. Thereafter, if child1 (i.e. 'blah') is + // predicted Int32, then we lock it in as a normal typed array allocation. + // Otherwise, NewTypedArray turns into a totally opaque function call that + // may clobber the world - by virtue of it accessing properties on what could + // be an object. + // + // Note that although the generic form of NewTypedArray sounds sort of awful, + // it is actually quite likely to be more efficient than a fully generic + // Construct. So, we might want to think about making NewTypedArray variadic, + // or else making Construct not super slow. + + if (argumentCountIncludingThis != 2) + return false; + + set(VirtualRegister(resultOperand), + addToGraph(NewTypedArray, OpInfo(type), get(virtualRegisterForArgument(1, registerOffset)))); + return true; +} + +bool ByteCodeParser::handleConstantInternalFunction( + int resultOperand, InternalFunction* function, int registerOffset, + int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind kind) +{ + // If we ever find that we have a lot of internal functions that we specialize for, + // then we should probably have some sort of hashtable dispatch, or maybe even + // dispatch straight through the MethodTable of the InternalFunction. But for now, + // it seems that this case is hit infrequently enough, and the number of functions + // we know about is small enough, that having just a linear cascade of if statements + // is good enough. + + UNUSED_PARAM(prediction); // Remove this once we do more things. 
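
// [Illustrative sketch, not part of this patch] The long comment in
// handleTypedArrayConstructor above distinguishes four shapes the single argument of
// 'new FooArray(blah)' can take; only the integer-length shape is the one the DFG can
// really optimize, which is why NewTypedArray stays generic until fixup. Restated as a
// stand-alone classifier with placeholder types:

enum class SketchTypedArrayCtorArgument {
    Length,          // new Int8Array(100): allocate an array of that length (the fast case)
    WholeBufferView, // new Int8Array(buffer): a view onto the entire buffer
    ArrayLikeCopy,   // new Int8Array(someObject): copy whatever quacks like an array
    Other            // anything else: the comment above expects an exception
};

struct SketchArgument {
    bool isInt32;
    bool isArrayBuffer;
    bool isObject;
};

SketchTypedArrayCtorArgument sketchClassifyTypedArrayCtorArgument(const SketchArgument& argument)
{
    if (argument.isInt32)
        return SketchTypedArrayCtorArgument::Length;
    if (argument.isArrayBuffer)
        return SketchTypedArrayCtorArgument::WholeBufferView;
    if (argument.isObject)
        return SketchTypedArrayCtorArgument::ArrayLikeCopy;
    return SketchTypedArrayCtorArgument::Other;
}
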
+ + if (function->classInfo() == ArrayConstructor::info()) { + if (function->globalObject() != m_inlineStackTop->m_codeBlock->globalObject()) + return false; + + if (argumentCountIncludingThis == 2) { + set(VirtualRegister(resultOperand), + addToGraph(NewArrayWithSize, OpInfo(ArrayWithUndecided), get(virtualRegisterForArgument(1, registerOffset)))); + return true; + } + + for (int i = 1; i < argumentCountIncludingThis; ++i) + addVarArgChild(get(virtualRegisterForArgument(i, registerOffset))); + set(VirtualRegister(resultOperand), + addToGraph(Node::VarArg, NewArray, OpInfo(ArrayWithUndecided), OpInfo(0))); + return true; + } + + if (function->classInfo() == StringConstructor::info()) { + Node* result; + + if (argumentCountIncludingThis <= 1) + result = cellConstant(m_vm->smallStrings.emptyString()); + else + result = addToGraph(ToString, get(virtualRegisterForArgument(1, registerOffset))); + + if (kind == CodeForConstruct) + result = addToGraph(NewStringObject, OpInfo(function->globalObject()->stringObjectStructure()), result); + + set(VirtualRegister(resultOperand), result); + return true; + } + + for (unsigned typeIndex = 0; typeIndex < NUMBER_OF_TYPED_ARRAY_TYPES; ++typeIndex) { + bool result = handleTypedArrayConstructor( + resultOperand, function, registerOffset, argumentCountIncludingThis, + indexToTypedArrayType(typeIndex)); + if (result) + return true; + } + + return false; +} + +Node* ByteCodeParser::handleGetByOffset(SpeculatedType prediction, Node* base, unsigned identifierNumber, PropertyOffset offset) +{ + Node* propertyStorage; + if (isInlineOffset(offset)) + propertyStorage = base; + else + propertyStorage = addToGraph(GetButterfly, base); + Node* getByOffset = addToGraph(GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage, base); + + StorageAccessData storageAccessData; + storageAccessData.offset = offset; + storageAccessData.identifierNumber = identifierNumber; + m_graph.m_storageAccessData.append(storageAccessData); + + return getByOffset; +} + +void ByteCodeParser::handleGetByOffset( + int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber, + PropertyOffset offset) +{ + set(VirtualRegister(destinationOperand), handleGetByOffset(prediction, base, identifierNumber, offset)); +} + +Node* ByteCodeParser::handlePutByOffset(Node* base, unsigned identifier, PropertyOffset offset, Node* value) +{ + Node* propertyStorage; + if (isInlineOffset(offset)) + propertyStorage = base; + else + propertyStorage = addToGraph(GetButterfly, base); + Node* result = addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value); + + StorageAccessData storageAccessData; + storageAccessData.offset = offset; + storageAccessData.identifierNumber = identifier; + m_graph.m_storageAccessData.append(storageAccessData); + + return result; +} + +Node* ByteCodeParser::emitPrototypeChecks( + Structure* structure, IntendedStructureChain* chain) +{ + Node* base = 0; + m_graph.chains().addLazily(chain); + Structure* currentStructure = structure; + JSObject* currentObject = 0; + for (unsigned i = 0; i < chain->size(); ++i) { + currentObject = asObject(currentStructure->prototypeForLookup(m_inlineStackTop->m_codeBlock)); + currentStructure = chain->at(i); + base = cellConstantWithStructureCheck(currentObject, currentStructure); + } + return base; +} + +void ByteCodeParser::handleGetById( + int destinationOperand, SpeculatedType prediction, Node* base, unsigned identifierNumber, + const GetByIdStatus& 
getByIdStatus) +{ + if (!getByIdStatus.isSimple() || !Options::enableAccessInlining()) { + set(VirtualRegister(destinationOperand), + addToGraph( + getByIdStatus.makesCalls() ? GetByIdFlush : GetById, + OpInfo(identifierNumber), OpInfo(prediction), base)); + return; + } + + if (getByIdStatus.numVariants() > 1) { + if (!isFTL(m_graph.m_plan.mode) || !Options::enablePolymorphicAccessInlining()) { + set(VirtualRegister(destinationOperand), + addToGraph(GetById, OpInfo(identifierNumber), OpInfo(prediction), base)); + return; + } + + if (m_graph.compilation()) + m_graph.compilation()->noticeInlinedGetById(); + + // 1) Emit prototype structure checks for all chains. This could sort of maybe not be + // optimal, if there is some rarely executed case in the chain that requires a lot + // of checks and those checks are not watchpointable. + for (unsigned variantIndex = getByIdStatus.numVariants(); variantIndex--;) { + if (getByIdStatus[variantIndex].chain()) { + emitPrototypeChecks( + getByIdStatus[variantIndex].structureSet().singletonStructure(), + getByIdStatus[variantIndex].chain()); + } + } + + // 2) Emit a MultiGetByOffset + MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add(); + data->variants = getByIdStatus.variants(); + data->identifierNumber = identifierNumber; + set(VirtualRegister(destinationOperand), + addToGraph(MultiGetByOffset, OpInfo(data), OpInfo(prediction), base)); + return; + } + + ASSERT(getByIdStatus.numVariants() == 1); + GetByIdVariant variant = getByIdStatus[0]; + + if (m_graph.compilation()) + m_graph.compilation()->noticeInlinedGetById(); + + Node* originalBaseForBaselineJIT = base; + + addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structureSet())), base); + + if (variant.chain()) { + base = emitPrototypeChecks( + variant.structureSet().singletonStructure(), variant.chain()); + } + + // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to + // ensure that the base of the original get_by_id is kept alive until we're done with + // all of the speculations. We only insert the Phantom if there had been a CheckStructure + // on something other than the base following the CheckStructure on base, or if the + // access was compiled to a WeakJSConstant specific value, in which case we might not + // have any explicit use of the base at all. + if (variant.specificValue() || originalBaseForBaselineJIT != base) + addToGraph(Phantom, originalBaseForBaselineJIT); + + if (variant.specificValue()) { + ASSERT(variant.specificValue().isCell()); + + set(VirtualRegister(destinationOperand), cellConstant(variant.specificValue().asCell())); + return; + } + + handleGetByOffset( + destinationOperand, prediction, base, identifierNumber, variant.offset()); +} + +void ByteCodeParser::emitPutById( + Node* base, unsigned identifierNumber, Node* value, const PutByIdStatus& putByIdStatus, bool isDirect) +{ + if (isDirect) + addToGraph(PutByIdDirect, OpInfo(identifierNumber), base, value); + else + addToGraph(putByIdStatus.makesCalls() ? 
PutByIdFlush : PutById, OpInfo(identifierNumber), base, value); +} + +void ByteCodeParser::handlePutById( + Node* base, unsigned identifierNumber, Node* value, + const PutByIdStatus& putByIdStatus, bool isDirect) +{ + if (!putByIdStatus.isSimple() || !Options::enableAccessInlining()) { + if (!putByIdStatus.isSet()) + addToGraph(ForceOSRExit); + emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); + return; + } + + if (putByIdStatus.numVariants() > 1) { + if (!isFTL(m_graph.m_plan.mode) || putByIdStatus.makesCalls() + || !Options::enablePolymorphicAccessInlining()) { + emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); + return; + } + + if (m_graph.compilation()) + m_graph.compilation()->noticeInlinedPutById(); + + if (!isDirect) { + for (unsigned variantIndex = putByIdStatus.numVariants(); variantIndex--;) { + if (putByIdStatus[variantIndex].kind() != PutByIdVariant::Transition) + continue; + if (!putByIdStatus[variantIndex].structureChain()) + continue; + emitPrototypeChecks( + putByIdStatus[variantIndex].oldStructure(), + putByIdStatus[variantIndex].structureChain()); + } + } + + MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add(); + data->variants = putByIdStatus.variants(); + data->identifierNumber = identifierNumber; + addToGraph(MultiPutByOffset, OpInfo(data), base, value); + return; } - NodeIndex addToGraph(NodeType op, OpInfo info, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode) - { - NodeIndex resultIndex = (NodeIndex)m_graph.size(); - m_graph.append(Node(op, m_currentIndex, info, child1, child2, child3)); - - if (op & NodeMustGenerate) - m_graph.ref(resultIndex); - return resultIndex; + + ASSERT(putByIdStatus.numVariants() == 1); + const PutByIdVariant& variant = putByIdStatus[0]; + + if (variant.kind() == PutByIdVariant::Replace) { + addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.structure())), base); + handlePutByOffset(base, identifierNumber, variant.offset(), value); + if (m_graph.compilation()) + m_graph.compilation()->noticeInlinedPutById(); + return; } - NodeIndex addToGraph(NodeType op, OpInfo info1, OpInfo info2, NodeIndex child1 = NoNode, NodeIndex child2 = NoNode, NodeIndex child3 = NoNode) - { - NodeIndex resultIndex = (NodeIndex)m_graph.size(); - m_graph.append(Node(op, m_currentIndex, info1, info2, child1, child2, child3)); - - if (op & NodeMustGenerate) - m_graph.ref(resultIndex); - return resultIndex; + + if (variant.kind() != PutByIdVariant::Transition) { + emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); + return; } - void predictArray(NodeIndex nodeIndex) - { - Node* nodePtr = &m_graph[nodeIndex]; - - if (nodePtr->op == GetLocal) - m_graph.predict(nodePtr->local(), PredictArray); + if (variant.structureChain() && !variant.structureChain()->isStillValid()) { + emitPutById(base, identifierNumber, value, putByIdStatus, isDirect); + return; } - - void predictInt32(NodeIndex nodeIndex) - { - Node* nodePtr = &m_graph[nodeIndex]; - - if (nodePtr->op == ValueToNumber) - nodePtr = &m_graph[nodePtr->child1]; - - if (nodePtr->op == ValueToInt32) - nodePtr = &m_graph[nodePtr->child1]; - - if (nodePtr->op == NumberToInt32) - nodePtr = &m_graph[nodePtr->child1]; - - if (nodePtr->op == GetLocal) - m_graph.predict(nodePtr->local(), PredictInt32); + + m_graph.chains().addLazily(variant.structureChain()); + + addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(variant.oldStructure())), base); + if (!isDirect) + emitPrototypeChecks(variant.oldStructure(), 
variant.structureChain()); + + ASSERT(variant.oldStructure()->transitionWatchpointSetHasBeenInvalidated()); + + Node* propertyStorage; + StructureTransitionData* transitionData = m_graph.addStructureTransitionData( + StructureTransitionData(variant.oldStructure(), variant.newStructure())); + + if (variant.oldStructure()->outOfLineCapacity() + != variant.newStructure()->outOfLineCapacity()) { + + // If we're growing the property storage then it must be because we're + // storing into the out-of-line storage. + ASSERT(!isInlineOffset(variant.offset())); + + if (!variant.oldStructure()->outOfLineCapacity()) { + propertyStorage = addToGraph( + AllocatePropertyStorage, OpInfo(transitionData), base); + } else { + propertyStorage = addToGraph( + ReallocatePropertyStorage, OpInfo(transitionData), + base, addToGraph(GetButterfly, base)); + } + } else { + if (isInlineOffset(variant.offset())) + propertyStorage = base; + else + propertyStorage = addToGraph(GetButterfly, base); } - JSGlobalData* m_globalData; - CodeBlock* m_codeBlock; - Graph& m_graph; - - // The current block being generated. - BasicBlock* m_currentBlock; - // The bytecode index of the current instruction being generated. - unsigned m_currentIndex; - - // Record failures due to unimplemented functionality or regressions. - bool m_parseFailed; - - // We use these values during code generation, and to avoid the need for - // special handling we make sure they are available as constants in the - // CodeBlock's constant pool. These variables are initialized to - // UINT_MAX, and lazily updated to hold an index into the CodeBlock's - // constant pool, as necessary. - unsigned m_constantUndefined; - unsigned m_constantNull; - unsigned m_constant1; - - // A constant in the constant pool may be represented by more than one - // node in the graph, depending on the context in which it is being used. - struct ConstantRecord { - ConstantRecord() - : asInt32(NoNode) - , asNumeric(NoNode) - , asJSValue(NoNode) - { - } - - NodeIndex asInt32; - NodeIndex asNumeric; - NodeIndex asJSValue; - }; + addToGraph(PutStructure, OpInfo(transitionData), base); - // Track the index of the node whose result is the current value for every - // register value in the bytecode - argument, local, and temporary. - Vector m_constants; - - // The number of arguments passed to the function. - unsigned m_numArguments; - // The number of locals (vars + temporaries) used in the function. - unsigned m_numLocals; - // The number of registers we need to preserve across BasicBlock boundaries; - // typically equal to the number vars, but we expand this to cover all - // temporaries that persist across blocks (dues to ?:, &&, ||, etc). - unsigned m_preservedVars; - - struct PhiStackEntry { - PhiStackEntry(BasicBlock* block, NodeIndex phi, unsigned varNo) - : m_block(block) - , m_phi(phi) - , m_varNo(varNo) - { - } + addToGraph( + PutByOffset, + OpInfo(m_graph.m_storageAccessData.size()), + propertyStorage, + base, + value); - BasicBlock* m_block; - NodeIndex m_phi; - unsigned m_varNo; - }; - Vector m_argumentPhiStack; - Vector m_localPhiStack; + StorageAccessData storageAccessData; + storageAccessData.offset = variant.offset(); + storageAccessData.identifierNumber = identifierNumber; + m_graph.m_storageAccessData.append(storageAccessData); - // These maps are used to unique ToNumber and ToInt32 operations. 
- typedef HashMap UnaryOpMap; - UnaryOpMap m_int32ToNumberNodes; - UnaryOpMap m_numberToInt32Nodes; -}; + if (m_graph.compilation()) + m_graph.compilation()->noticeInlinedPutById(); +} -#define NEXT_OPCODE(name) \ - m_currentIndex += OPCODE_LENGTH(name); \ - continue +void ByteCodeParser::prepareToParseBlock() +{ + for (unsigned i = 0; i < m_constants.size(); ++i) + m_constants[i] = ConstantRecord(); + m_cellConstantNodes.clear(); +} -#define LAST_OPCODE(name) \ - m_currentIndex += OPCODE_LENGTH(name); \ - return !m_parseFailed +Node* ByteCodeParser::getScope(bool skipTop, unsigned skipCount) +{ + Node* localBase = get(VirtualRegister(JSStack::ScopeChain)); + if (skipTop) { + ASSERT(!inlineCallFrame()); + localBase = addToGraph(SkipTopScope, localBase); + } + for (unsigned n = skipCount; n--;) + localBase = addToGraph(SkipScope, localBase); + return localBase; +} bool ByteCodeParser::parseBlock(unsigned limit) { - // No need to reset state initially, since it has been set by the constructor. - if (m_currentIndex) { - for (unsigned i = 0; i < m_constants.size(); ++i) - m_constants[i] = ConstantRecord(); + bool shouldContinueParsing = true; + + Interpreter* interpreter = m_vm->interpreter; + Instruction* instructionsBegin = m_inlineStackTop->m_codeBlock->instructions().begin(); + unsigned blockBegin = m_currentIndex; + + // If we are the first basic block, introduce markers for arguments. This allows + // us to track if a use of an argument may use the actual argument passed, as + // opposed to using a value we set explicitly. + if (m_currentBlock == m_graph.block(0) && !inlineCallFrame()) { + m_graph.m_arguments.resize(m_numArguments); + for (unsigned argument = 0; argument < m_numArguments; ++argument) { + VariableAccessData* variable = newVariableAccessData( + virtualRegisterForArgument(argument), m_codeBlock->isCaptured(virtualRegisterForArgument(argument))); + variable->mergeStructureCheckHoistingFailed( + m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache) + || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint)); + variable->mergeCheckArrayHoistingFailed( + m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadIndexingType)); + + Node* setArgument = addToGraph(SetArgument, OpInfo(variable)); + m_graph.m_arguments[argument] = setArgument; + m_currentBlock->variablesAtTail.setArgumentFirstTime(argument, setArgument); + } } - AliasTracker aliases(m_graph); - - Interpreter* interpreter = m_globalData->interpreter; - Instruction* instructionsBegin = m_codeBlock->instructions().begin(); while (true) { + for (unsigned i = 0; i < m_setLocalQueue.size(); ++i) + m_setLocalQueue[i].execute(this); + m_setLocalQueue.resize(0); + // Don't extend over jump destinations. if (m_currentIndex == limit) { - addToGraph(Jump, OpInfo(m_currentIndex)); - return !m_parseFailed; + // Ordinarily we want to plant a jump. But refuse to do this if the block is + // empty. This is a special case for inlining, which might otherwise create + // some empty blocks in some cases. When parseBlock() returns with an empty + // block, it will get repurposed instead of creating a new one. Note that this + // logic relies on every bytecode resulting in one or more nodes, which would + // be true anyway except for op_loop_hint, which emits a Phantom to force this + // to be true. + if (!m_currentBlock->isEmpty()) + addToGraph(Jump, OpInfo(m_currentIndex)); + return shouldContinueParsing; } - + // Switch on the current bytecode opcode. 
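
// [Illustrative sketch, not part of this patch] The loop above has a simple overall
// shape once the bookkeeping is stripped away: drain queued SetLocals, stop at the next
// jump target (planting a Jump only if the block is non-empty, per the inlining special
// case), then decode one bytecode and advance by its length, which is what NEXT_OPCODE
// does. A schematic skeleton with placeholder types:

#include <vector>

struct SketchInstruction { unsigned opcode; unsigned length; };

// "instructions" models the raw slot stream: the entry at an opcode's first slot
// carries that opcode's length in slots.
void sketchParseOneBlock(const std::vector<SketchInstruction>& instructions,
                         unsigned& currentIndex, unsigned limit, bool blockIsEmpty)
{
    while (true) {
        // (1) drain the queued SetLocals -- omitted here
        // (2) never parse across a jump target
        if (currentIndex == limit) {
            if (!blockIsEmpty) {
                // addToGraph(Jump, OpInfo(currentIndex)) in the real parser
            }
            return;
        }
        // (3) decode and handle one bytecode, then advance past it
        const SketchInstruction& instruction = instructions[currentIndex];
        // switch (instruction.opcode) { ... emit nodes for this bytecode ... }
        blockIsEmpty = false;
        currentIndex += instruction.length; // NEXT_OPCODE(...)
    }
}
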
Instruction* currentInstruction = instructionsBegin + m_currentIndex; - switch (interpreter->getOpcodeID(currentInstruction->u.opcode)) { + m_currentInstruction = currentInstruction; // Some methods want to use this, and we'd rather not thread it through calls. + OpcodeID opcodeID = interpreter->getOpcodeID(currentInstruction->u.opcode); + + if (Options::verboseDFGByteCodeParsing()) + dataLog(" parsing ", currentCodeOrigin(), "\n"); + + if (m_graph.compilation()) { + addToGraph(CountExecution, OpInfo(m_graph.compilation()->executionCounterFor( + Profiler::OriginStack(*m_vm->m_perBytecodeProfiler, m_codeBlock, currentCodeOrigin())))); + } + + switch (opcodeID) { // === Function entry opcodes === case op_enter: // Initialize all locals to undefined. - for (int i = 0; i < m_codeBlock->m_numVars; ++i) - set(i, constantUndefined()); + for (int i = 0; i < m_inlineStackTop->m_codeBlock->m_numVars; ++i) + set(virtualRegisterForLocal(i), constantUndefined(), ImmediateNakedSet); + if (m_inlineStackTop->m_codeBlock->specializationKind() == CodeForConstruct) + set(virtualRegisterForArgument(0), constantUndefined(), ImmediateNakedSet); NEXT_OPCODE(op_enter); - - case op_convert_this: { - NodeIndex op1 = getThis(); - setThis(addToGraph(ConvertThis, op1)); - NEXT_OPCODE(op_convert_this); + + case op_touch_entry: + if (m_inlineStackTop->m_codeBlock->symbolTable()->m_functionEnteredOnce.isStillValid()) + addToGraph(ForceOSRExit); + NEXT_OPCODE(op_touch_entry); + + case op_to_this: { + Node* op1 = getThis(); + if (op1->op() != ToThis) { + Structure* cachedStructure = currentInstruction[2].u.structure.get(); + if (!cachedStructure + || cachedStructure->classInfo()->methodTable.toThis != JSObject::info()->methodTable.toThis + || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex) + || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCache) + || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadCacheWatchpoint) + || (op1->op() == GetLocal && op1->variableAccessData()->structureCheckHoistingFailed())) { + setThis(addToGraph(ToThis, op1)); + } else { + addToGraph( + CheckStructure, + OpInfo(m_graph.addStructureSet(cachedStructure)), + op1); + } + } + NEXT_OPCODE(op_to_this); + } + + case op_create_this: { + int calleeOperand = currentInstruction[2].u.operand; + Node* callee = get(VirtualRegister(calleeOperand)); + bool alreadyEmitted = false; + if (callee->op() == WeakJSConstant) { + JSCell* cell = callee->weakConstant(); + ASSERT(cell->inherits(JSFunction::info())); + + JSFunction* function = jsCast(cell); + if (Structure* structure = function->allocationStructure()) { + addToGraph(AllocationProfileWatchpoint, OpInfo(function)); + // The callee is still live up to this point. 
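
// [Illustrative sketch, not part of this patch] op_create_this above has a fast path:
// when the callee is a known-constant JSFunction with a cached allocation structure, the
// parser plants a watchpoint plus a NewObject of that structure, and the Phantom that
// follows merely keeps the now-unused callee alive. Restated as a stand-alone decision
// with placeholder types:

struct SketchCallee {
    bool isKnownConstantFunction;      // callee node is a WeakJSConstant JSFunction
    bool hasCachedAllocationStructure; // its allocation profile has a structure to reuse
};

enum class SketchCreateThisLowering {
    InlinedAllocation, // AllocationProfileWatchpoint + Phantom(callee) + NewObject(structure)
    GenericCreateThis  // fall back to the CreateThis node
};

SketchCreateThisLowering sketchClassifyCreateThis(const SketchCallee& callee)
{
    if (callee.isKnownConstantFunction && callee.hasCachedAllocationStructure)
        return SketchCreateThisLowering::InlinedAllocation;
    return SketchCreateThisLowering::GenericCreateThis;
}
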
+ addToGraph(Phantom, callee); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewObject, OpInfo(structure))); + alreadyEmitted = true; + } + } + if (!alreadyEmitted) { + set(VirtualRegister(currentInstruction[1].u.operand), + addToGraph(CreateThis, OpInfo(currentInstruction[3].u.operand), callee)); + } + NEXT_OPCODE(op_create_this); + } + + case op_new_object: { + set(VirtualRegister(currentInstruction[1].u.operand), + addToGraph(NewObject, + OpInfo(currentInstruction[3].u.objectAllocationProfile->structure()))); + NEXT_OPCODE(op_new_object); + } + + case op_new_array: { + int startOperand = currentInstruction[2].u.operand; + int numOperands = currentInstruction[3].u.operand; + ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile; + for (int operandIdx = startOperand; operandIdx > startOperand - numOperands; --operandIdx) + addVarArgChild(get(VirtualRegister(operandIdx))); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(Node::VarArg, NewArray, OpInfo(profile->selectIndexingType()), OpInfo(0))); + NEXT_OPCODE(op_new_array); + } + + case op_new_array_with_size: { + int lengthOperand = currentInstruction[2].u.operand; + ArrayAllocationProfile* profile = currentInstruction[3].u.arrayAllocationProfile; + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayWithSize, OpInfo(profile->selectIndexingType()), get(VirtualRegister(lengthOperand)))); + NEXT_OPCODE(op_new_array_with_size); + } + + case op_new_array_buffer: { + int startConstant = currentInstruction[2].u.operand; + int numConstants = currentInstruction[3].u.operand; + ArrayAllocationProfile* profile = currentInstruction[4].u.arrayAllocationProfile; + NewArrayBufferData data; + data.startConstant = m_inlineStackTop->m_constantBufferRemap[startConstant]; + data.numConstants = numConstants; + data.indexingType = profile->selectIndexingType(); + + // If this statement has never executed, we'll have the wrong indexing type in the profile. 
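
// [Illustrative sketch, not part of this patch] The loop below repairs a possibly stale
// profile by joining the profiled indexing type with the kind of every constant in the
// buffer. A simplified three-level lattice (Int32 <= Double <= Contiguous) is enough to
// show the idea; the real IndexingType is a richer bitfield, and these names are
// placeholders:

enum SketchIndexingKind { SketchInt32 = 0, SketchDouble = 1, SketchContiguous = 2 };

struct SketchConstant { bool isInt32; bool isDouble; };

SketchIndexingKind sketchJoinWithValue(SketchIndexingKind current, const SketchConstant& value)
{
    SketchIndexingKind needed = SketchContiguous;
    if (value.isInt32)
        needed = SketchInt32;
    else if (value.isDouble)
        needed = SketchDouble;
    return current > needed ? current : needed; // least upper bound in this tiny lattice
}
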
+ for (int i = 0; i < numConstants; ++i) { + data.indexingType = + leastUpperBoundOfIndexingTypeAndValue( + data.indexingType, + m_codeBlock->constantBuffer(data.startConstant)[i]); + } + + m_graph.m_newArrayBufferData.append(data); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewArrayBuffer, OpInfo(&m_graph.m_newArrayBufferData.last()))); + NEXT_OPCODE(op_new_array_buffer); + } + + case op_new_regexp: { + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(NewRegexp, OpInfo(currentInstruction[2].u.operand))); + NEXT_OPCODE(op_new_regexp); + } + + case op_get_callee: { + JSCell* cachedFunction = currentInstruction[2].u.jsCell.get(); + if (!cachedFunction + || m_inlineStackTop->m_profiledBlock->couldTakeSlowCase(m_currentIndex) + || m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, BadFunction)) { + set(VirtualRegister(currentInstruction[1].u.operand), get(VirtualRegister(JSStack::Callee))); + } else { + ASSERT(cachedFunction->inherits(JSFunction::info())); + Node* actualCallee = get(VirtualRegister(JSStack::Callee)); + addToGraph(CheckFunction, OpInfo(cachedFunction), actualCallee); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(WeakJSConstant, OpInfo(cachedFunction))); + } + NEXT_OPCODE(op_get_callee); } // === Bitwise operations === case op_bitand: { - NodeIndex op1 = getToInt32(currentInstruction[2].u.operand); - NodeIndex op2 = getToInt32(currentInstruction[3].u.operand); - predictInt32(op1); - predictInt32(op2); - set(currentInstruction[1].u.operand, addToGraph(BitAnd, op1, op2), PredictInt32); + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitAnd, op1, op2)); NEXT_OPCODE(op_bitand); } case op_bitor: { - NodeIndex op1 = getToInt32(currentInstruction[2].u.operand); - NodeIndex op2 = getToInt32(currentInstruction[3].u.operand); - predictInt32(op1); - predictInt32(op2); - set(currentInstruction[1].u.operand, addToGraph(BitOr, op1, op2), PredictInt32); + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitOr, op1, op2)); NEXT_OPCODE(op_bitor); } case op_bitxor: { - NodeIndex op1 = getToInt32(currentInstruction[2].u.operand); - NodeIndex op2 = getToInt32(currentInstruction[3].u.operand); - predictInt32(op1); - predictInt32(op2); - set(currentInstruction[1].u.operand, addToGraph(BitXor, op1, op2), PredictInt32); + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(BitXor, op1, op2)); NEXT_OPCODE(op_bitxor); } case op_rshift: { - NodeIndex op1 = getToInt32(currentInstruction[2].u.operand); - NodeIndex op2 = getToInt32(currentInstruction[3].u.operand); - predictInt32(op1); - predictInt32(op2); - NodeIndex result; - // Optimize out shifts by zero. 
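
// [Illustrative sketch, not part of this patch] The removed lines around this point
// implemented a small peephole for op_rshift/op_lshift: JavaScript shifts only use the
// low five bits of the shift amount, so a constant amount of 0 (mod 32) means the shift
// can be dropped and the left operand forwarded as-is. A stand-alone restatement:

#include <cstdint>

int32_t sketchSignedShiftRight(int32_t value, int32_t amount)
{
    int32_t effectiveAmount = amount & 0x1f; // only the low five bits matter
    if (!effectiveAmount)
        return value; // the peephole: x >> 0 is just x
    return value >> effectiveAmount;
}
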
- if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f)) - result = op1; - else - result = addToGraph(BitRShift, op1, op2); - set(currentInstruction[1].u.operand, result, PredictInt32); + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), + addToGraph(BitRShift, op1, op2)); NEXT_OPCODE(op_rshift); } case op_lshift: { - NodeIndex op1 = getToInt32(currentInstruction[2].u.operand); - NodeIndex op2 = getToInt32(currentInstruction[3].u.operand); - predictInt32(op1); - predictInt32(op2); - NodeIndex result; - // Optimize out shifts by zero. - if (isInt32Constant(op2) && !(valueOfInt32Constant(op2) & 0x1f)) - result = op1; - else - result = addToGraph(BitLShift, op1, op2); - set(currentInstruction[1].u.operand, result, PredictInt32); + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), + addToGraph(BitLShift, op1, op2)); NEXT_OPCODE(op_lshift); } case op_urshift: { - NodeIndex op1 = getToInt32(currentInstruction[2].u.operand); - NodeIndex op2 = getToInt32(currentInstruction[3].u.operand); - predictInt32(op1); - predictInt32(op2); - NodeIndex result; - // The result of a zero-extending right shift is treated as an unsigned value. - // This means that if the top bit is set, the result is not in the int32 range, - // and as such must be stored as a double. If the shift amount is a constant, - // we may be able to optimize. - if (isInt32Constant(op2)) { - // If we know we are shifting by a non-zero amount, then since the operation - // zero fills we know the top bit of the result must be zero, and as such the - // result must be within the int32 range. Conversely, if this is a shift by - // zero, then the result may be changed by the conversion to unsigned, but it - // is not necessary to perform the shift! - if (valueOfInt32Constant(op2) & 0x1f) - result = addToGraph(BitURShift, op1, op2); - else - result = addToGraph(UInt32ToNumber, op1); - } else { - // Cannot optimize at this stage; shift & potentially rebox as a double. 
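
// [Illustrative sketch, not part of this patch] The removed op_urshift comment above
// rests on a range fact worth spelling out: a zero-filling shift by a non-zero amount
// always clears the sign bit, so the result fits in int32, while a shift by zero can
// still produce values above INT32_MAX and may therefore need a double. For example:

#include <cstdint>

uint32_t sketchUnsignedShiftRight(int32_t value, int32_t amount)
{
    return static_cast<uint32_t>(value) >> (amount & 0x1f);
}

// sketchUnsignedShiftRight(-1, 1) == 0x7fffffff  -> representable as int32
// sketchUnsignedShiftRight(-1, 0) == 0xffffffff  -> exceeds INT32_MAX, hence UInt32ToNumber
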
- result = addToGraph(BitURShift, op1, op2); - result = addToGraph(UInt32ToNumber, result); - } - set(currentInstruction[1].u.operand, result, PredictInt32); + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), + addToGraph(BitURShift, op1, op2)); NEXT_OPCODE(op_urshift); } - - // === Increment/Decrement opcodes === - - case op_pre_inc: { - unsigned srcDst = currentInstruction[1].u.operand; - NodeIndex op = getToNumber(srcDst); - predictInt32(op); - set(srcDst, addToGraph(ArithAdd, op, one())); - NEXT_OPCODE(op_pre_inc); + + case op_unsigned: { + set(VirtualRegister(currentInstruction[1].u.operand), + makeSafe(addToGraph(UInt32ToNumber, get(VirtualRegister(currentInstruction[2].u.operand))))); + NEXT_OPCODE(op_unsigned); } - case op_post_inc: { - unsigned result = currentInstruction[1].u.operand; - unsigned srcDst = currentInstruction[2].u.operand; - NodeIndex op = getToNumber(srcDst); - predictInt32(op); - set(result, op); - set(srcDst, addToGraph(ArithAdd, op, one())); - NEXT_OPCODE(op_post_inc); - } + // === Increment/Decrement opcodes === - case op_pre_dec: { - unsigned srcDst = currentInstruction[1].u.operand; - NodeIndex op = getToNumber(srcDst); - predictInt32(op); - set(srcDst, addToGraph(ArithSub, op, one())); - NEXT_OPCODE(op_pre_dec); + case op_inc: { + int srcDst = currentInstruction[1].u.operand; + VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst); + Node* op = get(srcDstVirtualRegister); + set(srcDstVirtualRegister, makeSafe(addToGraph(ArithAdd, op, one()))); + NEXT_OPCODE(op_inc); } - case op_post_dec: { - unsigned result = currentInstruction[1].u.operand; - unsigned srcDst = currentInstruction[2].u.operand; - NodeIndex op = getToNumber(srcDst); - predictInt32(op); - set(result, op); - set(srcDst, addToGraph(ArithSub, op, one())); - NEXT_OPCODE(op_post_dec); + case op_dec: { + int srcDst = currentInstruction[1].u.operand; + VirtualRegister srcDstVirtualRegister = VirtualRegister(srcDst); + Node* op = get(srcDstVirtualRegister); + set(srcDstVirtualRegister, makeSafe(addToGraph(ArithSub, op, one()))); + NEXT_OPCODE(op_dec); } // === Arithmetic operations === case op_add: { - ARITHMETIC_OP(); - NodeIndex op1 = get(currentInstruction[2].u.operand); - NodeIndex op2 = get(currentInstruction[3].u.operand); - // If both operands can statically be determined to the numbers, then this is an arithmetic add. - // Otherwise, we must assume this may be performing a concatenation to a string. 
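
// [Illustrative sketch, not part of this patch] Both the removed and the replacement
// op_add code below make the same split: if both operands are statically known to
// produce numbers, a pure arithmetic node suffices; otherwise the generic ValueAdd must
// allow for string concatenation. Restated with placeholder names:

enum class SketchAddLowering { ArithmeticAdd, GenericValueAdd };

SketchAddLowering sketchClassifyAdd(bool leftHasNumberResult, bool rightHasNumberResult)
{
    if (leftHasNumberResult && rightHasNumberResult)
        return SketchAddLowering::ArithmeticAdd; // ArithAdd
    return SketchAddLowering::GenericValueAdd;   // ValueAdd: may concatenate strings
}
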
- if (m_graph[op1].hasNumericResult() && m_graph[op2].hasNumericResult()) - set(currentInstruction[1].u.operand, addToGraph(ArithAdd, toNumber(op1), toNumber(op2))); + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + if (op1->hasNumberResult() && op2->hasNumberResult()) + set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithAdd, op1, op2))); else - set(currentInstruction[1].u.operand, addToGraph(ValueAdd, op1, op2)); + set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ValueAdd, op1, op2))); NEXT_OPCODE(op_add); } case op_sub: { - ARITHMETIC_OP(); - NodeIndex op1 = getToNumber(currentInstruction[2].u.operand); - NodeIndex op2 = getToNumber(currentInstruction[3].u.operand); - set(currentInstruction[1].u.operand, addToGraph(ArithSub, op1, op2)); + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithSub, op1, op2))); NEXT_OPCODE(op_sub); } + case op_negate: { + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithNegate, op1))); + NEXT_OPCODE(op_negate); + } + case op_mul: { - ARITHMETIC_OP(); - NodeIndex op1 = getToNumber(currentInstruction[2].u.operand); - NodeIndex op2 = getToNumber(currentInstruction[3].u.operand); - set(currentInstruction[1].u.operand, addToGraph(ArithMul, op1, op2)); + // Multiply requires that the inputs are not truncated, unfortunately. + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMul, op1, op2))); NEXT_OPCODE(op_mul); } case op_mod: { - ARITHMETIC_OP(); - NodeIndex op1 = getToNumber(currentInstruction[2].u.operand); - NodeIndex op2 = getToNumber(currentInstruction[3].u.operand); - set(currentInstruction[1].u.operand, addToGraph(ArithMod, op1, op2)); + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), makeSafe(addToGraph(ArithMod, op1, op2))); NEXT_OPCODE(op_mod); } case op_div: { - ARITHMETIC_OP(); - NodeIndex op1 = getToNumber(currentInstruction[2].u.operand); - NodeIndex op2 = getToNumber(currentInstruction[3].u.operand); - set(currentInstruction[1].u.operand, addToGraph(ArithDiv, op1, op2)); + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), makeDivSafe(addToGraph(ArithDiv, op1, op2))); NEXT_OPCODE(op_div); } // === Misc operations === + case op_debug: + addToGraph(Breakpoint); + NEXT_OPCODE(op_debug); + + case op_profile_will_call: { + addToGraph(ProfileWillCall); + NEXT_OPCODE(op_profile_will_call); + } + + case op_profile_did_call: { + addToGraph(ProfileDidCall); + NEXT_OPCODE(op_profile_did_call); + } + case op_mov: { - NodeIndex op = get(currentInstruction[2].u.operand); - set(currentInstruction[1].u.operand, op); + Node* op = get(VirtualRegister(currentInstruction[2].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), op); NEXT_OPCODE(op_mov); } + + case op_captured_mov: { + Node* op = 
get(VirtualRegister(currentInstruction[2].u.operand)); + if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet) { + if (set->state() != IsInvalidated) + addToGraph(NotifyWrite, OpInfo(set), op); + } + set(VirtualRegister(currentInstruction[1].u.operand), op); + NEXT_OPCODE(op_captured_mov); + } + + case op_check_has_instance: + addToGraph(CheckHasInstance, get(VirtualRegister(currentInstruction[3].u.operand))); + NEXT_OPCODE(op_check_has_instance); + + case op_instanceof: { + Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* prototype = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(InstanceOf, value, prototype)); + NEXT_OPCODE(op_instanceof); + } + + case op_is_undefined: { + Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsUndefined, value)); + NEXT_OPCODE(op_is_undefined); + } + + case op_is_boolean: { + Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsBoolean, value)); + NEXT_OPCODE(op_is_boolean); + } + + case op_is_number: { + Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsNumber, value)); + NEXT_OPCODE(op_is_number); + } + + case op_is_string: { + Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsString, value)); + NEXT_OPCODE(op_is_string); + } + + case op_is_object: { + Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsObject, value)); + NEXT_OPCODE(op_is_object); + } + + case op_is_function: { + Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(IsFunction, value)); + NEXT_OPCODE(op_is_function); + } case op_not: { - ARITHMETIC_OP(); - NodeIndex value = get(currentInstruction[2].u.operand); - set(currentInstruction[1].u.operand, addToGraph(LogicalNot, value)); + Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, value)); NEXT_OPCODE(op_not); } + + case op_to_primitive: { + Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(ToPrimitive, value)); + NEXT_OPCODE(op_to_primitive); + } + + case op_strcat: { + int startOperand = currentInstruction[2].u.operand; + int numOperands = currentInstruction[3].u.operand; +#if CPU(X86) + // X86 doesn't have enough registers to compile MakeRope with three arguments. + // Rather than try to be clever, we just make MakeRope dumber on this processor. 
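
// [Illustrative sketch, not part of this patch] The op_strcat lowering below gathers up
// to maxRopeArguments operands at a time and, whenever the buffer fills, collapses them
// into one MakeRope whose result is carried as the first operand of the next chunk. The
// same scheme over plain strings, with placeholder names:

#include <string>
#include <vector>

std::string sketchConcatenateInChunks(const std::vector<std::string>& operands, std::size_t maxRopeArguments)
{
    std::vector<std::string> pending;
    for (const std::string& operand : operands) {
        if (pending.size() == maxRopeArguments) {
            std::string combined;
            for (const std::string& piece : pending)
                combined += piece;       // stands in for one MakeRope node
            pending.assign(1, combined); // carry the partial result as operand 0
        }
        pending.push_back(operand);
    }
    std::string result;
    for (const std::string& piece : pending)
        result += piece;                 // the final MakeRope
    return result;
}

// E.g. with maxRopeArguments == 2 (the X86 case above), five operands become
// ((((a+b)+c)+d)+e) rather than a single five-way rope.
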
+ const unsigned maxRopeArguments = 2; +#else + const unsigned maxRopeArguments = 3; +#endif + auto toStringNodes = std::make_unique(numOperands); + for (int i = 0; i < numOperands; i++) + toStringNodes[i] = addToGraph(ToString, get(VirtualRegister(startOperand - i))); + + for (int i = 0; i < numOperands; i++) + addToGraph(Phantom, toStringNodes[i]); + + Node* operands[AdjacencyList::Size]; + unsigned indexInOperands = 0; + for (unsigned i = 0; i < AdjacencyList::Size; ++i) + operands[i] = 0; + for (int operandIdx = 0; operandIdx < numOperands; ++operandIdx) { + if (indexInOperands == maxRopeArguments) { + operands[0] = addToGraph(MakeRope, operands[0], operands[1], operands[2]); + for (unsigned i = 1; i < AdjacencyList::Size; ++i) + operands[i] = 0; + indexInOperands = 1; + } + + ASSERT(indexInOperands < AdjacencyList::Size); + ASSERT(indexInOperands < maxRopeArguments); + operands[indexInOperands++] = toStringNodes[operandIdx]; + } + set(VirtualRegister(currentInstruction[1].u.operand), + addToGraph(MakeRope, operands[0], operands[1], operands[2])); + NEXT_OPCODE(op_strcat); + } case op_less: { - ARITHMETIC_OP(); - NodeIndex op1 = get(currentInstruction[2].u.operand); - NodeIndex op2 = get(currentInstruction[3].u.operand); - set(currentInstruction[1].u.operand, addToGraph(CompareLess, op1, op2)); + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLess, op1, op2)); NEXT_OPCODE(op_less); } case op_lesseq: { - ARITHMETIC_OP(); - NodeIndex op1 = get(currentInstruction[2].u.operand); - NodeIndex op2 = get(currentInstruction[3].u.operand); - set(currentInstruction[1].u.operand, addToGraph(CompareLessEq, op1, op2)); + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareLessEq, op1, op2)); NEXT_OPCODE(op_lesseq); } + case op_greater: { + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreater, op1, op2)); + NEXT_OPCODE(op_greater); + } + + case op_greatereq: { + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareGreaterEq, op1, op2)); + NEXT_OPCODE(op_greatereq); + } + case op_eq: { - ARITHMETIC_OP(); - NodeIndex op1 = get(currentInstruction[2].u.operand); - NodeIndex op2 = get(currentInstruction[3].u.operand); - set(currentInstruction[1].u.operand, addToGraph(CompareEq, op1, op2)); + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEq, op1, op2)); NEXT_OPCODE(op_eq); } case op_eq_null: { - ARITHMETIC_OP(); - NodeIndex value = get(currentInstruction[2].u.operand); - set(currentInstruction[1].u.operand, addToGraph(CompareEq, value, constantNull())); + Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareEqConstant, value, constantNull())); NEXT_OPCODE(op_eq_null); } case op_stricteq: { - ARITHMETIC_OP(); - NodeIndex op1 = 
get(currentInstruction[2].u.operand); - NodeIndex op2 = get(currentInstruction[3].u.operand); - set(currentInstruction[1].u.operand, addToGraph(CompareStrictEq, op1, op2)); + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CompareStrictEq, op1, op2)); NEXT_OPCODE(op_stricteq); } case op_neq: { - ARITHMETIC_OP(); - NodeIndex op1 = get(currentInstruction[2].u.operand); - NodeIndex op2 = get(currentInstruction[3].u.operand); - set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2))); + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEq, op1, op2))); NEXT_OPCODE(op_neq); } case op_neq_null: { - ARITHMETIC_OP(); - NodeIndex value = get(currentInstruction[2].u.operand); - set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareEq, value, constantNull()))); + Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, addToGraph(CompareEqConstant, value, constantNull()))); NEXT_OPCODE(op_neq_null); } case op_nstricteq: { - ARITHMETIC_OP(); - NodeIndex op1 = get(currentInstruction[2].u.operand); - NodeIndex op2 = get(currentInstruction[3].u.operand); - set(currentInstruction[1].u.operand, addToGraph(LogicalNot, addToGraph(CompareStrictEq, op1, op2))); + Node* op1 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[3].u.operand)); + Node* invertedResult; + invertedResult = addToGraph(CompareStrictEq, op1, op2); + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(LogicalNot, invertedResult)); NEXT_OPCODE(op_nstricteq); } // === Property access operations === case op_get_by_val: { - NodeIndex base = get(currentInstruction[2].u.operand); - NodeIndex property = get(currentInstruction[3].u.operand); - predictArray(base); - predictInt32(property); - - NodeIndex getByVal = addToGraph(GetByVal, base, property, aliases.lookupGetByVal(base, property)); - set(currentInstruction[1].u.operand, getByVal); - aliases.recordGetByVal(getByVal); + SpeculatedType prediction = getPrediction(); + + Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); + ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Read); + Node* property = get(VirtualRegister(currentInstruction[3].u.operand)); + Node* getByVal = addToGraph(GetByVal, OpInfo(arrayMode.asWord()), OpInfo(prediction), base, property); + set(VirtualRegister(currentInstruction[1].u.operand), getByVal); NEXT_OPCODE(op_get_by_val); } + case op_put_by_val_direct: case op_put_by_val: { - NodeIndex base = get(currentInstruction[1].u.operand); - NodeIndex property = get(currentInstruction[2].u.operand); - NodeIndex value = get(currentInstruction[3].u.operand); - predictArray(base); - predictInt32(property); - - NodeIndex aliasedGet = aliases.lookupGetByVal(base, property); - NodeIndex putByVal = addToGraph(aliasedGet != NoNode ? 
PutByValAlias : PutByVal, base, property, value); - aliases.recordPutByVal(putByVal); + Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); + + ArrayMode arrayMode = getArrayModeConsideringSlowPath(currentInstruction[4].u.arrayProfile, Array::Write); + + Node* property = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* value = get(VirtualRegister(currentInstruction[3].u.operand)); + + addVarArgChild(base); + addVarArgChild(property); + addVarArgChild(value); + addVarArgChild(0); // Leave room for property storage. + addVarArgChild(0); // Leave room for length. + addToGraph(Node::VarArg, opcodeID == op_put_by_val_direct ? PutByValDirect : PutByVal, OpInfo(arrayMode.asWord()), OpInfo(0)); NEXT_OPCODE(op_put_by_val); } - - case op_get_by_id: { - NodeIndex base = get(currentInstruction[2].u.operand); - unsigned identifier = currentInstruction[3].u.operand; - - NodeIndex getById = addToGraph(GetById, OpInfo(identifier), base); - set(currentInstruction[1].u.operand, getById); - aliases.recordGetById(getById); + + case op_get_by_id: + case op_get_by_id_out_of_line: + case op_get_array_length: { + SpeculatedType prediction = getPrediction(); + + Node* base = get(VirtualRegister(currentInstruction[2].u.operand)); + unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; + + StringImpl* uid = m_graph.identifiers()[identifierNumber]; + GetByIdStatus getByIdStatus = GetByIdStatus::computeFor( + m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock, + m_inlineStackTop->m_stubInfos, m_dfgStubInfos, + currentCodeOrigin(), uid); + + handleGetById( + currentInstruction[1].u.operand, prediction, base, identifierNumber, getByIdStatus); NEXT_OPCODE(op_get_by_id); } - - case op_put_by_id: { - NodeIndex value = get(currentInstruction[3].u.operand); - NodeIndex base = get(currentInstruction[1].u.operand); - unsigned identifier = currentInstruction[2].u.operand; + case op_put_by_id: + case op_put_by_id_out_of_line: + case op_put_by_id_transition_direct: + case op_put_by_id_transition_normal: + case op_put_by_id_transition_direct_out_of_line: + case op_put_by_id_transition_normal_out_of_line: { + Node* value = get(VirtualRegister(currentInstruction[3].u.operand)); + Node* base = get(VirtualRegister(currentInstruction[1].u.operand)); + unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand]; bool direct = currentInstruction[8].u.operand; - if (direct) { - NodeIndex putByIdDirect = addToGraph(PutByIdDirect, OpInfo(identifier), base, value); - aliases.recordPutByIdDirect(putByIdDirect); - } else { - NodeIndex putById = addToGraph(PutById, OpInfo(identifier), base, value); - aliases.recordPutById(putById); - } - + PutByIdStatus putByIdStatus = PutByIdStatus::computeFor( + m_inlineStackTop->m_profiledBlock, m_dfgCodeBlock, + m_inlineStackTop->m_stubInfos, m_dfgStubInfos, + currentCodeOrigin(), m_graph.identifiers()[identifierNumber]); + + handlePutById(base, identifierNumber, value, putByIdStatus, direct); NEXT_OPCODE(op_put_by_id); } - case op_get_global_var: { - NodeIndex getGlobalVar = addToGraph(GetGlobalVar, OpInfo(currentInstruction[2].u.operand)); - set(currentInstruction[1].u.operand, getGlobalVar); - NEXT_OPCODE(op_get_global_var); + case op_init_global_const_nop: { + NEXT_OPCODE(op_init_global_const_nop); } - case op_put_global_var: { - NodeIndex value = get(currentInstruction[2].u.operand); - addToGraph(PutGlobalVar, OpInfo(currentInstruction[1].u.operand), value); - NEXT_OPCODE(op_put_global_var); + 
case op_init_global_const: { + Node* value = get(VirtualRegister(currentInstruction[2].u.operand)); + addToGraph( + PutGlobalVar, + OpInfo(m_inlineStackTop->m_codeBlock->globalObject()->assertRegisterIsInThisObject(currentInstruction[1].u.registerPointer)), + value); + NEXT_OPCODE(op_init_global_const); } // === Block terminators. === case op_jmp: { - unsigned relativeOffset = currentInstruction[1].u.operand; + int relativeOffset = currentInstruction[1].u.operand; + if (relativeOffset <= 0) + flushForTerminal(); addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); LAST_OPCODE(op_jmp); } - case op_loop: { - unsigned relativeOffset = currentInstruction[1].u.operand; - addToGraph(Jump, OpInfo(m_currentIndex + relativeOffset)); - LAST_OPCODE(op_loop); - } - case op_jtrue: { unsigned relativeOffset = currentInstruction[2].u.operand; - NodeIndex condition = get(currentInstruction[1].u.operand); - addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jtrue)), condition); + Node* condition = get(VirtualRegister(currentInstruction[1].u.operand)); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jtrue))), condition); LAST_OPCODE(op_jtrue); } case op_jfalse: { unsigned relativeOffset = currentInstruction[2].u.operand; - NodeIndex condition = get(currentInstruction[1].u.operand); - addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jfalse)), OpInfo(m_currentIndex + relativeOffset), condition); + Node* condition = get(VirtualRegister(currentInstruction[1].u.operand)); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jfalse), m_currentIndex + relativeOffset)), condition); LAST_OPCODE(op_jfalse); } - case op_loop_if_true: { - unsigned relativeOffset = currentInstruction[2].u.operand; - NodeIndex condition = get(currentInstruction[1].u.operand); - addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_true)), condition); - LAST_OPCODE(op_loop_if_true); - } - - case op_loop_if_false: { - unsigned relativeOffset = currentInstruction[2].u.operand; - NodeIndex condition = get(currentInstruction[1].u.operand); - addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_false)), OpInfo(m_currentIndex + relativeOffset), condition); - LAST_OPCODE(op_loop_if_false); - } - case op_jeq_null: { unsigned relativeOffset = currentInstruction[2].u.operand; - NodeIndex value = get(currentInstruction[1].u.operand); - NodeIndex condition = addToGraph(CompareEq, value, constantNull()); - addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jeq_null)), condition); + Node* value = get(VirtualRegister(currentInstruction[1].u.operand)); + Node* condition = addToGraph(CompareEqConstant, value, constantNull()); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jeq_null))), condition); LAST_OPCODE(op_jeq_null); } case op_jneq_null: { unsigned relativeOffset = currentInstruction[2].u.operand; - NodeIndex value = get(currentInstruction[1].u.operand); - NodeIndex condition = addToGraph(CompareEq, value, constantNull()); - addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_null)), OpInfo(m_currentIndex + relativeOffset), condition); + Node* value = get(VirtualRegister(currentInstruction[1].u.operand)); + Node* condition = addToGraph(CompareEqConstant, value, constantNull()); + addToGraph(Branch, 
OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jneq_null), m_currentIndex + relativeOffset)), condition); LAST_OPCODE(op_jneq_null); } - case op_jnless: { - unsigned relativeOffset = currentInstruction[3].u.operand; - NodeIndex op1 = get(currentInstruction[1].u.operand); - NodeIndex op2 = get(currentInstruction[2].u.operand); - NodeIndex condition = addToGraph(CompareLess, op1, op2); - addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnless)), OpInfo(m_currentIndex + relativeOffset), condition); - LAST_OPCODE(op_jnless); - } - - case op_jnlesseq: { - unsigned relativeOffset = currentInstruction[3].u.operand; - NodeIndex op1 = get(currentInstruction[1].u.operand); - NodeIndex op2 = get(currentInstruction[2].u.operand); - NodeIndex condition = addToGraph(CompareLessEq, op1, op2); - addToGraph(Branch, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jnlesseq)), OpInfo(m_currentIndex + relativeOffset), condition); - LAST_OPCODE(op_jnlesseq); - } - case op_jless: { unsigned relativeOffset = currentInstruction[3].u.operand; - NodeIndex op1 = get(currentInstruction[1].u.operand); - NodeIndex op2 = get(currentInstruction[2].u.operand); - NodeIndex condition = addToGraph(CompareLess, op1, op2); - addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jless)), condition); + Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* condition = addToGraph(CompareLess, op1, op2); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jless))), condition); LAST_OPCODE(op_jless); } case op_jlesseq: { unsigned relativeOffset = currentInstruction[3].u.operand; - NodeIndex op1 = get(currentInstruction[1].u.operand); - NodeIndex op2 = get(currentInstruction[2].u.operand); - NodeIndex condition = addToGraph(CompareLessEq, op1, op2); - addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_jlesseq)), condition); + Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* condition = addToGraph(CompareLessEq, op1, op2); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jlesseq))), condition); LAST_OPCODE(op_jlesseq); } - case op_loop_if_less: { + case op_jgreater: { unsigned relativeOffset = currentInstruction[3].u.operand; - NodeIndex op1 = get(currentInstruction[1].u.operand); - NodeIndex op2 = get(currentInstruction[2].u.operand); - NodeIndex condition = addToGraph(CompareLess, op1, op2); - addToGraph(Branch, OpInfo(m_currentIndex + relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_less)), condition); - LAST_OPCODE(op_loop_if_less); + Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* condition = addToGraph(CompareGreater, op1, op2); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreater))), condition); + LAST_OPCODE(op_jgreater); } - case op_loop_if_lesseq: { + case op_jgreatereq: { unsigned relativeOffset = currentInstruction[3].u.operand; - NodeIndex op1 = get(currentInstruction[1].u.operand); - NodeIndex op2 = get(currentInstruction[2].u.operand); - NodeIndex condition = addToGraph(CompareLessEq, op1, op2); - addToGraph(Branch, OpInfo(m_currentIndex + 
relativeOffset), OpInfo(m_currentIndex + OPCODE_LENGTH(op_loop_if_lesseq)), condition); - LAST_OPCODE(op_loop_if_lesseq); + Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* condition = addToGraph(CompareGreaterEq, op1, op2); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + relativeOffset, m_currentIndex + OPCODE_LENGTH(op_jgreatereq))), condition); + LAST_OPCODE(op_jgreatereq); } - case op_ret: { - addToGraph(Return, get(currentInstruction[1].u.operand)); - LAST_OPCODE(op_ret); + case op_jnless: { + unsigned relativeOffset = currentInstruction[3].u.operand; + Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* condition = addToGraph(CompareLess, op1, op2); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnless), m_currentIndex + relativeOffset)), condition); + LAST_OPCODE(op_jnless); } - default: - // Parse failed! - return false; + case op_jnlesseq: { + unsigned relativeOffset = currentInstruction[3].u.operand; + Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* condition = addToGraph(CompareLessEq, op1, op2); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jnlesseq), m_currentIndex + relativeOffset)), condition); + LAST_OPCODE(op_jnlesseq); } - } -} - -template -void ByteCodeParser::processPhiStack() -{ - Vector& phiStack = (stackType == ArgumentPhiStack) ? m_argumentPhiStack : m_localPhiStack; - - while (!phiStack.isEmpty()) { - PhiStackEntry entry = phiStack.last(); - phiStack.removeLast(); - - Node& phiNode = m_graph[entry.m_phi]; - PredecessorList& predecessors = entry.m_block->m_predecessors; - unsigned varNo = entry.m_varNo; - - for (size_t i = 0; i < predecessors.size(); ++i) { - BasicBlock* predecessorBlock = m_graph.m_blocks[predecessors[i]].get(); - VariableRecord& var = (stackType == ArgumentPhiStack) ? 
predecessorBlock->m_arguments[varNo] : predecessorBlock->m_locals[varNo]; + case op_jngreater: { + unsigned relativeOffset = currentInstruction[3].u.operand; + Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* condition = addToGraph(CompareGreater, op1, op2); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreater), m_currentIndex + relativeOffset)), condition); + LAST_OPCODE(op_jngreater); + } - NodeIndex valueInPredecessor = var.value; - if (valueInPredecessor == NoNode) { - valueInPredecessor = addToGraph(Phi); - var.value = valueInPredecessor; - phiStack.append(PhiStackEntry(predecessorBlock, valueInPredecessor, varNo)); - } else if (m_graph[valueInPredecessor].op == GetLocal) - valueInPredecessor = m_graph[valueInPredecessor].child1; - ASSERT(m_graph[valueInPredecessor].op == SetLocal || m_graph[valueInPredecessor].op == Phi); + case op_jngreatereq: { + unsigned relativeOffset = currentInstruction[3].u.operand; + Node* op1 = get(VirtualRegister(currentInstruction[1].u.operand)); + Node* op2 = get(VirtualRegister(currentInstruction[2].u.operand)); + Node* condition = addToGraph(CompareGreaterEq, op1, op2); + addToGraph(Branch, OpInfo(branchData(m_currentIndex + OPCODE_LENGTH(op_jngreatereq), m_currentIndex + relativeOffset)), condition); + LAST_OPCODE(op_jngreatereq); + } + + case op_switch_imm: { + SwitchData& data = *m_graph.m_switchData.add(); + data.kind = SwitchImm; + data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand]; + data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); + SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex); + for (unsigned i = 0; i < table.branchOffsets.size(); ++i) { + if (!table.branchOffsets[i]) + continue; + unsigned target = m_currentIndex + table.branchOffsets[i]; + if (target == data.fallThrough.bytecodeIndex()) + continue; + data.cases.append(SwitchCase::withBytecodeIndex(jsNumber(static_cast(table.min + i)), target)); + } + flushIfTerminal(data); + addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand))); + LAST_OPCODE(op_switch_imm); + } + + case op_switch_char: { + SwitchData& data = *m_graph.m_switchData.add(); + data.kind = SwitchChar; + data.switchTableIndex = m_inlineStackTop->m_switchRemap[currentInstruction[1].u.operand]; + data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); + SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex); + for (unsigned i = 0; i < table.branchOffsets.size(); ++i) { + if (!table.branchOffsets[i]) + continue; + unsigned target = m_currentIndex + table.branchOffsets[i]; + if (target == data.fallThrough.bytecodeIndex()) + continue; + data.cases.append( + SwitchCase::withBytecodeIndex(LazyJSValue::singleCharacterString(table.min + i), target)); + } + flushIfTerminal(data); + addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand))); + LAST_OPCODE(op_switch_char); + } + + case op_switch_string: { + SwitchData& data = *m_graph.m_switchData.add(); + data.kind = SwitchString; + data.switchTableIndex = currentInstruction[1].u.operand; + data.fallThrough.setBytecodeIndex(m_currentIndex + currentInstruction[2].u.operand); + StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex); + StringJumpTable::StringOffsetTable::iterator iter; + 
StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end(); + for (iter = table.offsetTable.begin(); iter != end; ++iter) { + unsigned target = m_currentIndex + iter->value.branchOffset; + if (target == data.fallThrough.bytecodeIndex()) + continue; + data.cases.append( + SwitchCase::withBytecodeIndex(LazyJSValue::knownStringImpl(iter->key.get()), target)); + } + flushIfTerminal(data); + addToGraph(Switch, OpInfo(&data), get(VirtualRegister(currentInstruction[3].u.operand))); + LAST_OPCODE(op_switch_string); + } + + case op_ret: + flushForReturn(); + if (inlineCallFrame()) { + ASSERT(m_inlineStackTop->m_returnValue.isValid()); + setDirect(m_inlineStackTop->m_returnValue, get(VirtualRegister(currentInstruction[1].u.operand)), ImmediateSetWithFlush); + m_inlineStackTop->m_didReturn = true; + if (m_inlineStackTop->m_unlinkedBlocks.isEmpty()) { + // If we're returning from the first block, then we're done parsing. + ASSERT(m_inlineStackTop->m_callsiteBlockHead == m_graph.lastBlock()); + shouldContinueParsing = false; + LAST_OPCODE(op_ret); + } else { + // If inlining created blocks, and we're doing a return, then we need some + // special linking. + ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock()); + m_inlineStackTop->m_unlinkedBlocks.last().m_needsNormalLinking = false; + } + if (m_currentIndex + OPCODE_LENGTH(op_ret) != m_inlineStackTop->m_codeBlock->instructions().size() || m_inlineStackTop->m_didEarlyReturn) { + ASSERT(m_currentIndex + OPCODE_LENGTH(op_ret) <= m_inlineStackTop->m_codeBlock->instructions().size()); + addToGraph(Jump, OpInfo(0)); + m_inlineStackTop->m_unlinkedBlocks.last().m_needsEarlyReturnLinking = true; + m_inlineStackTop->m_didEarlyReturn = true; + } + LAST_OPCODE(op_ret); + } + addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand))); + LAST_OPCODE(op_ret); + + case op_end: + flushForReturn(); + ASSERT(!inlineCallFrame()); + addToGraph(Return, get(VirtualRegister(currentInstruction[1].u.operand))); + LAST_OPCODE(op_end); + + case op_throw: + addToGraph(Throw, get(VirtualRegister(currentInstruction[1].u.operand))); + flushForTerminal(); + addToGraph(Unreachable); + LAST_OPCODE(op_throw); + + case op_throw_static_error: + addToGraph(ThrowReferenceError); + flushForTerminal(); + addToGraph(Unreachable); + LAST_OPCODE(op_throw_static_error); + + case op_call: + handleCall(currentInstruction, Call, CodeForCall); + NEXT_OPCODE(op_call); + + case op_construct: + handleCall(currentInstruction, Construct, CodeForConstruct); + NEXT_OPCODE(op_construct); + + case op_call_varargs: { + int result = currentInstruction[1].u.operand; + int callee = currentInstruction[2].u.operand; + int thisReg = currentInstruction[3].u.operand; + int arguments = currentInstruction[4].u.operand; + int firstFreeReg = currentInstruction[5].u.operand; + + ASSERT(inlineCallFrame()); + ASSERT_UNUSED(arguments, arguments == m_inlineStackTop->m_codeBlock->argumentsRegister().offset()); + ASSERT(!m_inlineStackTop->m_codeBlock->symbolTable()->slowArguments()); + + addToGraph(CheckArgumentsNotCreated); + + unsigned argCount = inlineCallFrame()->arguments.size(); + + // Let's compute the register offset. We start with the last used register, and + // then adjust for the things we want in the call frame. + int registerOffset = firstFreeReg + 1; + registerOffset -= argCount; // We will be passing some arguments. + registerOffset -= JSStack::CallFrameHeaderSize; // We will pretend to have a call frame header. + + // Get the alignment right. 
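+            // A rough worked example (hypothetical numbers; the real alignment comes
+            // from stackAlignmentRegisters()): with registerOffset at -47 and an
+            // alignment of 2 registers, roundUpToMultipleOf(2, 47) is 48, so the
+            // negation below lands the frame at -48 - the offset is pushed away from
+            // zero to the next aligned slot rather than toward it.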
+ registerOffset = -WTF::roundUpToMultipleOf( + stackAlignmentRegisters(), + -registerOffset); + + ensureLocals( + m_inlineStackTop->remapOperand( + VirtualRegister(registerOffset)).toLocal()); + + // The bytecode wouldn't have set up the arguments. But we'll do it and make it + // look like the bytecode had done it. + int nextRegister = registerOffset + JSStack::CallFrameHeaderSize; + set(VirtualRegister(nextRegister++), get(VirtualRegister(thisReg)), ImmediateNakedSet); + for (unsigned argument = 1; argument < argCount; ++argument) + set(VirtualRegister(nextRegister++), get(virtualRegisterForArgument(argument)), ImmediateNakedSet); + + handleCall( + result, Call, CodeForCall, OPCODE_LENGTH(op_call_varargs), + callee, argCount, registerOffset); + NEXT_OPCODE(op_call_varargs); + } + + case op_jneq_ptr: + // Statically speculate for now. It makes sense to let speculate-only jneq_ptr + // support simmer for a while before making it more general, since it's + // already gnarly enough as it is. + ASSERT(pointerIsFunction(currentInstruction[2].u.specialPointer)); + addToGraph( + CheckFunction, + OpInfo(actualPointerFor(m_inlineStackTop->m_codeBlock, currentInstruction[2].u.specialPointer)), + get(VirtualRegister(currentInstruction[1].u.operand))); + addToGraph(Jump, OpInfo(m_currentIndex + OPCODE_LENGTH(op_jneq_ptr))); + LAST_OPCODE(op_jneq_ptr); + + case op_resolve_scope: { + int dst = currentInstruction[1].u.operand; + ResolveType resolveType = static_cast(currentInstruction[3].u.operand); + unsigned depth = currentInstruction[4].u.operand; + + // get_from_scope and put_to_scope depend on this watchpoint forcing OSR exit, so they don't add their own watchpoints. + if (needsVarInjectionChecks(resolveType)) + addToGraph(VarInjectionWatchpoint); + + switch (resolveType) { + case GlobalProperty: + case GlobalVar: + case GlobalPropertyWithVarInjectionChecks: + case GlobalVarWithVarInjectionChecks: + set(VirtualRegister(dst), cellConstant(m_inlineStackTop->m_codeBlock->globalObject())); + break; + case ClosureVar: + case ClosureVarWithVarInjectionChecks: { + JSActivation* activation = currentInstruction[5].u.activation.get(); + if (activation + && activation->symbolTable()->m_functionEnteredOnce.isStillValid()) { + addToGraph(FunctionReentryWatchpoint, OpInfo(activation->symbolTable())); + set(VirtualRegister(dst), cellConstant(activation)); + break; + } + set(VirtualRegister(dst), + getScope(m_inlineStackTop->m_codeBlock->needsActivation(), depth)); + break; + } + case Dynamic: + RELEASE_ASSERT_NOT_REACHED(); + break; + } + NEXT_OPCODE(op_resolve_scope); + } + + case op_get_from_scope: { + int dst = currentInstruction[1].u.operand; + int scope = currentInstruction[2].u.operand; + unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[3].u.operand]; + StringImpl* uid = m_graph.identifiers()[identifierNumber]; + ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type(); + + Structure* structure = 0; + WatchpointSet* watchpoints = 0; + uintptr_t operand; + { + ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); + if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks) + watchpoints = currentInstruction[5].u.watchpointSet; + else + structure = currentInstruction[5].u.structure.get(); + operand = reinterpret_cast(currentInstruction[6].u.pointer); + } - if (phiNode.refCount()) - m_graph.ref(valueInPredecessor); + UNUSED_PARAM(watchpoints); // We will use this in the future. 
For now we set it as a way of documenting the fact that that's what index 5 is in GlobalVar mode. + + SpeculatedType prediction = getPrediction(); + JSGlobalObject* globalObject = m_inlineStackTop->m_codeBlock->globalObject(); + + switch (resolveType) { + case GlobalProperty: + case GlobalPropertyWithVarInjectionChecks: { + GetByIdStatus status = GetByIdStatus::computeFor(*m_vm, structure, uid); + if (status.state() != GetByIdStatus::Simple || status.numVariants() != 1) { + set(VirtualRegister(dst), addToGraph(GetByIdFlush, OpInfo(identifierNumber), OpInfo(prediction), get(VirtualRegister(scope)))); + break; + } + Node* base = cellConstantWithStructureCheck(globalObject, status[0].structureSet().singletonStructure()); + addToGraph(Phantom, get(VirtualRegister(scope))); + if (JSValue specificValue = status[0].specificValue()) + set(VirtualRegister(dst), cellConstant(specificValue.asCell())); + else + set(VirtualRegister(dst), handleGetByOffset(prediction, base, identifierNumber, operand)); + break; + } + case GlobalVar: + case GlobalVarWithVarInjectionChecks: { + addToGraph(Phantom, get(VirtualRegister(scope))); + SymbolTableEntry entry = globalObject->symbolTable()->get(uid); + VariableWatchpointSet* watchpointSet = entry.watchpointSet(); + JSValue specificValue = + watchpointSet ? watchpointSet->inferredValue() : JSValue(); + if (!specificValue) { + set(VirtualRegister(dst), addToGraph(GetGlobalVar, OpInfo(operand), OpInfo(prediction))); + break; + } + + addToGraph(VariableWatchpoint, OpInfo(watchpointSet)); + set(VirtualRegister(dst), inferredConstant(specificValue)); + break; + } + case ClosureVar: + case ClosureVarWithVarInjectionChecks: { + Node* scopeNode = get(VirtualRegister(scope)); + if (JSActivation* activation = m_graph.tryGetActivation(scopeNode)) { + SymbolTable* symbolTable = activation->symbolTable(); + ConcurrentJITLocker locker(symbolTable->m_lock); + SymbolTable::Map::iterator iter = symbolTable->find(locker, uid); + ASSERT(iter != symbolTable->end(locker)); + VariableWatchpointSet* watchpointSet = iter->value.watchpointSet(); + if (watchpointSet) { + if (JSValue value = watchpointSet->inferredValue()) { + addToGraph(Phantom, scopeNode); + addToGraph(VariableWatchpoint, OpInfo(watchpointSet)); + set(VirtualRegister(dst), inferredConstant(value)); + break; + } + } + } + set(VirtualRegister(dst), + addToGraph(GetClosureVar, OpInfo(operand), OpInfo(prediction), + addToGraph(GetClosureRegisters, scopeNode))); + break; + } + case Dynamic: + RELEASE_ASSERT_NOT_REACHED(); + break; + } + NEXT_OPCODE(op_get_from_scope); + } + + case op_put_to_scope: { + unsigned scope = currentInstruction[1].u.operand; + unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand]; + unsigned value = currentInstruction[3].u.operand; + ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type(); + StringImpl* uid = m_graph.identifiers()[identifierNumber]; + + Structure* structure = 0; + VariableWatchpointSet* watchpoints = 0; + uintptr_t operand; + { + ConcurrentJITLocker locker(m_inlineStackTop->m_profiledBlock->m_lock); + if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks) + watchpoints = currentInstruction[5].u.watchpointSet; + else + structure = currentInstruction[5].u.structure.get(); + operand = reinterpret_cast(currentInstruction[6].u.pointer); + } - if (phiNode.child1 == NoNode) { - phiNode.child1 = valueInPredecessor; - continue; + JSGlobalObject* globalObject = 
m_inlineStackTop->m_codeBlock->globalObject(); + + switch (resolveType) { + case GlobalProperty: + case GlobalPropertyWithVarInjectionChecks: { + PutByIdStatus status = PutByIdStatus::computeFor(*m_vm, globalObject, structure, uid, false); + if (status.numVariants() != 1 || status[0].kind() != PutByIdVariant::Replace) { + addToGraph(PutById, OpInfo(identifierNumber), get(VirtualRegister(scope)), get(VirtualRegister(value))); + break; + } + Node* base = cellConstantWithStructureCheck(globalObject, status[0].structure()); + addToGraph(Phantom, get(VirtualRegister(scope))); + handlePutByOffset(base, identifierNumber, static_cast(operand), get(VirtualRegister(value))); + // Keep scope alive until after put. + addToGraph(Phantom, get(VirtualRegister(scope))); + break; } - if (phiNode.child2 == NoNode) { - phiNode.child2 = valueInPredecessor; - continue; + case GlobalVar: + case GlobalVarWithVarInjectionChecks: { + SymbolTableEntry entry = globalObject->symbolTable()->get(uid); + ASSERT(watchpoints == entry.watchpointSet()); + Node* valueNode = get(VirtualRegister(value)); + addToGraph(PutGlobalVar, OpInfo(operand), valueNode); + if (watchpoints->state() != IsInvalidated) + addToGraph(NotifyWrite, OpInfo(watchpoints), valueNode); + // Keep scope alive until after put. + addToGraph(Phantom, get(VirtualRegister(scope))); + break; } - if (phiNode.child3 == NoNode) { - phiNode.child3 = valueInPredecessor; - continue; + case ClosureVar: + case ClosureVarWithVarInjectionChecks: { + Node* scopeNode = get(VirtualRegister(scope)); + Node* scopeRegisters = addToGraph(GetClosureRegisters, scopeNode); + addToGraph(PutClosureVar, OpInfo(operand), scopeNode, scopeRegisters, get(VirtualRegister(value))); + break; } + case Dynamic: + RELEASE_ASSERT_NOT_REACHED(); + break; + } + NEXT_OPCODE(op_put_to_scope); + } + + case op_loop_hint: { + // Baseline->DFG OSR jumps between loop hints. The DFG assumes that Baseline->DFG + // OSR can only happen at basic block boundaries. Assert that these two statements + // are compatible. + RELEASE_ASSERT(m_currentIndex == blockBegin); + + // We never do OSR into an inlined code block. That could not happen, since OSR + // looks up the code block that is the replacement for the baseline JIT code + // block. Hence, machine code block = true code block = not inline code block. 
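+            // Illustration (function names are made up): if f() inlines g(), a loop
+            // hint that came from g's bytecode does not mark this block as an OSR
+            // target here; tier-up OSR for g's loop would have to enter a compilation
+            // whose machine code block is g itself.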
+ if (!m_inlineStackTop->m_caller) + m_currentBlock->isOSRTarget = true; + + addToGraph(LoopHint); + + if (m_vm->watchdog && m_vm->watchdog->isEnabled()) + addToGraph(CheckWatchdogTimer); + + NEXT_OPCODE(op_loop_hint); + } + + case op_init_lazy_reg: { + set(VirtualRegister(currentInstruction[1].u.operand), getJSConstantForValue(JSValue())); + ASSERT(operandIsLocal(currentInstruction[1].u.operand)); + m_graph.m_lazyVars.set(VirtualRegister(currentInstruction[1].u.operand).toLocal()); + NEXT_OPCODE(op_init_lazy_reg); + } + + case op_create_activation: { + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(CreateActivation, get(VirtualRegister(currentInstruction[1].u.operand)))); + NEXT_OPCODE(op_create_activation); + } + + case op_create_arguments: { + m_graph.m_hasArguments = true; + Node* createArguments = addToGraph(CreateArguments, get(VirtualRegister(currentInstruction[1].u.operand))); + set(VirtualRegister(currentInstruction[1].u.operand), createArguments); + set(unmodifiedArgumentsRegister(VirtualRegister(currentInstruction[1].u.operand)), createArguments); + NEXT_OPCODE(op_create_arguments); + } + + case op_tear_off_activation: { + addToGraph(TearOffActivation, get(VirtualRegister(currentInstruction[1].u.operand))); + NEXT_OPCODE(op_tear_off_activation); + } + + case op_tear_off_arguments: { + m_graph.m_hasArguments = true; + addToGraph(TearOffArguments, get(unmodifiedArgumentsRegister(VirtualRegister(currentInstruction[1].u.operand))), get(VirtualRegister(currentInstruction[2].u.operand))); + NEXT_OPCODE(op_tear_off_arguments); + } + + case op_get_arguments_length: { + m_graph.m_hasArguments = true; + set(VirtualRegister(currentInstruction[1].u.operand), addToGraph(GetMyArgumentsLengthSafe)); + NEXT_OPCODE(op_get_arguments_length); + } + + case op_get_argument_by_val: { + m_graph.m_hasArguments = true; + set(VirtualRegister(currentInstruction[1].u.operand), + addToGraph( + GetMyArgumentByValSafe, OpInfo(0), OpInfo(getPrediction()), + get(VirtualRegister(currentInstruction[3].u.operand)))); + NEXT_OPCODE(op_get_argument_by_val); + } + + case op_new_func: { + if (!currentInstruction[3].u.operand) { + set(VirtualRegister(currentInstruction[1].u.operand), + addToGraph(NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand))); + } else { + set(VirtualRegister(currentInstruction[1].u.operand), + addToGraph( + NewFunction, + OpInfo(currentInstruction[2].u.operand), + get(VirtualRegister(currentInstruction[1].u.operand)))); + } + NEXT_OPCODE(op_new_func); + } + + case op_new_captured_func: { + Node* function = addToGraph( + NewFunctionNoCheck, OpInfo(currentInstruction[2].u.operand)); + if (VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet) + addToGraph(NotifyWrite, OpInfo(set), function); + set(VirtualRegister(currentInstruction[1].u.operand), function); + NEXT_OPCODE(op_new_captured_func); + } + + case op_new_func_exp: { + set(VirtualRegister(currentInstruction[1].u.operand), + addToGraph(NewFunctionExpression, OpInfo(currentInstruction[2].u.operand))); + NEXT_OPCODE(op_new_func_exp); + } - NodeIndex newPhi = addToGraph(Phi); - Node& newPhiNode = m_graph[newPhi]; - if (phiNode.refCount()) - m_graph.ref(newPhi); + case op_typeof: { + set(VirtualRegister(currentInstruction[1].u.operand), + addToGraph(TypeOf, get(VirtualRegister(currentInstruction[2].u.operand)))); + NEXT_OPCODE(op_typeof); + } - newPhiNode.child1 = phiNode.child1; - newPhiNode.child2 = phiNode.child2; - newPhiNode.child3 = phiNode.child3; + case op_to_number: { + Node* node = 
get(VirtualRegister(currentInstruction[2].u.operand)); + addToGraph(Phantom, Edge(node, NumberUse)); + set(VirtualRegister(currentInstruction[1].u.operand), node); + NEXT_OPCODE(op_to_number); + } + + case op_in: { + set(VirtualRegister(currentInstruction[1].u.operand), + addToGraph(In, get(VirtualRegister(currentInstruction[2].u.operand)), get(VirtualRegister(currentInstruction[3].u.operand)))); + NEXT_OPCODE(op_in); + } - phiNode.child1 = newPhi; - phiNode.child1 = valueInPredecessor; - phiNode.child3 = NoNode; + default: + // Parse failed! This should not happen because the capabilities checker + // should have caught it. + RELEASE_ASSERT_NOT_REACHED(); + return false; } } } -void ByteCodeParser::setupPredecessors() +void ByteCodeParser::linkBlock(BasicBlock* block, Vector& possibleTargets) { - for (BlockIndex index = 0; index < m_graph.m_blocks.size(); ++index) { - BasicBlock* block = m_graph.m_blocks[index].get(); - ASSERT(block->end != NoNode); - Node& node = m_graph[block->end - 1]; - ASSERT(node.isTerminal()); + ASSERT(!block->isLinked); + ASSERT(!block->isEmpty()); + Node* node = block->last(); + ASSERT(node->isTerminal()); + + switch (node->op()) { + case Jump: + node->targetBlock() = blockForBytecodeOffset(possibleTargets, node->targetBytecodeOffsetDuringParsing()); + break; + + case Branch: { + BranchData* data = node->branchData(); + data->taken.block = blockForBytecodeOffset(possibleTargets, data->takenBytecodeIndex()); + data->notTaken.block = blockForBytecodeOffset(possibleTargets, data->notTakenBytecodeIndex()); + break; + } + + case Switch: { + SwitchData* data = node->switchData(); + for (unsigned i = node->switchData()->cases.size(); i--;) + data->cases[i].target.block = blockForBytecodeOffset(possibleTargets, data->cases[i].target.bytecodeIndex()); + data->fallThrough.block = blockForBytecodeOffset(possibleTargets, data->fallThrough.bytecodeIndex()); + break; + } + + default: + break; + } + +#if !ASSERT_DISABLED + block->isLinked = true; +#endif +} - if (node.isJump()) - m_graph.blockForBytecodeOffset(node.takenBytecodeOffset()).m_predecessors.append(index); - else if (node.isBranch()) { - m_graph.blockForBytecodeOffset(node.takenBytecodeOffset()).m_predecessors.append(index); - m_graph.blockForBytecodeOffset(node.notTakenBytecodeOffset()).m_predecessors.append(index); +void ByteCodeParser::linkBlocks(Vector& unlinkedBlocks, Vector& possibleTargets) +{ + for (size_t i = 0; i < unlinkedBlocks.size(); ++i) { + if (unlinkedBlocks[i].m_needsNormalLinking) { + linkBlock(unlinkedBlocks[i].m_block, possibleTargets); + unlinkedBlocks[i].m_needsNormalLinking = false; } } } -void ByteCodeParser::allocateVirtualRegisters() +void ByteCodeParser::buildOperandMapsIfNecessary() { - ScoreBoard scoreBoard(m_graph, m_preservedVars); - unsigned sizeExcludingPhiNodes = m_graph.m_blocks.last()->end; - for (size_t i = 0; i < sizeExcludingPhiNodes; ++i) { - Node& node = m_graph[i]; - if (!node.shouldGenerate()) - continue; + if (m_haveBuiltOperandMaps) + return; + + for (size_t i = 0; i < m_codeBlock->numberOfIdentifiers(); ++i) + m_identifierMap.add(m_codeBlock->identifier(i).impl(), i); + for (size_t i = 0; i < m_codeBlock->numberOfConstantRegisters(); ++i) { + JSValue value = m_codeBlock->getConstant(i + FirstConstantRegisterIndex); + if (!value) + m_emptyJSValueIndex = i + FirstConstantRegisterIndex; + else + m_jsValueMap.add(JSValue::encode(value), i + FirstConstantRegisterIndex); + } + + m_haveBuiltOperandMaps = true; +} - // GetLocal nodes are effectively phi nodes in the graph, 
referencing - // results from prior blocks. - if (node.op != GetLocal) { - // First, call use on all of the current node's children, then - // allocate a VirtualRegister for this node. We do so in this - // order so that if a child is on its last use, and a - // VirtualRegister is freed, then it may be reused for node. - scoreBoard.use(node.child1); - scoreBoard.use(node.child2); - scoreBoard.use(node.child3); +ByteCodeParser::InlineStackEntry::InlineStackEntry( + ByteCodeParser* byteCodeParser, + CodeBlock* codeBlock, + CodeBlock* profiledBlock, + BasicBlock* callsiteBlockHead, + JSFunction* callee, // Null if this is a closure call. + VirtualRegister returnValueVR, + VirtualRegister inlineCallFrameStart, + int argumentCountIncludingThis, + CodeSpecializationKind kind) + : m_byteCodeParser(byteCodeParser) + , m_codeBlock(codeBlock) + , m_profiledBlock(profiledBlock) + , m_callsiteBlockHead(callsiteBlockHead) + , m_returnValue(returnValueVR) + , m_didReturn(false) + , m_didEarlyReturn(false) + , m_caller(byteCodeParser->m_inlineStackTop) +{ + { + ConcurrentJITLocker locker(m_profiledBlock->m_lock); + m_lazyOperands.initialize(locker, m_profiledBlock->lazyOperandValueProfiles()); + m_exitProfile.initialize(locker, profiledBlock->exitProfile()); + + // We do this while holding the lock because we want to encourage StructureStubInfo's + // to be potentially added to operations and because the profiled block could be in the + // middle of LLInt->JIT tier-up in which case we would be adding the info's right now. + if (m_profiledBlock->hasBaselineJITProfiling()) { + m_profiledBlock->getStubInfoMap(locker, m_stubInfos); + m_profiledBlock->getCallLinkInfoMap(locker, m_callLinkInfos); + } + } + + m_argumentPositions.resize(argumentCountIncludingThis); + for (int i = 0; i < argumentCountIncludingThis; ++i) { + byteCodeParser->m_graph.m_argumentPositions.append(ArgumentPosition()); + ArgumentPosition* argumentPosition = &byteCodeParser->m_graph.m_argumentPositions.last(); + m_argumentPositions[i] = argumentPosition; + } + + // Track the code-block-global exit sites. + if (m_exitProfile.hasExitSite(ArgumentsEscaped)) { + byteCodeParser->m_graph.m_executablesWhoseArgumentsEscaped.add( + codeBlock->ownerExecutable()); + } + + if (m_caller) { + // Inline case. + ASSERT(codeBlock != byteCodeParser->m_codeBlock); + ASSERT(inlineCallFrameStart.isValid()); + ASSERT(callsiteBlockHead); + + m_inlineCallFrame = byteCodeParser->m_graph.m_plan.inlineCallFrames->add(); + initializeLazyWriteBarrierForInlineCallFrameExecutable( + byteCodeParser->m_graph.m_plan.writeBarriers, + m_inlineCallFrame->executable, + byteCodeParser->m_codeBlock, + m_inlineCallFrame, + byteCodeParser->m_codeBlock->ownerExecutable(), + codeBlock->ownerExecutable()); + m_inlineCallFrame->stackOffset = inlineCallFrameStart.offset() - JSStack::CallFrameHeaderSize; + if (callee) { + m_inlineCallFrame->calleeRecovery = ValueRecovery::constant(callee); + m_inlineCallFrame->isClosureCall = false; + } else + m_inlineCallFrame->isClosureCall = true; + m_inlineCallFrame->caller = byteCodeParser->currentCodeOrigin(); + m_inlineCallFrame->arguments.resize(argumentCountIncludingThis); // Set the number of arguments including this, but don't configure the value recoveries, yet. 
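+        // Descriptive aside: `callee` is non-null only when the call site resolved to
+        // a single JSFunction, in which case it was pinned as a constant recovery
+        // above; a null callee means only the executable is known, hence the
+        // closure-call flag.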
+ m_inlineCallFrame->isCall = isCall(kind); + + if (m_inlineCallFrame->caller.inlineCallFrame) + m_inlineCallFrame->capturedVars = m_inlineCallFrame->caller.inlineCallFrame->capturedVars; + else { + for (int i = byteCodeParser->m_codeBlock->m_numVars; i--;) { + if (byteCodeParser->m_codeBlock->isCaptured(virtualRegisterForLocal(i))) + m_inlineCallFrame->capturedVars.set(i); + } } - if (!node.hasResult()) - continue; + for (int i = argumentCountIncludingThis; i--;) { + VirtualRegister argument = virtualRegisterForArgument(i); + if (codeBlock->isCaptured(argument)) + m_inlineCallFrame->capturedVars.set(VirtualRegister(argument.offset() + m_inlineCallFrame->stackOffset).toLocal()); + } + for (size_t i = codeBlock->m_numVars; i--;) { + VirtualRegister local = virtualRegisterForLocal(i); + if (codeBlock->isCaptured(local)) + m_inlineCallFrame->capturedVars.set(VirtualRegister(local.offset() + m_inlineCallFrame->stackOffset).toLocal()); + } - node.setVirtualRegister(scoreBoard.allocate()); - // 'mustGenerate' nodes have their useCount artificially elevated, - // call use now to account for this. - if (node.mustGenerate()) - scoreBoard.use(i); + byteCodeParser->buildOperandMapsIfNecessary(); + + m_identifierRemap.resize(codeBlock->numberOfIdentifiers()); + m_constantRemap.resize(codeBlock->numberOfConstantRegisters()); + m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers()); + m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables()); + + for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) { + StringImpl* rep = codeBlock->identifier(i).impl(); + BorrowedIdentifierMap::AddResult result = byteCodeParser->m_identifierMap.add(rep, byteCodeParser->m_graph.identifiers().numberOfIdentifiers()); + if (result.isNewEntry) + byteCodeParser->m_graph.identifiers().addLazily(rep); + m_identifierRemap[i] = result.iterator->value; + } + for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) { + JSValue value = codeBlock->getConstant(i + FirstConstantRegisterIndex); + if (!value) { + if (byteCodeParser->m_emptyJSValueIndex == UINT_MAX) { + byteCodeParser->m_emptyJSValueIndex = byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex; + byteCodeParser->addConstant(JSValue()); + byteCodeParser->m_constants.append(ConstantRecord()); + } + m_constantRemap[i] = byteCodeParser->m_emptyJSValueIndex; + continue; + } + JSValueMap::AddResult result = byteCodeParser->m_jsValueMap.add(JSValue::encode(value), byteCodeParser->m_codeBlock->numberOfConstantRegisters() + FirstConstantRegisterIndex); + if (result.isNewEntry) { + byteCodeParser->addConstant(value); + byteCodeParser->m_constants.append(ConstantRecord()); + } + m_constantRemap[i] = result.iterator->value; + } + for (unsigned i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) { + // If we inline the same code block multiple times, we don't want to needlessly + // duplicate its constant buffers. 
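+        // For example (hypothetical scenario): if the same callee is inlined twice,
+        // the first visit copies its buffer i into the root code block and records the
+        // new index under (codeBlock, i); the second visit hits the cache below, so
+        // both inline frames' m_constantBufferRemap[i] point at one shared buffer.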
+ HashMap::iterator iter = + byteCodeParser->m_constantBufferCache.find(ConstantBufferKey(codeBlock, i)); + if (iter != byteCodeParser->m_constantBufferCache.end()) { + m_constantBufferRemap[i] = iter->value; + continue; + } + Vector& buffer = codeBlock->constantBufferAsVector(i); + unsigned newIndex = byteCodeParser->m_codeBlock->addConstantBuffer(buffer); + m_constantBufferRemap[i] = newIndex; + byteCodeParser->m_constantBufferCache.add(ConstantBufferKey(codeBlock, i), newIndex); + } + for (unsigned i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i) { + m_switchRemap[i] = byteCodeParser->m_codeBlock->numberOfSwitchJumpTables(); + byteCodeParser->m_codeBlock->addSwitchJumpTable() = codeBlock->switchJumpTable(i); + } + m_callsiteBlockHeadNeedsLinking = true; + } else { + // Machine code block case. + ASSERT(codeBlock == byteCodeParser->m_codeBlock); + ASSERT(!callee); + ASSERT(!returnValueVR.isValid()); + ASSERT(!inlineCallFrameStart.isValid()); + ASSERT(!callsiteBlockHead); + + m_inlineCallFrame = 0; + + m_identifierRemap.resize(codeBlock->numberOfIdentifiers()); + m_constantRemap.resize(codeBlock->numberOfConstantRegisters()); + m_constantBufferRemap.resize(codeBlock->numberOfConstantBuffers()); + m_switchRemap.resize(codeBlock->numberOfSwitchJumpTables()); + for (size_t i = 0; i < codeBlock->numberOfIdentifiers(); ++i) + m_identifierRemap[i] = i; + for (size_t i = 0; i < codeBlock->numberOfConstantRegisters(); ++i) + m_constantRemap[i] = i + FirstConstantRegisterIndex; + for (size_t i = 0; i < codeBlock->numberOfConstantBuffers(); ++i) + m_constantBufferRemap[i] = i; + for (size_t i = 0; i < codeBlock->numberOfSwitchJumpTables(); ++i) + m_switchRemap[i] = i; + m_callsiteBlockHeadNeedsLinking = false; } - - // 'm_numCalleeRegisters' is the number of locals and temporaries allocated - // for the function (and checked for on entry). Since we perform a new and - // different allocation of temporaries, more registers may now be required. - unsigned calleeRegisters = scoreBoard.allocatedCount() + m_preservedVars; - if ((unsigned)m_codeBlock->m_numCalleeRegisters < calleeRegisters) - m_codeBlock->m_numCalleeRegisters = calleeRegisters; + + for (size_t i = 0; i < m_constantRemap.size(); ++i) + ASSERT(m_constantRemap[i] >= static_cast(FirstConstantRegisterIndex)); + + byteCodeParser->m_inlineStackTop = this; } -bool ByteCodeParser::parse() +void ByteCodeParser::parseCodeBlock() { - // Set during construction. - ASSERT(!m_currentIndex); - - for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= m_codeBlock->numberOfJumpTargets(); ++jumpTargetIndex) { + CodeBlock* codeBlock = m_inlineStackTop->m_codeBlock; + + if (m_graph.compilation()) { + m_graph.compilation()->addProfiledBytecodes( + *m_vm->m_perBytecodeProfiler, m_inlineStackTop->m_profiledBlock); + } + + bool shouldDumpBytecode = Options::dumpBytecodeAtDFGTime(); + if (shouldDumpBytecode) { + dataLog("Parsing ", *codeBlock); + if (inlineCallFrame()) { + dataLog( + " for inlining at ", CodeBlockWithJITType(m_codeBlock, JITCode::DFGJIT), + " ", inlineCallFrame()->caller); + } + dataLog( + ": captureCount = ", codeBlock->symbolTable() ? 
codeBlock->symbolTable()->captureCount() : 0, + ", needsActivation = ", codeBlock->needsActivation(), + ", isStrictMode = ", codeBlock->ownerExecutable()->isStrictMode(), "\n"); + codeBlock->baselineVersion()->dumpBytecode(); + } + + Vector jumpTargets; + computePreciseJumpTargets(codeBlock, jumpTargets); + if (Options::dumpBytecodeAtDFGTime()) { + dataLog("Jump targets: "); + CommaPrinter comma; + for (unsigned i = 0; i < jumpTargets.size(); ++i) + dataLog(comma, jumpTargets[i]); + dataLog("\n"); + } + + for (unsigned jumpTargetIndex = 0; jumpTargetIndex <= jumpTargets.size(); ++jumpTargetIndex) { // The maximum bytecode offset to go into the current basicblock is either the next jump target, or the end of the instructions. - unsigned limit = jumpTargetIndex < m_codeBlock->numberOfJumpTargets() ? m_codeBlock->jumpTarget(jumpTargetIndex) : m_codeBlock->instructions().size(); + unsigned limit = jumpTargetIndex < jumpTargets.size() ? jumpTargets[jumpTargetIndex] : codeBlock->instructions().size(); ASSERT(m_currentIndex < limit); // Loop until we reach the current limit (i.e. next jump target). do { - OwnPtr block = adoptPtr(new BasicBlock(m_currentIndex, m_graph.size(), m_numArguments, m_numLocals)); - m_currentBlock = block.get(); - m_graph.m_blocks.append(block.release()); + if (!m_currentBlock) { + // Check if we can use the last block. + if (m_graph.numBlocks() && m_graph.lastBlock()->isEmpty()) { + // This must be a block belonging to us. + ASSERT(m_inlineStackTop->m_unlinkedBlocks.last().m_block == m_graph.lastBlock()); + // Either the block is linkable or it isn't. If it's linkable then it's the last + // block in the blockLinkingTargets list. If it's not then the last block will + // have a lower bytecode index that the one we're about to give to this block. + if (m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin != m_currentIndex) { + // Make the block linkable. + ASSERT(m_inlineStackTop->m_blockLinkingTargets.isEmpty() || m_inlineStackTop->m_blockLinkingTargets.last()->bytecodeBegin < m_currentIndex); + m_inlineStackTop->m_blockLinkingTargets.append(m_graph.lastBlock()); + } + // Change its bytecode begin and continue. + m_currentBlock = m_graph.lastBlock(); + m_currentBlock->bytecodeBegin = m_currentIndex; + } else { + RefPtr block = adoptRef(new BasicBlock(m_currentIndex, m_numArguments, m_numLocals, PNaN)); + m_currentBlock = block.get(); + // This assertion checks two things: + // 1) If the bytecodeBegin is greater than currentIndex, then something has gone + // horribly wrong. So, we're probably generating incorrect code. + // 2) If the bytecodeBegin is equal to the currentIndex, then we failed to do + // a peephole coalescing of this block in the if statement above. So, we're + // generating suboptimal code and leaving more work for the CFG simplifier. + ASSERT(m_inlineStackTop->m_unlinkedBlocks.isEmpty() || m_inlineStackTop->m_unlinkedBlocks.last().m_block->bytecodeBegin < m_currentIndex); + m_inlineStackTop->m_unlinkedBlocks.append(UnlinkedBlock(block.get())); + m_inlineStackTop->m_blockLinkingTargets.append(block.get()); + // The first block is definitely an OSR target. + if (!m_graph.numBlocks()) + block->isOSRTarget = true; + m_graph.appendBlock(block); + prepareToParseBlock(); + } + } + + bool shouldContinueParsing = parseBlock(limit); - if (!parseBlock(limit)) - return false; // We should not have gone beyond the limit. 
ASSERT(m_currentIndex <= limit); - - m_currentBlock->end = m_graph.size(); + + // We should have planted a terminal, or we just gave up because + // we realized that the jump target information is imprecise, or we + // are at the end of an inline function, or we realized that we + // should stop parsing because there was a return in the first + // basic block. + ASSERT(m_currentBlock->isEmpty() || m_currentBlock->last()->isTerminal() || (m_currentIndex == codeBlock->instructions().size() && inlineCallFrame()) || !shouldContinueParsing); + + if (!shouldContinueParsing) + return; + + m_currentBlock = 0; } while (m_currentIndex < limit); } // Should have reached the end of the instructions. - ASSERT(m_currentIndex == m_codeBlock->instructions().size()); - - setupPredecessors(); - processPhiStack(); - processPhiStack(); - - allocateVirtualRegisters(); + ASSERT(m_currentIndex == codeBlock->instructions().size()); +} -#if DFG_DEBUG_VERBOSE - m_graph.dump(m_codeBlock); -#endif +bool ByteCodeParser::parse() +{ + // Set during construction. + ASSERT(!m_currentIndex); + + if (Options::verboseDFGByteCodeParsing()) + dataLog("Parsing ", *m_codeBlock, "\n"); + + m_dfgCodeBlock = m_graph.m_plan.profiledDFGCodeBlock.get(); + if (isFTL(m_graph.m_plan.mode) && m_dfgCodeBlock + && Options::enablePolyvariantDevirtualization()) { + if (Options::enablePolyvariantCallInlining()) + CallLinkStatus::computeDFGStatuses(m_dfgCodeBlock, m_callContextMap); + if (Options::enablePolyvariantByIdInlining()) + m_dfgCodeBlock->getStubInfoMap(m_dfgStubInfos); + } + + if (m_codeBlock->captureCount()) { + SymbolTable* symbolTable = m_codeBlock->symbolTable(); + ConcurrentJITLocker locker(symbolTable->m_lock); + SymbolTable::Map::iterator iter = symbolTable->begin(locker); + SymbolTable::Map::iterator end = symbolTable->end(locker); + for (; iter != end; ++iter) { + VariableWatchpointSet* set = iter->value.watchpointSet(); + if (!set) + continue; + size_t index = static_cast(VirtualRegister(iter->value.getIndex()).toLocal()); + while (m_localWatchpoints.size() <= index) + m_localWatchpoints.append(nullptr); + m_localWatchpoints[index] = set; + } + } + + InlineStackEntry inlineStackEntry( + this, m_codeBlock, m_profiledBlock, 0, 0, VirtualRegister(), VirtualRegister(), + m_codeBlock->numParameters(), CodeForCall); + + parseCodeBlock(); + + linkBlocks(inlineStackEntry.m_unlinkedBlocks, inlineStackEntry.m_blockLinkingTargets); + m_graph.determineReachability(); + m_graph.killUnreachableBlocks(); + + for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { + BasicBlock* block = m_graph.block(blockIndex); + if (!block) + continue; + ASSERT(block->variablesAtHead.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals()); + ASSERT(block->variablesAtHead.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments()); + ASSERT(block->variablesAtTail.numberOfLocals() == m_graph.block(0)->variablesAtHead.numberOfLocals()); + ASSERT(block->variablesAtTail.numberOfArguments() == m_graph.block(0)->variablesAtHead.numberOfArguments()); + } + + m_graph.m_localVars = m_numLocals; + m_graph.m_parameterSlots = m_parameterSlots; return true; } -bool parse(Graph& graph, JSGlobalData* globalData, CodeBlock* codeBlock) +bool parse(Graph& graph) { -#if DFG_DEBUG_LOCAL_DISBALE - UNUSED_PARAM(graph); - UNUSED_PARAM(globalData); - UNUSED_PARAM(codeBlock); - return false; -#else - return ByteCodeParser(globalData, codeBlock, graph).parse(); -#endif + SamplingRegion samplingRegion("DFG Parsing"); + return 
ByteCodeParser(graph).parse(); } } } // namespace JSC::DFG