+SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
+ : m_compileOkay(true)
+ , m_jit(jit)
+ , m_currentNode(0)
+ , m_indexInBlock(0)
+ , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
+ , m_blockHeads(jit.graph().m_blocks.size())
+ , m_arguments(jit.codeBlock()->numParameters())
+ , m_variables(jit.graph().m_localVars)
+ , m_lastSetOperand(std::numeric_limits<int>::max())
+ , m_state(m_jit.graph())
+ , m_stream(&jit.codeBlock()->variableEventStream())
+ , m_minifiedGraph(&jit.codeBlock()->minifiedDFG())
+ , m_isCheckingArgumentTypes(false)
+{
+}
+
+SpeculativeJIT::~SpeculativeJIT()
+{
+}
+
+void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
+{
+ ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
+
+ GPRTemporary scratch(this);
+ GPRTemporary scratch2(this);
+ GPRReg scratchGPR = scratch.gpr();
+ GPRReg scratch2GPR = scratch2.gpr();
+
+ unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
+
+ JITCompiler::JumpList slowCases;
+
+ slowCases.append(
+ emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
+ m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
+ emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
+
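+    // At this point storageGPR points at the butterfly, just past the indexing header, so the
+    // public length and vector length below are written at negative offsets from it.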
+ m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
+ m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
+
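+    // For double arrays, any slots beyond the initialized length must be filled with the QNaN
+    // hole value so that reads of those slots are recognized as holes.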
+ if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
+#if USE(JSVALUE64)
+ m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR);
+ for (unsigned i = numElements; i < vectorLength; ++i)
+ m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
+#else
+ EncodedValueDescriptor value;
+ value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, QNaN));
+ for (unsigned i = numElements; i < vectorLength; ++i) {
+ m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ }
+#endif
+ }
+
+ // I want a slow path that also loads out the storage pointer, and that's
+ // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
+ // of work for a very small piece of functionality. :-/
+ addSlowPathGenerator(adoptPtr(
+ new CallArrayAllocatorSlowPathGenerator(
+ slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
+ structure, numElements)));
+}
+
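+// A "backward" speculation check records an OSR exit that re-executes the current bytecode on
+// failure; the forward variants further down retarget the exit at the next bytecode instead.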
+void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
+{
+ if (!m_compileOkay)
+ return;
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ m_jit.appendExitInfo(jumpToFail);
+ m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
+}
+
+void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
+{
+ if (!m_compileOkay)
+ return;
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ m_jit.appendExitInfo(jumpsToFail);
+ m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
+}
+
+void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
+{
+ if (!m_compileOkay)
+ return;
+ backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
+ if (m_speculationDirection == ForwardSpeculation)
+ convertLastOSRExitToForward();
+}
+
+void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
+{
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
+}
+
+OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
+{
+ if (!m_compileOkay)
+ return OSRExitJumpPlaceholder();
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ unsigned index = m_jit.codeBlock()->numberOfOSRExits();
+ m_jit.appendExitInfo();
+ m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
+ return OSRExitJumpPlaceholder(index);
+}
+
+OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
+{
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ return backwardSpeculationCheck(kind, jsValueSource, nodeUse.node());
+}
+
+void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
+{
+ if (!m_compileOkay)
+ return;
+ backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
+ if (m_speculationDirection == ForwardSpeculation)
+ convertLastOSRExitToForward();
+}
+
+void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
+{
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
+}
+
+void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
+{
+ if (!m_compileOkay)
+ return;
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ m_jit.codeBlock()->appendSpeculationRecovery(recovery);
+ m_jit.appendExitInfo(jumpToFail);
+ m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), m_jit.codeBlock()->numberOfSpeculationRecoveries()));
+}
+
+void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
+{
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ backwardSpeculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
+}
+
+void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
+{
+ if (!m_compileOkay)
+ return;
+ backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail, recovery);
+ if (m_speculationDirection == ForwardSpeculation)
+ convertLastOSRExitToForward();
+}
+
+void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
+{
+ speculationCheck(kind, jsValueSource, edge.node(), jumpToFail, recovery);
+}
+
+JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind, JSValueSource jsValueSource, Node* node)
+{
+ if (!m_compileOkay)
+ return 0;
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ m_jit.appendExitInfo(JITCompiler::JumpList());
+ OSRExit& exit = m_jit.codeBlock()->osrExit(
+ m_jit.codeBlock()->appendOSRExit(OSRExit(
+ kind, jsValueSource,
+ m_jit.graph().methodOfGettingAValueProfileFor(node),
+ this, m_stream->size())));
+ exit.m_watchpointIndex = m_jit.codeBlock()->appendWatchpoint(
+ JumpReplacementWatchpoint(m_jit.watchpointLabel()));
+ if (m_speculationDirection == ForwardSpeculation)
+ convertLastOSRExitToForward();
+ return &m_jit.codeBlock()->watchpoint(exit.m_watchpointIndex);
+}
+
+JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind)
+{
+ return speculationWatchpoint(kind, JSValueSource(), 0);
+}
+
+void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecovery)
+{
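+    // The last emitted OSR exit should resume at the bytecode following the current one.
+    // Retarget its code origin at the first subsequent node with a different code origin and,
+    // when a value recovery is supplied, remember it for the local that the intervening
+    // SetLocal sets.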
+ if (!valueRecovery) {
+        // Check that either the current node contains a MovHint, or the preceding node did
+        // and had the same code origin.
+ if (!m_currentNode->containsMovHint()) {
+ Node* setLocal = m_jit.graph().m_blocks[m_block]->at(m_indexInBlock - 1);
+ ASSERT_UNUSED(setLocal, setLocal->containsMovHint());
+ ASSERT_UNUSED(setLocal, setLocal->codeOrigin == m_currentNode->codeOrigin);
+ }
+
+ // Find the next node.
+ unsigned indexInBlock = m_indexInBlock + 1;
+ Node* node = 0;
+ for (;;) {
+ if (indexInBlock == m_jit.graph().m_blocks[m_block]->size()) {
+ // This is an inline return. Give up and do a backwards speculation. This is safe
+ // because an inline return has its own bytecode index and it's always safe to
+ // reexecute that bytecode.
+ ASSERT(node->op() == Jump);
+ return;
+ }
+ node = m_jit.graph().m_blocks[m_block]->at(indexInBlock);
+ if (node->codeOrigin != m_currentNode->codeOrigin)
+ break;
+ indexInBlock++;
+ }
+
+ ASSERT(node->codeOrigin != m_currentNode->codeOrigin);
+ OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
+ exit.m_codeOrigin = node->codeOrigin;
+ return;
+ }
+
+ unsigned setLocalIndexInBlock = m_indexInBlock + 1;
+
+ Node* setLocal = m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock);
+ bool hadInt32ToDouble = false;
+
+ if (setLocal->op() == ForwardInt32ToDouble) {
+ setLocal = m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock);
+ hadInt32ToDouble = true;
+ }
+ if (setLocal->op() == Flush || setLocal->op() == Phantom)
+ setLocal = m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock);
+
+ if (hadInt32ToDouble)
+ ASSERT(setLocal->child1()->child1() == m_currentNode);
+ else
+ ASSERT(setLocal->child1() == m_currentNode);
+ ASSERT(setLocal->containsMovHint());
+ ASSERT(setLocal->codeOrigin == m_currentNode->codeOrigin);
+
+ Node* nextNode = m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock + 1);
+ if (nextNode->op() == Jump && nextNode->codeOrigin == m_currentNode->codeOrigin) {
+ // We're at an inlined return. Use a backward speculation instead.
+ return;
+ }
+ ASSERT(nextNode->codeOrigin != m_currentNode->codeOrigin);
+
+ OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
+ exit.m_codeOrigin = nextNode->codeOrigin;
+
+ exit.m_lastSetOperand = setLocal->local();
+ exit.m_valueRecoveryOverride = adoptRef(
+ new ValueRecoveryOverride(setLocal->local(), valueRecovery));
+}
+
+void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
+{
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
+ convertLastOSRExitToForward(valueRecovery);
+}
+
+void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
+{
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
+ convertLastOSRExitToForward(valueRecovery);
+}
+
+void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
+{
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLogF("SpeculativeJIT was terminated.\n");
+#endif
+ if (!m_compileOkay)
+ return;
+ speculationCheck(kind, jsValueRegs, node, m_jit.jump());
+ m_compileOkay = false;
+}
+
+void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
+{
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
+}
+
+void SpeculativeJIT::backwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
+{
+ ASSERT(needsTypeCheck(edge, typesPassedThrough));
+ m_state.forNode(edge).filter(typesPassedThrough);
+ backwardSpeculationCheck(BadType, source, edge.node(), jumpToFail);
+}
+
+void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
+{
+ backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
+ if (m_speculationDirection == ForwardSpeculation)
+ convertLastOSRExitToForward();
+}
+
+void SpeculativeJIT::forwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
+{
+ backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
+ convertLastOSRExitToForward(valueRecovery);
+}
+
+void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
+{
+ m_slowPathGenerators.append(slowPathGenerator);
+}
+
+void SpeculativeJIT::runSlowPathGenerators()
+{
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLogF("Running %lu slow path generators.\n", m_slowPathGenerators.size());
+#endif
+ for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
+ m_slowPathGenerators[i]->generate(this);
+}
+
+// On Windows we need to wrap fmod; on other platforms we can call it directly.
+// On ARMv7 we assert that all function pointers have the low bit set (i.e. point to Thumb code).
+#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
+static double DFG_OPERATION fmodAsDFGOperation(double x, double y)
+{
+ return fmod(x, y);
+}
+#else
+#define fmodAsDFGOperation fmod
+#endif
+
+void SpeculativeJIT::clearGenerationInfo()
+{
+ for (unsigned i = 0; i < m_generationInfo.size(); ++i)
+ m_generationInfo[i] = GenerationInfo();
+ m_gprs = RegisterBank<GPRInfo>();
+ m_fprs = RegisterBank<FPRInfo>();
+}
+
+SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
+{
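+    // Work out how to spill this value before a call and how to refill it afterwards, without
+    // disturbing the recorded generation info (hence "silent").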
+ GenerationInfo& info = m_generationInfo[spillMe];
+ Node* node = info.node();
+ DataFormat registerFormat = info.registerFormat();
+ ASSERT(registerFormat != DataFormatNone);
+ ASSERT(registerFormat != DataFormatDouble);
+
+ SilentSpillAction spillAction;
+ SilentFillAction fillAction;
+
+ if (!info.needsSpill())
+ spillAction = DoNothingForSpill;
+ else {
+#if USE(JSVALUE64)
+ ASSERT(info.gpr() == source);
+ if (registerFormat == DataFormatInteger)
+ spillAction = Store32Payload;
+ else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
+ spillAction = StorePtr;
+ else {
+ ASSERT(registerFormat & DataFormatJS);
+ spillAction = Store64;
+ }
+#elif USE(JSVALUE32_64)
+ if (registerFormat & DataFormatJS) {
+ ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
+ spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
+ } else {
+ ASSERT(info.gpr() == source);
+ spillAction = Store32Payload;
+ }
+#endif
+ }
+
+ if (registerFormat == DataFormatInteger) {
+ ASSERT(info.gpr() == source);
+ ASSERT(isJSInteger(info.registerFormat()));
+ if (node->hasConstant()) {
+ ASSERT(isInt32Constant(node));
+ fillAction = SetInt32Constant;
+ } else
+ fillAction = Load32Payload;
+ } else if (registerFormat == DataFormatBoolean) {
+#if USE(JSVALUE64)
+ RELEASE_ASSERT_NOT_REACHED();
+ fillAction = DoNothingForFill;
+#elif USE(JSVALUE32_64)
+ ASSERT(info.gpr() == source);
+ if (node->hasConstant()) {
+ ASSERT(isBooleanConstant(node));
+ fillAction = SetBooleanConstant;
+ } else
+ fillAction = Load32Payload;
+#endif
+ } else if (registerFormat == DataFormatCell) {
+ ASSERT(info.gpr() == source);
+ if (node->hasConstant()) {
+ JSValue value = valueOfJSConstant(node);
+ ASSERT_UNUSED(value, value.isCell());
+ fillAction = SetCellConstant;
+ } else {
+#if USE(JSVALUE64)
+ fillAction = LoadPtr;
+#else
+ fillAction = Load32Payload;
+#endif
+ }
+ } else if (registerFormat == DataFormatStorage) {
+ ASSERT(info.gpr() == source);
+ fillAction = LoadPtr;
+ } else {
+ ASSERT(registerFormat & DataFormatJS);
+#if USE(JSVALUE64)
+ ASSERT(info.gpr() == source);
+ if (node->hasConstant()) {
+ if (valueOfJSConstant(node).isCell())
+ fillAction = SetTrustedJSConstant;
+ else
+ fillAction = SetJSConstant;
+ } else if (info.spillFormat() == DataFormatInteger) {
+ ASSERT(registerFormat == DataFormatJSInteger);
+ fillAction = Load32PayloadBoxInt;
+ } else if (info.spillFormat() == DataFormatDouble) {
+ ASSERT(registerFormat == DataFormatJSDouble);
+ fillAction = LoadDoubleBoxDouble;
+ } else
+ fillAction = Load64;
+#else
+ ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
+ if (node->hasConstant())
+ fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
+ else if (info.payloadGPR() == source)
+ fillAction = Load32Payload;
+ else { // Fill the Tag
+ switch (info.spillFormat()) {
+ case DataFormatInteger:
+ ASSERT(registerFormat == DataFormatJSInteger);
+ fillAction = SetInt32Tag;
+ break;
+ case DataFormatCell:
+ ASSERT(registerFormat == DataFormatJSCell);
+ fillAction = SetCellTag;
+ break;
+ case DataFormatBoolean:
+ ASSERT(registerFormat == DataFormatJSBoolean);
+ fillAction = SetBooleanTag;
+ break;
+ default:
+ fillAction = Load32Tag;
+ break;
+ }
+ }
+#endif
+ }
+
+ return SilentRegisterSavePlan(spillAction, fillAction, node, source);
+}
+
+SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
+{
+ GenerationInfo& info = m_generationInfo[spillMe];
+ Node* node = info.node();
+ ASSERT(info.registerFormat() == DataFormatDouble);
+
+ SilentSpillAction spillAction;
+ SilentFillAction fillAction;
+
+ if (!info.needsSpill())
+ spillAction = DoNothingForSpill;
+ else {
+ ASSERT(!node->hasConstant());
+ ASSERT(info.spillFormat() == DataFormatNone);
+ ASSERT(info.fpr() == source);
+ spillAction = StoreDouble;
+ }
+
+#if USE(JSVALUE64)
+ if (node->hasConstant()) {
+ ASSERT(isNumberConstant(node));
+ fillAction = SetDoubleConstant;
+ } else if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
+        // It was already spilled previously, and not as a double, which means we need to unbox
+        // it when refilling.
+ ASSERT(info.spillFormat() & DataFormatJS);
+ fillAction = LoadJSUnboxDouble;
+ } else
+ fillAction = LoadDouble;
+#elif USE(JSVALUE32_64)
+ ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
+ if (node->hasConstant()) {
+ ASSERT(isNumberConstant(node));
+ fillAction = SetDoubleConstant;
+ } else
+ fillAction = LoadDouble;
+#endif
+
+ return SilentRegisterSavePlan(spillAction, fillAction, node, source);
+}
+
+void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
+{
+ switch (plan.spillAction()) {
+ case DoNothingForSpill:
+ break;
+ case Store32Tag:
+ m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
+ break;
+ case Store32Payload:
+ m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
+ break;
+ case StorePtr:
+ m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
+ break;
+#if USE(JSVALUE64)
+ case Store64:
+ m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
+ break;
+#endif
+ case StoreDouble:
+ m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+}
+
+void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
+{
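+    // Refill the register according to the plan: rematerialize constants, reload spilled values,
+    // and rebox or unbox where the spill format differs from the register format.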
+#if USE(JSVALUE32_64)
+ UNUSED_PARAM(canTrample);
+#endif
+ switch (plan.fillAction()) {
+ case DoNothingForFill:
+ break;
+ case SetInt32Constant:
+ m_jit.move(Imm32(valueOfInt32Constant(plan.node())), plan.gpr());
+ break;
+ case SetBooleanConstant:
+ m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.node())), plan.gpr());
+ break;
+ case SetCellConstant:
+ m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.node()).asCell()), plan.gpr());
+ break;
+#if USE(JSVALUE64)
+ case SetTrustedJSConstant:
+ m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
+ break;
+ case SetJSConstant:
+ m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
+ break;
+ case SetDoubleConstant:
+ m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.node()))), canTrample);
+ m_jit.move64ToDouble(canTrample, plan.fpr());
+ break;
+ case Load32PayloadBoxInt:
+ m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
+ break;
+ case LoadDoubleBoxDouble:
+ m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
+ m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr());
+ break;
+ case LoadJSUnboxDouble:
+ m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), canTrample);
+ unboxDouble(canTrample, plan.fpr());
+ break;
+#else
+ case SetJSConstantTag:
+ m_jit.move(Imm32(valueOfJSConstant(plan.node()).tag()), plan.gpr());
+ break;
+ case SetJSConstantPayload:
+ m_jit.move(Imm32(valueOfJSConstant(plan.node()).payload()), plan.gpr());
+ break;
+ case SetInt32Tag:
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
+ break;
+ case SetCellTag:
+ m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
+ break;
+ case SetBooleanTag:
+ m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
+ break;
+ case SetDoubleConstant:
+ m_jit.loadDouble(addressOfDoubleConstant(plan.node()), plan.fpr());
+ break;
+#endif
+ case Load32Tag:
+ m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
+ break;
+ case Load32Payload:
+ m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
+ break;
+ case LoadPtr:
+ m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
+ break;
+#if USE(JSVALUE64)
+ case Load64:
+ m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
+ break;
+#endif
+ case LoadDouble:
+ m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+}
+
+const TypedArrayDescriptor* SpeculativeJIT::typedArrayDescriptor(ArrayMode arrayMode)
+{
+ switch (arrayMode.type()) {
+ case Array::Int8Array:
+ return &m_jit.vm()->int8ArrayDescriptor();
+ case Array::Int16Array:
+ return &m_jit.vm()->int16ArrayDescriptor();
+ case Array::Int32Array:
+ return &m_jit.vm()->int32ArrayDescriptor();
+ case Array::Uint8Array:
+ return &m_jit.vm()->uint8ArrayDescriptor();
+ case Array::Uint8ClampedArray:
+ return &m_jit.vm()->uint8ClampedArrayDescriptor();
+ case Array::Uint16Array:
+ return &m_jit.vm()->uint16ArrayDescriptor();
+ case Array::Uint32Array:
+ return &m_jit.vm()->uint32ArrayDescriptor();
+ case Array::Float32Array:
+ return &m_jit.vm()->float32ArrayDescriptor();
+ case Array::Float64Array:
+ return &m_jit.vm()->float64ArrayDescriptor();
+ default:
+ return 0;
+ }
+}
+
+JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
+{
+ switch (arrayMode.arrayClass()) {
+ case Array::OriginalArray: {
+ CRASH();
+        // I already know that VC++ takes unkindly to the expression "return Jump()", so I'm
+        // doing it this way in anticipation of someone eventually using VC++ to compile the DFG.
+        JITCompiler::Jump result;
+ return result;
+ }
+
+ case Array::Array:
+ m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
+ return m_jit.branch32(
+ MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
+
+ default:
+ m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
+ return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
+ }
+}
+
+JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
+{
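+    // tempGPR is expected to hold the object's indexing type byte; the returned jumps are taken
+    // when that indexing type is not acceptable for the given array mode. tempGPR is clobbered.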
+ JITCompiler::JumpList result;
+
+ switch (arrayMode.type()) {
+ case Array::Int32:
+ return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
+
+ case Array::Double:
+ return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
+
+ case Array::Contiguous:
+ return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
+
+ case Array::ArrayStorage:
+ case Array::SlowPutArrayStorage: {
+ ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
+
+ if (arrayMode.isJSArray()) {
+ if (arrayMode.isSlowPut()) {
+ result.append(
+ m_jit.branchTest32(
+ MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
+ m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
+ m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
+ result.append(
+ m_jit.branch32(
+ MacroAssembler::Above, tempGPR,
+ TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
+ break;
+ }
+ m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
+ result.append(
+ m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
+ break;
+ }
+ m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
+ if (arrayMode.isSlowPut()) {
+ m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
+ result.append(
+ m_jit.branch32(
+ MacroAssembler::Above, tempGPR,
+ TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
+ break;
+ }
+ result.append(
+ m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
+ break;
+ }
+ default:
+ CRASH();
+ break;
+ }
+
+ return result;
+}
+
+void SpeculativeJIT::checkArray(Node* node)
+{
+ ASSERT(node->arrayMode().isSpecific());
+ ASSERT(!node->arrayMode().doesConversion());
+
+ SpeculateCellOperand base(this, node->child1());
+ GPRReg baseReg = base.gpr();
+
+ const TypedArrayDescriptor* result = typedArrayDescriptor(node->arrayMode());
+
+ if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
+ noResult(m_currentNode);
+ return;
+ }
+
+ const ClassInfo* expectedClassInfo = 0;
+
+ switch (node->arrayMode().type()) {
+ case Array::String:
+ expectedClassInfo = &JSString::s_info;
+ break;
+ case Array::Int32:
+ case Array::Double:
+ case Array::Contiguous:
+ case Array::ArrayStorage:
+ case Array::SlowPutArrayStorage: {
+ GPRTemporary temp(this);
+ GPRReg tempGPR = temp.gpr();
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
+ m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
+ speculationCheck(
+ BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
+ jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
+
+ noResult(m_currentNode);
+ return;
+ }
+ case Array::Arguments:
+ expectedClassInfo = &Arguments::s_info;
+ break;
+ case Array::Int8Array:
+ case Array::Int16Array:
+ case Array::Int32Array:
+ case Array::Uint8Array:
+ case Array::Uint8ClampedArray:
+ case Array::Uint16Array:
+ case Array::Uint32Array:
+ case Array::Float32Array:
+ case Array::Float64Array:
+ expectedClassInfo = result->m_classInfo;
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+
+ GPRTemporary temp(this);
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr());
+ speculationCheck(
+ Uncountable, JSValueRegs(), 0,
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
+ MacroAssembler::TrustedImmPtr(expectedClassInfo)));
+
+ noResult(m_currentNode);
+}
+
+void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
+{
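+    // Convert the base object's indexing type into the shape this array mode requires; anything
+    // that does not already match is handled out-of-line by the Arrayify slow path generator.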
+ ASSERT(node->arrayMode().doesConversion());
+
+ GPRTemporary temp(this);
+ GPRTemporary structure;
+ GPRReg tempGPR = temp.gpr();
+ GPRReg structureGPR = InvalidGPRReg;
+
+ if (node->op() != ArrayifyToStructure) {
+ GPRTemporary realStructure(this);
+ structure.adopt(realStructure);
+ structureGPR = structure.gpr();
+ }
+
+ // We can skip all that comes next if we already have array storage.
+ MacroAssembler::JumpList slowPath;
+
+ if (node->op() == ArrayifyToStructure) {
+ slowPath.append(m_jit.branchWeakPtr(
+ JITCompiler::NotEqual,
+ JITCompiler::Address(baseReg, JSCell::structureOffset()),
+ node->structure()));
+ } else {
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);
+
+ m_jit.load8(
+ MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR);
+
+ slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
+ }
+
+ addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
+ slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));
+
+ noResult(m_currentNode);
+}
+
+void SpeculativeJIT::arrayify(Node* node)
+{
+ ASSERT(node->arrayMode().isSpecific());
+
+ SpeculateCellOperand base(this, node->child1());
+
+ if (!node->child2()) {
+ arrayify(node, base.gpr(), InvalidGPRReg);
+ return;
+ }
+
+ SpeculateIntegerOperand property(this, node->child2());
+
+ arrayify(node, base.gpr(), property.gpr());
+}
+
+GPRReg SpeculativeJIT::fillStorage(Edge edge)
+{
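+    // Produce a GPR holding the storage (butterfly) pointer for this edge, reloading it from its
+    // spill slot if necessary, or falling back to filling the edge as a cell.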
+ VirtualRegister virtualRegister = edge->virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: {
+ if (info.spillFormat() == DataFormatStorage) {
+ GPRReg gpr = allocate();
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ info.fillStorage(*m_stream, gpr);
+ return gpr;
+ }
+
+ // Must be a cell; fill it as a cell and then return the pointer.
+ return fillSpeculateCell(edge);
+ }
+
+ case DataFormatStorage: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ return gpr;
+ }
+
+ default:
+ return fillSpeculateCell(edge);
+ }
+}
+
+void SpeculativeJIT::useChildren(Node* node)
+{
+ if (node->flags() & NodeHasVarArgs) {
+ for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
+ if (!!m_jit.graph().m_varArgChildren[childIdx])
+ use(m_jit.graph().m_varArgChildren[childIdx]);
+ }
+ } else {
+ Edge child1 = node->child1();
+ if (!child1) {
+ ASSERT(!node->child2() && !node->child3());
+ return;
+ }
+ use(child1);
+
+ Edge child2 = node->child2();
+ if (!child2) {
+ ASSERT(!node->child3());
+ return;
+ }
+ use(child2);
+
+ Edge child3 = node->child3();
+ if (!child3)
+ return;
+ use(child3);
+ }
+}
+
+void SpeculativeJIT::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
+{
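+    // Aside from the optional write barrier profiling counter, this currently emits no code.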
+ UNUSED_PARAM(jit);
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+ UNUSED_PARAM(useKind);
+ ASSERT(owner != scratch1);
+ ASSERT(owner != scratch2);
+ ASSERT(scratch1 != scratch2);
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+}
+
+void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
+{
+ UNUSED_PARAM(ownerGPR);
+ UNUSED_PARAM(valueGPR);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+ UNUSED_PARAM(useKind);
+
+ if (isKnownNotCell(valueUse.node()))
+ return;
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+}
+
+void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
+{
+ UNUSED_PARAM(ownerGPR);
+ UNUSED_PARAM(value);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+ UNUSED_PARAM(useKind);
+
+ if (Heap::isMarked(value))
+ return;
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+}
+
+void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch)
+{
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(valueGPR);
+ UNUSED_PARAM(scratch);
+ UNUSED_PARAM(useKind);
+
+ if (isKnownNotCell(valueUse.node()))
+ return;
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
+}
+
+bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
+{
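+    // If this compare feeds an immediately following Branch, fuse the two into a single
+    // compare-and-branch; otherwise emit a standalone compare. Returns true when fused.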
+ unsigned branchIndexInBlock = detectPeepHoleBranch();
+ if (branchIndexInBlock != UINT_MAX) {
+ Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
+
+ ASSERT(node->adjustedRefCount() == 1);
+
+ nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
+
+ m_indexInBlock = branchIndexInBlock;
+ m_currentNode = branchNode;
+
+ return true;
+ }
+
+ nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
+
+ return false;
+}
+
+bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
+{
+ unsigned branchIndexInBlock = detectPeepHoleBranch();
+ if (branchIndexInBlock != UINT_MAX) {
+ Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
+
+ ASSERT(node->adjustedRefCount() == 1);
+
+ nonSpeculativePeepholeStrictEq(node, branchNode, invert);
+
+ m_indexInBlock = branchIndexInBlock;
+ m_currentNode = branchNode;
+
+ return true;
+ }
+
+ nonSpeculativeNonPeepholeStrictEq(node, invert);
+
+ return false;
+}
+
+#ifndef NDEBUG
+static const char* dataFormatString(DataFormat format)
+{
+ // These values correspond to the DataFormat enum.
+ const char* strings[] = {
+ "[ ]",
+ "[ i]",
+ "[ d]",
+ "[ c]",
+ "Err!",
+ "Err!",
+ "Err!",
+ "Err!",
+ "[J ]",
+ "[Ji]",
+ "[Jd]",
+ "[Jc]",
+ "Err!",
+ "Err!",
+ "Err!",
+ "Err!",
+ };
+ return strings[format];
+}
+
+void SpeculativeJIT::dump(const char* label)
+{
+ if (label)
+ dataLogF("<%s>\n", label);
+
+ dataLogF(" gprs:\n");
+ m_gprs.dump();
+ dataLogF(" fprs:\n");
+ m_fprs.dump();
+ dataLogF(" VirtualRegisters:\n");
+ for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
+ GenerationInfo& info = m_generationInfo[i];
+ if (info.alive())
+ dataLogF(" % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
+ else
+ dataLogF(" % 3d:[__][__]", i);
+ if (info.registerFormat() == DataFormatDouble)
+ dataLogF(":fpr%d\n", info.fpr());
+ else if (info.registerFormat() != DataFormatNone
+#if USE(JSVALUE32_64)
+ && !(info.registerFormat() & DataFormatJS)
+#endif
+ ) {
+ ASSERT(info.gpr() != InvalidGPRReg);
+ dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
+ } else
+ dataLogF("\n");
+ }
+ if (label)
+ dataLogF("</%s>\n", label);
+}
+#endif
+
+
+#if DFG_ENABLE(CONSISTENCY_CHECK)
+void SpeculativeJIT::checkConsistency()
+{
+ bool failed = false;
+
+ for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
+ if (iter.isLocked()) {
+ dataLogF("DFG_CONSISTENCY_CHECK failed: gpr %s is locked.\n", iter.debugName());
+ failed = true;
+ }
+ }
+ for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
+ if (iter.isLocked()) {
+ dataLogF("DFG_CONSISTENCY_CHECK failed: fpr %s is locked.\n", iter.debugName());
+ failed = true;
+ }
+ }
+
+ for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
+ VirtualRegister virtualRegister = (VirtualRegister)i;
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ if (!info.alive())
+ continue;
+ switch (info.registerFormat()) {
+ case DataFormatNone:
+ break;
+ case DataFormatJS:
+ case DataFormatJSInteger:
+ case DataFormatJSDouble:
+ case DataFormatJSCell:
+ case DataFormatJSBoolean:
+#if USE(JSVALUE32_64)
+ break;
+#endif
+ case DataFormatInteger:
+ case DataFormatCell:
+ case DataFormatBoolean:
+ case DataFormatStorage: {
+ GPRReg gpr = info.gpr();
+ ASSERT(gpr != InvalidGPRReg);
+ if (m_gprs.name(gpr) != virtualRegister) {
+ dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (gpr %s).\n", virtualRegister, GPRInfo::debugName(gpr));
+ failed = true;
+ }
+ break;
+ }
+ case DataFormatDouble: {
+ FPRReg fpr = info.fpr();
+ ASSERT(fpr != InvalidFPRReg);
+ if (m_fprs.name(fpr) != virtualRegister) {
+ dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (fpr %s).\n", virtualRegister, FPRInfo::debugName(fpr));
+ failed = true;
+ }
+ break;
+ }
+ case DataFormatOSRMarker:
+ case DataFormatDead:
+ case DataFormatArguments:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
+ VirtualRegister virtualRegister = iter.name();
+ if (virtualRegister == InvalidVirtualRegister)
+ continue;
+
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+#if USE(JSVALUE64)
+ if (iter.regID() != info.gpr()) {
+ dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ failed = true;
+ }
+#else
+ if (!(info.registerFormat() & DataFormatJS)) {
+ if (iter.regID() != info.gpr()) {
+ dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ failed = true;
+ }
+ } else {
+ if (iter.regID() != info.tagGPR() && iter.regID() != info.payloadGPR()) {
+ dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ failed = true;
+ }
+ }
+#endif
+ }
+
+ for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
+ VirtualRegister virtualRegister = iter.name();
+ if (virtualRegister == InvalidVirtualRegister)
+ continue;
+
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ if (iter.regID() != info.fpr()) {
+ dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for fpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ failed = true;
+ }
+ }
+
+ if (failed) {
+ dump();
+ CRASH();
+ }
+}
+#endif
+
+GPRTemporary::GPRTemporary()
+ : m_jit(0)
+ , m_gpr(InvalidGPRReg)
+{
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ m_gpr = m_jit->allocate(specific);
+}
+
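+// The operand-taking constructors try to reuse the operand's register when this is its last use;
+// otherwise they allocate a fresh register.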
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1, SpeculateIntegerOperand& op2)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else if (m_jit->canReuse(op2.node()))
+ m_gpr = m_jit->reuse(op2.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateStrictInt32Operand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1, IntegerOperand& op2)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else if (m_jit->canReuse(op2.node()))
+ m_gpr = m_jit->reuse(op2.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateCellOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateBooleanOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+#if USE(JSVALUE64)
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+#else
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1, bool tag)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (!op1.isDouble() && m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(tag ? op1.tagGPR() : op1.payloadGPR());
+ else
+ m_gpr = m_jit->allocate();
+}
+#endif
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, StorageOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+void GPRTemporary::adopt(GPRTemporary& other)
+{
+ ASSERT(!m_jit);
+ ASSERT(m_gpr == InvalidGPRReg);
+ ASSERT(other.m_jit);
+ ASSERT(other.m_gpr != InvalidGPRReg);
+ m_jit = other.m_jit;
+ m_gpr = other.m_gpr;
+ other.m_jit = 0;
+ other.m_gpr = InvalidGPRReg;
+}
+
+FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
+ : m_jit(jit)
+ , m_fpr(InvalidFPRReg)
+{
+ m_fpr = m_jit->fprAllocate();
+}
+
+FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
+ : m_jit(jit)
+ , m_fpr(InvalidFPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_fpr = m_jit->reuse(op1.fpr());
+ else
+ m_fpr = m_jit->fprAllocate();
+}
+
+FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
+ : m_jit(jit)
+ , m_fpr(InvalidFPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_fpr = m_jit->reuse(op1.fpr());
+ else if (m_jit->canReuse(op2.node()))
+ m_fpr = m_jit->reuse(op2.fpr());
+ else
+ m_fpr = m_jit->fprAllocate();
+}
+
+#if USE(JSVALUE32_64)
+FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
+ : m_jit(jit)
+ , m_fpr(InvalidFPRReg)
+{
+ if (op1.isDouble() && m_jit->canReuse(op1.node()))
+ m_fpr = m_jit->reuse(op1.fpr());
+ else
+ m_fpr = m_jit->fprAllocate();
+}
+#endif
+
+void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
+{
+ BlockIndex taken = branchNode->takenBlockIndex();
+ BlockIndex notTaken = branchNode->notTakenBlockIndex();
+
+ SpeculateDoubleOperand op1(this, node->child1());
+ SpeculateDoubleOperand op2(this, node->child2());
+
+ branchDouble(condition, op1.fpr(), op2.fpr(), taken);
+ jump(notTaken);
+}
+
+void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
+{
+ BlockIndex taken = branchNode->takenBlockIndex();
+ BlockIndex notTaken = branchNode->notTakenBlockIndex();
+
+ MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
+
+ if (taken == nextBlock()) {
+ condition = MacroAssembler::NotEqual;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ SpeculateCellOperand op1(this, node->child1());
+ SpeculateCellOperand op2(this, node->child2());
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+
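+    // If the masquerades-as-undefined watchpoint is still intact we only need to rule out string
+    // cells; otherwise each structure must also be checked for the MasqueradesAsUndefined flag.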
+ if (m_jit.graph().globalObjectFor(node->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
+ m_jit.graph().globalObjectFor(node->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
+ if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
+ speculationCheck(
+ BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ }
+ if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
+ speculationCheck(
+ BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ }
+ } else {
+ GPRTemporary structure(this);
+ GPRReg structureGPR = structure.gpr();
+
+ m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
+ if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
+ speculationCheck(
+ BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ }
+ speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
+ m_jit.branchTest8(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
+ MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
+
+ m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
+ if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
+ speculationCheck(
+ BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ }
+ speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
+ m_jit.branchTest8(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
+ MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
+ }
+
+ branchPtr(condition, op1GPR, op2GPR, taken);
+ jump(notTaken);
+}
+
+void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
+{
+ BlockIndex taken = branchNode->takenBlockIndex();
+ BlockIndex notTaken = branchNode->notTakenBlockIndex();
+
+ // The branch instruction will branch to the taken block.
+ // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
+ if (taken == nextBlock()) {
+ condition = JITCompiler::invert(condition);
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ if (isBooleanConstant(node->child1().node())) {
+ bool imm = valueOfBooleanConstant(node->child1().node());
+ SpeculateBooleanOperand op2(this, node->child2());
+ branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
+ } else if (isBooleanConstant(node->child2().node())) {
+ SpeculateBooleanOperand op1(this, node->child1());
+ bool imm = valueOfBooleanConstant(node->child2().node());
+ branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
+ } else {
+ SpeculateBooleanOperand op1(this, node->child1());
+ SpeculateBooleanOperand op2(this, node->child2());
+ branch32(condition, op1.gpr(), op2.gpr(), taken);
+ }
+
+ jump(notTaken);
+}
+
+void SpeculativeJIT::compilePeepHoleIntegerBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
+{
+ BlockIndex taken = branchNode->takenBlockIndex();
+ BlockIndex notTaken = branchNode->notTakenBlockIndex();
+
+ // The branch instruction will branch to the taken block.
+ // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
+ if (taken == nextBlock()) {
+ condition = JITCompiler::invert(condition);
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ if (isInt32Constant(node->child1().node())) {
+ int32_t imm = valueOfInt32Constant(node->child1().node());
+ SpeculateIntegerOperand op2(this, node->child2());
+ branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
+ } else if (isInt32Constant(node->child2().node())) {
+ SpeculateIntegerOperand op1(this, node->child1());
+ int32_t imm = valueOfInt32Constant(node->child2().node());
+ branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
+ } else {
+ SpeculateIntegerOperand op1(this, node->child1());
+ SpeculateIntegerOperand op2(this, node->child2());
+ branch32(condition, op1.gpr(), op2.gpr(), taken);
+ }
+
+ jump(notTaken);
+}
+
+// Returns true if the compare is fused with a subsequent branch.
+bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
+{
+ // Fused compare & branch.
+ unsigned branchIndexInBlock = detectPeepHoleBranch();
+ if (branchIndexInBlock != UINT_MAX) {
+ Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
+
+ // detectPeepHoleBranch currently only permits the branch to be the very next node,
+        // so there can be no intervening nodes that also reference the compare.
+ ASSERT(node->adjustedRefCount() == 1);
+
+ if (node->isBinaryUseKind(Int32Use))
+ compilePeepHoleIntegerBranch(node, branchNode, condition);
+ else if (node->isBinaryUseKind(NumberUse))
+ compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
+ else if (node->op() == CompareEq) {
+ if (node->isBinaryUseKind(StringUse)) {
+ // Use non-peephole comparison, for now.
+ return false;
+ }
+ if (node->isBinaryUseKind(BooleanUse))
+ compilePeepHoleBooleanBranch(node, branchNode, condition);
+ else if (node->isBinaryUseKind(ObjectUse))
+ compilePeepHoleObjectEquality(node, branchNode);
+ else if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse)
+ compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
+ else if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse)
+ compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
+ else {
+ nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
+ return true;
+ }
+ } else {
+ nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
+ return true;
+ }
+
+ use(node->child1());
+ use(node->child2());
+ m_indexInBlock = branchIndexInBlock;
+ m_currentNode = branchNode;
+ return true;
+ }
+ return false;
+}
+
+void SpeculativeJIT::noticeOSRBirth(Node* node)
+{
+ if (!node->hasVirtualRegister())
+ return;
+
+ VirtualRegister virtualRegister = node->virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ info.noticeOSRBirth(*m_stream, node, virtualRegister);
+}
+
+void SpeculativeJIT::compileMovHint(Node* node)
+{
+ ASSERT(node->containsMovHint() && node->op() != ZombieHint);
+
+ m_lastSetOperand = node->local();
+
+ Node* child = node->child1().node();
+ noticeOSRBirth(child);
+
+ if (child->op() == UInt32ToNumber)
+ noticeOSRBirth(child->child1().node());
+
+ m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->local()));
+}
+
+void SpeculativeJIT::compileMovHintAndCheck(Node* node)
+{
+ compileMovHint(node);
+ speculate(node, node->child1());
+ noResult(node);
+}
+
+void SpeculativeJIT::compileInlineStart(Node* node)
+{
+ InlineCallFrame* inlineCallFrame = node->codeOrigin.inlineCallFrame;
+ int argumentCountIncludingThis = inlineCallFrame->arguments.size();
+ unsigned argumentPositionStart = node->argumentPositionStart();
+ CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
+ for (int i = 0; i < argumentCountIncludingThis; ++i) {
+ ValueRecovery recovery;
+ if (codeBlock->isCaptured(argumentToOperand(i)))
+ recovery = ValueRecovery::alreadyInJSStack();
+ else {
+ ArgumentPosition& argumentPosition =
+ m_jit.graph().m_argumentPositions[argumentPositionStart + i];
+ ValueSource valueSource;
+ if (!argumentPosition.shouldUnboxIfPossible())
+ valueSource = ValueSource(ValueInJSStack);
+ else if (argumentPosition.shouldUseDoubleFormat())
+ valueSource = ValueSource(DoubleInJSStack);
+ else if (isInt32Speculation(argumentPosition.prediction()))
+ valueSource = ValueSource(Int32InJSStack);
+ else if (isCellSpeculation(argumentPosition.prediction()))
+ valueSource = ValueSource(CellInJSStack);
+ else if (isBooleanSpeculation(argumentPosition.prediction()))
+ valueSource = ValueSource(BooleanInJSStack);
+ else
+ valueSource = ValueSource(ValueInJSStack);
+ recovery = computeValueRecoveryFor(valueSource);
+ }
+ // The recovery should refer either to something that has already been
+ // stored into the stack at the right place, or to a constant,
+ // since the Arguments code isn't smart enough to handle anything else.
+ // The exception is the this argument, which we don't really need to be
+ // able to recover.
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLogF("\nRecovery for argument %d: ", i);
+ recovery.dump(WTF::dataFile());
+#endif
+ inlineCallFrame->arguments[i] = recovery;
+ }
+}
+
+void SpeculativeJIT::compile(BasicBlock& block)
+{
+ ASSERT(m_compileOkay);
+
+ if (!block.isReachable)
+ return;
+
+ if (!block.cfaHasVisited) {
+ // Don't generate code for basic blocks that are unreachable according to CFA.
+ // But to be sure that nobody has generated a jump to this block, drop in a
+ // breakpoint here.
+#if !ASSERT_DISABLED
+ m_jit.breakpoint();
+#endif
+ return;
+ }
+
+ m_blockHeads[m_block] = m_jit.label();
+#if DFG_ENABLE(JIT_BREAK_ON_EVERY_BLOCK)
+ m_jit.breakpoint();
+#endif
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLogF("Setting up state for block #%u: ", m_block);
+#endif
+
+ m_stream->appendAndLog(VariableEvent::reset());
+
+ m_jit.jitAssertHasValidCallFrame();
+
+ ASSERT(m_arguments.size() == block.variablesAtHead.numberOfArguments());
+ for (size_t i = 0; i < m_arguments.size(); ++i) {
+ ValueSource valueSource = ValueSource(ValueInJSStack);
+ m_arguments[i] = valueSource;
+ m_stream->appendAndLog(VariableEvent::setLocal(argumentToOperand(i), valueSource.dataFormat()));
+ }
+
+ m_state.reset();
+ m_state.beginBasicBlock(&block);
+
+ ASSERT(m_variables.size() == block.variablesAtHead.numberOfLocals());
+ for (size_t i = 0; i < m_variables.size(); ++i) {
+ Node* node = block.variablesAtHead.local(i);
+ ValueSource valueSource;
+ if (!node)
+ valueSource = ValueSource(SourceIsDead);
+ else if (node->variableAccessData()->isArgumentsAlias())
+ valueSource = ValueSource(ArgumentsSource);
+ else if (!node->refCount())
+ valueSource = ValueSource(SourceIsDead);
+ else if (!node->variableAccessData()->shouldUnboxIfPossible())
+ valueSource = ValueSource(ValueInJSStack);
+ else if (node->variableAccessData()->shouldUseDoubleFormat())
+ valueSource = ValueSource(DoubleInJSStack);
+ else
+ valueSource = ValueSource::forSpeculation(node->variableAccessData()->argumentAwarePrediction());
+ m_variables[i] = valueSource;
+ // FIXME: Don't emit SetLocal(Dead). https://bugs.webkit.org/show_bug.cgi?id=108019
+ m_stream->appendAndLog(VariableEvent::setLocal(i, valueSource.dataFormat()));
+ }
+
+ m_lastSetOperand = std::numeric_limits<int>::max();
+ m_codeOriginForOSR = CodeOrigin();
+
+ if (DFG_ENABLE_EDGE_CODE_VERIFICATION) {
+ JITCompiler::Jump verificationSucceeded =
+ m_jit.branch32(JITCompiler::Equal, GPRInfo::regT0, TrustedImm32(m_block));
+ m_jit.breakpoint();
+ verificationSucceeded.link(&m_jit);
+ }
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLogF("\n");
+#endif
+
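+    // Emit code for each node in turn. Nodes that generate no code (constants, hints) still
+    // record their effects in the minified graph and the variable event stream so that OSR exit
+    // can reconstruct the bytecode state.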
+ for (m_indexInBlock = 0; m_indexInBlock < block.size(); ++m_indexInBlock) {
+ m_currentNode = block[m_indexInBlock];
+#if !ASSERT_DISABLED
+ m_canExit = m_currentNode->canExit();
+#endif
+ bool shouldExecuteEffects = m_state.startExecuting(m_currentNode);
+ m_jit.setForNode(m_currentNode);
+ m_codeOriginForOSR = m_currentNode->codeOrigin;
+ if (!m_currentNode->shouldGenerate()) {
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLogF("SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x ", m_currentNode->index(), m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
+#endif
+ switch (m_currentNode->op()) {
+ case JSConstant:
+ m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
+ break;
+
+ case WeakJSConstant:
+ m_jit.addWeakReference(m_currentNode->weakConstant());
+ m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
+ break;
+
+ case SetLocal:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+
+ case MovHint:
+ compileMovHint(m_currentNode);
+ break;
+
+ case ZombieHint: {
+ m_lastSetOperand = m_currentNode->local();
+ m_stream->appendAndLog(VariableEvent::setLocal(m_currentNode->local(), DataFormatDead));
+ break;
+ }
+
+ default:
+ if (belongsInMinifiedGraph(m_currentNode->op()))
+ m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
+ break;
+ }
+ } else {
+
+ if (verboseCompilationEnabled()) {
+ dataLogF(
+ "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
+ (int)m_currentNode->index(),
+ m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLog(" ");
+#else
+ dataLog("\n");
+#endif
+ }
+#if DFG_ENABLE(JIT_BREAK_ON_EVERY_NODE)
+ m_jit.breakpoint();
+#endif
+#if DFG_ENABLE(XOR_DEBUG_AID)
+ m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
+ m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
+#endif
+ checkConsistency();
+
+ m_speculationDirection = (m_currentNode->flags() & NodeExitsForward) ? ForwardSpeculation : BackwardSpeculation;
+
+ compile(m_currentNode);
+ if (!m_compileOkay) {
+ m_compileOkay = true;
+ clearGenerationInfo();
+ return;
+ }
+
+ if (belongsInMinifiedGraph(m_currentNode->op())) {
+ m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
+ noticeOSRBirth(m_currentNode);
+ }
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ if (m_currentNode->hasResult()) {
+ GenerationInfo& info = m_generationInfo[m_currentNode->virtualRegister()];
+ dataLogF("-> %s, vr#%d", dataFormatToString(info.registerFormat()), (int)m_currentNode->virtualRegister());
+ if (info.registerFormat() != DataFormatNone) {
+ if (info.registerFormat() == DataFormatDouble)
+ dataLogF(", %s", FPRInfo::debugName(info.fpr()));
+#if USE(JSVALUE32_64)
+ else if (info.registerFormat() & DataFormatJS)
+ dataLogF(", %s %s", GPRInfo::debugName(info.tagGPR()), GPRInfo::debugName(info.payloadGPR()));
+#endif
+ else
+ dataLogF(", %s", GPRInfo::debugName(info.gpr()));
+ }
+ dataLogF(" ");
+ } else
+ dataLogF(" ");
+#endif
+ }
+
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLogF("\n");
+#endif
+
+ // Make sure that the abstract state is rematerialized for the next node.
+ if (shouldExecuteEffects)
+ m_state.executeEffects(m_indexInBlock);
+
+ if (m_currentNode->shouldGenerate())
+ checkConsistency();
+ }
+
+ // Perform the most basic verification that children have been used correctly.
+#if !ASSERT_DISABLED
+ for (unsigned index = 0; index < m_generationInfo.size(); ++index) {
+ GenerationInfo& info = m_generationInfo[index];
+ ASSERT(!info.alive());
+ }
+#endif
+}
+
+// If we are making type predictions about our arguments, then we need to check
+// that they are correct on function entry.
+void SpeculativeJIT::checkArgumentTypes()
+{
+ ASSERT(!m_currentNode);
+ m_isCheckingArgumentTypes = true;
+ m_speculationDirection = BackwardSpeculation;
+ m_codeOriginForOSR = CodeOrigin(0);
+
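+ // On entry, every argument and local is assumed to live in its JS stack slot.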
+ for (size_t i = 0; i < m_arguments.size(); ++i)
+ m_arguments[i] = ValueSource(ValueInJSStack);
+ for (size_t i = 0; i < m_variables.size(); ++i)
+ m_variables[i] = ValueSource(ValueInJSStack);
+
+ for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
+ Node* node = m_jit.graph().m_arguments[i];
+ ASSERT(node->op() == SetArgument);
+ if (!node->shouldGenerate()) {
+ // The argument is dead. We don't do any checks for such arguments.
+ continue;
+ }
+
+ VariableAccessData* variableAccessData = node->variableAccessData();
+ if (!variableAccessData->isProfitableToUnbox())
+ continue;
+
+ VirtualRegister virtualRegister = variableAccessData->local();
+ SpeculatedType predictedType = variableAccessData->prediction();
+
+ JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
+
+#if USE(JSVALUE64)
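+ // On 64-bit, the NaN-boxed encoding lets us check types without untagging: int32s
+ // encode at or above the tag-type-number constant (e.g. the int32 5 encodes as
+ // 0xFFFF000000000005), booleans differ from ValueFalse (0x06) only in bit 0, and
+ // cell pointers have none of the tag-mask bits set.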
+ if (isInt32Speculation(predictedType))
+ speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
+ else if (isBooleanSpeculation(predictedType)) {
+ GPRTemporary temp(this);
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
+ speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
+ } else if (isCellSpeculation(predictedType))
+ speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
+#else
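+ // On 32-bit, the tag word lives in its own stack slot, so compare it directly
+ // against the expected tag.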
+ if (isInt32Speculation(predictedType))
+ speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
+ else if (isBooleanSpeculation(predictedType))
+ speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
+ else if (isCellSpeculation(predictedType))
+ speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
+#endif
+ }
+ m_isCheckingArgumentTypes = false;
+}
+
+bool SpeculativeJIT::compile()
+{
+ checkArgumentTypes();
+
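+ // Under edge code verification, regT0 carries the index of the block we expect to
+ // enter next; function entry targets block 0.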
+ if (DFG_ENABLE_EDGE_CODE_VERIFICATION)
+ m_jit.move(TrustedImm32(0), GPRInfo::regT0);
+
+ ASSERT(!m_currentNode);
+ for (m_block = 0; m_block < m_jit.graph().m_blocks.size(); ++m_block) {
+ m_jit.setForBlock(m_block);
+ BasicBlock* block = m_jit.graph().m_blocks[m_block].get();
+ if (block)
+ compile(*block);
+ }
+ linkBranches();
+ return true;
+}
+
+void SpeculativeJIT::createOSREntries()
+{
+ for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().m_blocks.size(); ++blockIndex) {
+ BasicBlock* block = m_jit.graph().m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ if (!block->isOSRTarget)
+ continue;
+
+ // Currently we only need to create OSR entry trampolines when using edge code
+ // verification. But in the future we'll need this for other things as well (for
+ // example, when we have global register allocation).
+ // If we don't need an OSR entry trampoline, the block head itself serves as the
+ // entry point.
+ if (!DFG_ENABLE_EDGE_CODE_VERIFICATION) {
+ m_osrEntryHeads.append(m_blockHeads[blockIndex]);
+ continue;
+ }
+
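+ // Emit a trampoline that loads the block index into regT0 before jumping to the
+ // block head, so the edge verification check there sees the expected index.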
+ m_osrEntryHeads.append(m_jit.label());
+ m_jit.move(TrustedImm32(blockIndex), GPRInfo::regT0);
+ m_jit.jump().linkTo(m_blockHeads[blockIndex], &m_jit);
+ }
+}
+
+void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer)
+{
+ unsigned osrEntryIndex = 0;
+ for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().m_blocks.size(); ++blockIndex) {
+ BasicBlock* block = m_jit.graph().m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ if (!block->isOSRTarget)
+ continue;
+ m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
+ }
+ ASSERT(osrEntryIndex == m_osrEntryHeads.size());
+}
+
+ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource)
+{
+ if (valueSource.isInJSStack())
+ return valueSource.valueRecovery();
+
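+ // Only constants can be recovered directly from a node source; anything else gets
+ // an empty recovery.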
+ ASSERT(valueSource.kind() == HaveNode);
+ Node* node = valueSource.id().node(m_jit.graph());
+ if (isConstant(node))
+ return ValueRecovery::constant(valueOfJSConstant(node));
+
+ return ValueRecovery();
+}
+
+void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
+{
+ Edge child3 = m_jit.graph().varArgChild(node, 2);
+ Edge child4 = m_jit.graph().varArgChild(node, 3);
+
+ ArrayMode arrayMode = node->arrayMode();
+
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+
+ SpeculateDoubleOperand value(this, child3);
+
+ FPRReg valueReg = value.fpr();
+
+ DFG_TYPE_CHECK(
+ JSValueRegs(), child3, SpecRealNumber,
+ m_jit.branchDouble(
+ MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
+
+ if (!m_compileOkay)
+ return;
+
+ StorageOperand storage(this, child4);
+ GPRReg storageReg = storage.gpr();
+
+ if (node->op() == PutByValAlias) {
+ // Store the value to the array.
+ m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
+
+ noResult(m_currentNode);
+ return;
+ }
+
+ GPRTemporary temporary;
+ GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
+
+ MacroAssembler::Jump slowCase;
+
+ if (arrayMode.isInBounds()) {
+ speculationCheck(
+ StoreToHoleOrOutOfBounds, JSValueRegs(), 0,
+ m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
+ } else {
+ MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
+
+ slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
+
+ if (!arrayMode.isOutOfBounds())
+ speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
+
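+ // We are storing past the current public length but within the vector, so bump the
+ // public length to index + 1 (e.g. a store at index == publicLength grows the
+ // array by one).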
+ m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
+ m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
+
+ inBounds.link(&m_jit);
+ }
+
+ m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
+
+ base.use();
+ property.use();
+ value.use();
+ storage.use();
+
+ if (arrayMode.isOutOfBounds()) {
+ addSlowPathGenerator(
+ slowPathCall(
+ slowCase, this,
+ m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
+ NoResult, baseReg, propertyReg, valueReg));
+ }
+
+ noResult(m_currentNode, UseChildrenCalledExplicitly);
+}
+
+void SpeculativeJIT::compileGetCharCodeAt(Node* node)
+{
+ SpeculateCellOperand string(this, node->child1());
+ SpeculateStrictInt32Operand index(this, node->child2());
+ StorageOperand storage(this, node->child3());
+
+ GPRReg stringReg = string.gpr();
+ GPRReg indexReg = index.gpr();
+ GPRReg storageReg = storage.gpr();
+
+ ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
+
+ // Unsigned comparison so we can filter out negative indices and indices that are too large.
+ speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
+
+ GPRTemporary scratch(this);
+ GPRReg scratchReg = scratch.gpr();
+
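+ // Fetch the string's backing StringImpl; its flags tell us whether the characters
+ // are stored as 8-bit or 16-bit values.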
+ m_jit.loadPtr(MacroAssembler::Address(stringReg, JSString::offsetOfValue()), scratchReg);
+
+ // Load the character into scratchReg
+ JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
+
+ m_jit.load8(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesOne, 0), scratchReg);
+ JITCompiler::Jump cont8Bit = m_jit.jump();
+
+ is16Bit.link(&m_jit);
+
+ m_jit.load16(MacroAssembler::BaseIndex(storageReg, indexReg, MacroAssembler::TimesTwo, 0), scratchReg);
+
+ cont8Bit.link(&m_jit);
+
+ integerResult(scratchReg, m_currentNode);
+}
+
+void SpeculativeJIT::compileGetByValOnString(Node* node)
+{
+ SpeculateCellOperand base(this, node->child1());
+ SpeculateStrictInt32Operand property(this, node->child2());
+ StorageOperand storage(this, node->child3());
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+ GPRReg storageReg = storage.gpr();
+
+ ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
+
+ // Unsigned comparison so we can filter out negative indices and indices that are too large.
+ speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSString::offsetOfLength())));
+
+ GPRTemporary scratch(this);
+ GPRReg scratchReg = scratch.gpr();
+
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), scratchReg);
+
+ // Load the character into scratchReg
+ JITCompiler::Jump is16Bit = m_jit.branchTest32(MacroAssembler::Zero, MacroAssembler::Address(scratchReg, StringImpl::flagsOffset()), TrustedImm32(StringImpl::flagIs8Bit()));
+
+ m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne, 0), scratchReg);
+ JITCompiler::Jump cont8Bit = m_jit.jump();
+
+ is16Bit.link(&m_jit);
+
+ m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
+
+ // We only support characters in the Latin-1 range (< 0x100); anything larger takes
+ // the speculation failure path.
+ speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100)));
+
+ // 8-bit string values are always below 0x100, so they skip that check.
+ cont8Bit.link(&m_jit);
+
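+ // Look the character up in the VM's single-character string cache; a null entry
+ // (one that has not been materialized yet) triggers a speculation failure rather
+ // than allocating here.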
+ GPRTemporary smallStrings(this);
+ GPRReg smallStringsReg = smallStrings.gpr();
+ m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
+ m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, scratchReg, MacroAssembler::ScalePtr, 0), scratchReg);
+ speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
+ cellResult(scratchReg, m_currentNode);
+}
+
+void SpeculativeJIT::compileFromCharCode(Node* node)