/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#if ENABLE(DFG_JIT)
+#include "Arguments.h"
+#include "DFGArrayifySlowPathGenerator.h"
+#include "DFGCallArrayAllocatorSlowPathGenerator.h"
+#include "DFGSlowPathGenerator.h"
+#include "JSCJSValueInlines.h"
#include "LinkBuffer.h"
+#include <wtf/MathExtras.h>
namespace JSC { namespace DFG {
-// On Windows we need to wrap fmod; on other platforms we can call it directly.
-// On ARMv7 we assert that all function pointers have to low bit set (point to thumb code).
-#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
-static double DFG_OPERATION fmodAsDFGOperation(double x, double y)
+SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
+ : m_compileOkay(true)
+ , m_jit(jit)
+ , m_currentNode(0)
+ , m_indexInBlock(0)
+ , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
+ , m_blockHeads(jit.graph().m_blocks.size())
+ , m_arguments(jit.codeBlock()->numParameters())
+ , m_variables(jit.graph().m_localVars)
+ , m_lastSetOperand(std::numeric_limits<int>::max())
+ , m_state(m_jit.graph())
+ , m_stream(&jit.codeBlock()->variableEventStream())
+ , m_minifiedGraph(&jit.codeBlock()->minifiedDFG())
+ , m_isCheckingArgumentTypes(false)
{
- return fmod(x, y);
}
-#else
-#define fmodAsDFGOperation fmod
-#endif
-void SpeculativeJIT::clearGenerationInfo()
+SpeculativeJIT::~SpeculativeJIT()
{
- for (unsigned i = 0; i < m_generationInfo.size(); ++i)
- m_generationInfo[i] = GenerationInfo();
- m_gprs = RegisterBank<GPRInfo>();
- m_fprs = RegisterBank<FPRInfo>();
}
-GPRReg SpeculativeJIT::fillStorage(NodeIndex nodeIndex)
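+// Inline-allocates a JSArray together with its butterfly storage, deferring to
+// an out-of-line allocator call if either inline allocation fails.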
+void SpeculativeJIT::emitAllocateJSArray(GPRReg resultGPR, Structure* structure, GPRReg storageGPR, unsigned numElements)
{
- Node& node = m_jit.graph()[nodeIndex];
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
+ ASSERT(hasUndecided(structure->indexingType()) || hasInt32(structure->indexingType()) || hasDouble(structure->indexingType()) || hasContiguous(structure->indexingType()));
- switch (info.registerFormat()) {
- case DataFormatNone: {
- if (info.spillFormat() == DataFormatStorage) {
- GPRReg gpr = allocate();
- m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
- info.fillStorage(gpr);
- return gpr;
+ GPRTemporary scratch(this);
+ GPRTemporary scratch2(this);
+ GPRReg scratchGPR = scratch.gpr();
+ GPRReg scratch2GPR = scratch2.gpr();
+
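+ // Allocate at least BASE_VECTOR_LEN element slots, so small arrays still get
+ // some room to grow before their butterfly must be reallocated.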
+ unsigned vectorLength = std::max(BASE_VECTOR_LEN, numElements);
+
+ JITCompiler::JumpList slowCases;
+
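+ // emitAllocateBasicStorage leaves storageGPR pointing at the end of the new
+ // allocation; stepping back over the element area leaves it just past the
+ // IndexingHeader, which is where a butterfly pointer points. The length
+ // stores below then use the header's negative offsets.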
+ slowCases.append(
+ emitAllocateBasicStorage(TrustedImm32(vectorLength * sizeof(JSValue) + sizeof(IndexingHeader)), storageGPR));
+ m_jit.subPtr(TrustedImm32(vectorLength * sizeof(JSValue)), storageGPR);
+ emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
+
+ m_jit.store32(TrustedImm32(numElements), MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
+ m_jit.store32(TrustedImm32(vectorLength), MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
+
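+ // Unused slots of a double array must hold QNaN, which the runtime treats as
+ // a hole, rather than uninitialized bits.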
+ if (hasDouble(structure->indexingType()) && numElements < vectorLength) {
+#if USE(JSVALUE64)
+ m_jit.move(TrustedImm64(bitwise_cast<int64_t>(QNaN)), scratchGPR);
+ for (unsigned i = numElements; i < vectorLength; ++i)
+ m_jit.store64(scratchGPR, MacroAssembler::Address(storageGPR, sizeof(double) * i));
+#else
+ EncodedValueDescriptor value;
+ value.asInt64 = JSValue::encode(JSValue(JSValue::EncodeAsDouble, QNaN));
+ for (unsigned i = numElements; i < vectorLength; ++i) {
+ m_jit.store32(TrustedImm32(value.asBits.tag), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(TrustedImm32(value.asBits.payload), MacroAssembler::Address(storageGPR, sizeof(double) * i + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
}
-
- // Must be a cell; fill it as a cell and then return the pointer.
- return fillSpeculateCell(nodeIndex);
- }
-
- case DataFormatStorage: {
- GPRReg gpr = info.gpr();
- m_gprs.lock(gpr);
- return gpr;
- }
-
- default:
- return fillSpeculateCell(nodeIndex);
+#endif
}
+
+ // I want a slow path that also loads out the storage pointer, and that's
+ // what this custom CallArrayAllocatorSlowPathGenerator gives me. It's a lot
+ // of work for a very small piece of functionality. :-/
+ addSlowPathGenerator(adoptPtr(
+ new CallArrayAllocatorSlowPathGenerator(
+ slowCases, this, operationNewArrayWithSize, resultGPR, storageGPR,
+ structure, numElements)));
}
-void SpeculativeJIT::useChildren(Node& node)
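+// A "backward" speculation check exits to the start of the currently executing
+// bytecode, which is then re-executed in the baseline JIT. The plain
+// speculationCheck() variants additionally convert the exit into a forward
+// exit (resuming after the current code origin) when the current node
+// speculates in the forward direction.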
+void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
- if (node.flags() & NodeHasVarArgs) {
- for (unsigned childIdx = node.firstChild(); childIdx < node.firstChild() + node.numChildren(); childIdx++)
- use(m_jit.graph().m_varArgChildren[childIdx]);
- } else {
- Edge child1 = node.child1();
- if (!child1) {
- ASSERT(!node.child2() && !node.child3());
- return;
- }
- use(child1);
-
- Edge child2 = node.child2();
- if (!child2) {
- ASSERT(!node.child3());
- return;
- }
- use(child2);
-
- Edge child3 = node.child3();
- if (!child3)
- return;
- use(child3);
- }
+ if (!m_compileOkay)
+ return;
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ m_jit.appendExitInfo(jumpToFail);
+ m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}
-bool SpeculativeJIT::isStrictInt32(NodeIndex nodeIndex)
+void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
- if (isInt32Constant(nodeIndex))
- return true;
-
- Node& node = m_jit.graph()[nodeIndex];
- GenerationInfo& info = m_generationInfo[node.virtualRegister()];
-
- return info.registerFormat() == DataFormatInteger;
+ if (!m_compileOkay)
+ return;
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ m_jit.appendExitInfo(jumpsToFail);
+ m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
}
-bool SpeculativeJIT::isKnownInteger(NodeIndex nodeIndex)
+void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail)
{
- if (isInt32Constant(nodeIndex))
- return true;
-
- Node& node = m_jit.graph()[nodeIndex];
-
- if (node.hasInt32Result())
- return true;
-
- GenerationInfo& info = m_generationInfo[node.virtualRegister()];
-
- return info.isJSInteger();
+ if (!m_compileOkay)
+ return;
+ backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
+ if (m_speculationDirection == ForwardSpeculation)
+ convertLastOSRExitToForward();
}
-bool SpeculativeJIT::isKnownNumeric(NodeIndex nodeIndex)
+void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
{
- if (isInt32Constant(nodeIndex) || isNumberConstant(nodeIndex))
- return true;
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ speculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail);
+}
- Node& node = m_jit.graph()[nodeIndex];
-
- if (node.hasNumberResult())
- return true;
-
- GenerationInfo& info = m_generationInfo[node.virtualRegister()];
+OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node)
+{
+ if (!m_compileOkay)
+ return OSRExitJumpPlaceholder();
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ unsigned index = m_jit.codeBlock()->numberOfOSRExits();
+ m_jit.appendExitInfo();
+ m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size()));
+ return OSRExitJumpPlaceholder(index);
+}
- return info.isJSInteger() || info.isJSDouble();
+OSRExitJumpPlaceholder SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse)
+{
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ return backwardSpeculationCheck(kind, jsValueSource, nodeUse.node());
}
-bool SpeculativeJIT::isKnownCell(NodeIndex nodeIndex)
+void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail)
{
- return m_generationInfo[m_jit.graph()[nodeIndex].virtualRegister()].isJSCell();
+ if (!m_compileOkay)
+ return;
+ backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
+ if (m_speculationDirection == ForwardSpeculation)
+ convertLastOSRExitToForward();
}
-bool SpeculativeJIT::isKnownNotCell(NodeIndex nodeIndex)
+void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, const MacroAssembler::JumpList& jumpsToFail)
{
- Node& node = m_jit.graph()[nodeIndex];
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
- if (node.hasConstant() && !valueOfJSConstant(nodeIndex).isCell())
- return true;
- return !(info.isJSCell() || info.isUnknownJS());
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ speculationCheck(kind, jsValueSource, nodeUse.node(), jumpsToFail);
}
-bool SpeculativeJIT::isKnownNotInteger(NodeIndex nodeIndex)
+void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
- Node& node = m_jit.graph()[nodeIndex];
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
-
- return info.isJSDouble() || info.isJSCell() || info.isJSBoolean()
- || (node.hasConstant() && !valueOfJSConstant(nodeIndex).isInt32());
+ if (!m_compileOkay)
+ return;
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ m_jit.codeBlock()->appendSpeculationRecovery(recovery);
+ m_jit.appendExitInfo(jumpToFail);
+ m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(node), this, m_stream->size(), m_jit.codeBlock()->numberOfSpeculationRecoveries()));
}
-bool SpeculativeJIT::isKnownNotNumber(NodeIndex nodeIndex)
+void SpeculativeJIT::backwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
- Node& node = m_jit.graph()[nodeIndex];
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
-
- return (!info.isJSDouble() && !info.isJSInteger() && !info.isUnknownJS())
- || (node.hasConstant() && !isNumberConstant(nodeIndex));
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ backwardSpeculationCheck(kind, jsValueSource, nodeUse.node(), jumpToFail, recovery);
}
-void SpeculativeJIT::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
+void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
{
- UNUSED_PARAM(jit);
- UNUSED_PARAM(owner);
- UNUSED_PARAM(scratch1);
- UNUSED_PARAM(scratch2);
- UNUSED_PARAM(useKind);
- ASSERT(owner != scratch1);
- ASSERT(owner != scratch2);
- ASSERT(scratch1 != scratch2);
+ if (!m_compileOkay)
+ return;
+ backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail, recovery);
+ if (m_speculationDirection == ForwardSpeculation)
+ convertLastOSRExitToForward();
+}
-#if ENABLE(WRITE_BARRIER_PROFILING)
- JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind));
-#endif
- markCellCard(jit, owner, scratch1, scratch2);
+void SpeculativeJIT::speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
+{
+ speculationCheck(kind, jsValueSource, edge.node(), jumpToFail, recovery);
}
-void SpeculativeJIT::markCellCard(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2)
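+// Like a speculation check, except that the exit is reached through a
+// jump-replacement watchpoint instead of an inline branch: when the watchpoint
+// fires, a jump to the OSR exit is patched in at the recorded label.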
+JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind, JSValueSource jsValueSource, Node* node)
{
- UNUSED_PARAM(jit);
- UNUSED_PARAM(owner);
- UNUSED_PARAM(scratch1);
- UNUSED_PARAM(scratch2);
-
-#if ENABLE(GGC)
- jit.move(owner, scratch1);
- jit.andPtr(TrustedImm32(static_cast<int32_t>(MarkedBlock::blockMask)), scratch1);
- jit.move(owner, scratch2);
- // consume additional 8 bits as we're using an approximate filter
- jit.rshift32(TrustedImm32(MarkedBlock::atomShift + 8), scratch2);
- jit.andPtr(TrustedImm32(MarkedBlock::atomMask >> 8), scratch2);
- MacroAssembler::Jump filter = jit.branchTest8(MacroAssembler::Zero, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::TimesOne, MarkedBlock::offsetOfMarks()));
- jit.move(owner, scratch2);
- jit.rshift32(TrustedImm32(MarkedBlock::cardShift), scratch2);
- jit.andPtr(TrustedImm32(MarkedBlock::cardMask), scratch2);
- jit.store8(TrustedImm32(1), MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::TimesOne, MarkedBlock::offsetOfCards()));
- filter.link(&jit);
-#endif
+ if (!m_compileOkay)
+ return 0;
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ m_jit.appendExitInfo(JITCompiler::JumpList());
+ OSRExit& exit = m_jit.codeBlock()->osrExit(
+ m_jit.codeBlock()->appendOSRExit(OSRExit(
+ kind, jsValueSource,
+ m_jit.graph().methodOfGettingAValueProfileFor(node),
+ this, m_stream->size())));
+ exit.m_watchpointIndex = m_jit.codeBlock()->appendWatchpoint(
+ JumpReplacementWatchpoint(m_jit.watchpointLabel()));
+ if (m_speculationDirection == ForwardSpeculation)
+ convertLastOSRExitToForward();
+ return &m_jit.codeBlock()->watchpoint(exit.m_watchpointIndex);
}
-void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
+JumpReplacementWatchpoint* SpeculativeJIT::speculationWatchpoint(ExitKind kind)
{
- UNUSED_PARAM(ownerGPR);
- UNUSED_PARAM(valueGPR);
- UNUSED_PARAM(scratch1);
- UNUSED_PARAM(scratch2);
- UNUSED_PARAM(useKind);
+ return speculationWatchpoint(kind, JSValueSource(), 0);
+}
- if (isKnownNotCell(valueUse.index()))
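+// Retargets the most recently appended OSR exit so that it resumes after the
+// current code origin instead of re-executing it. When a value recovery is
+// supplied, the exit also records how to reconstruct the value that the
+// skipped SetLocal would have stored.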
+void SpeculativeJIT::convertLastOSRExitToForward(const ValueRecovery& valueRecovery)
+{
+ if (!valueRecovery) {
+ // Check that either the current node contains a mov hint (e.g. it is a
+ // SetLocal), or that the preceding node did and shares the same code origin.
+ if (!m_currentNode->containsMovHint()) {
+ Node* setLocal = m_jit.graph().m_blocks[m_block]->at(m_indexInBlock - 1);
+ ASSERT_UNUSED(setLocal, setLocal->containsMovHint());
+ ASSERT_UNUSED(setLocal, setLocal->codeOrigin == m_currentNode->codeOrigin);
+ }
+
+ // Find the next node.
+ unsigned indexInBlock = m_indexInBlock + 1;
+ Node* node = 0;
+ for (;;) {
+ if (indexInBlock == m_jit.graph().m_blocks[m_block]->size()) {
+ // This is an inline return. Give up and do a backward speculation. This is safe
+ // because an inline return has its own bytecode index and it's always safe to
+ // reexecute that bytecode.
+ ASSERT(node->op() == Jump);
+ return;
+ }
+ node = m_jit.graph().m_blocks[m_block]->at(indexInBlock);
+ if (node->codeOrigin != m_currentNode->codeOrigin)
+ break;
+ indexInBlock++;
+ }
+
+ ASSERT(node->codeOrigin != m_currentNode->codeOrigin);
+ OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
+ exit.m_codeOrigin = node->codeOrigin;
return;
-
-#if ENABLE(WRITE_BARRIER_PROFILING)
- JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
-#endif
-
-#if ENABLE(GGC)
- GPRTemporary temp1;
- GPRTemporary temp2;
- if (scratch1 == InvalidGPRReg) {
- GPRTemporary scratchGPR(this);
- temp1.adopt(scratchGPR);
- scratch1 = temp1.gpr();
- }
- if (scratch2 == InvalidGPRReg) {
- GPRTemporary scratchGPR(this);
- temp2.adopt(scratchGPR);
- scratch2 = temp2.gpr();
}
- JITCompiler::Jump rhsNotCell;
- bool hadCellCheck = false;
- if (!isKnownCell(valueUse.index()) && !isCellPrediction(m_jit.getPrediction(valueUse.index()))) {
- hadCellCheck = true;
- rhsNotCell = m_jit.branchIfNotCell(valueGPR);
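+ // A value recovery was supplied. Find the SetLocal that consumes the current
+ // node, skipping over any interposed ForwardInt32ToDouble, Flush, or Phantom
+ // nodes that share its code origin.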
+ unsigned setLocalIndexInBlock = m_indexInBlock + 1;
+
+ Node* setLocal = m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock);
+ bool hadInt32ToDouble = false;
+
+ if (setLocal->op() == ForwardInt32ToDouble) {
+ setLocal = m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock);
+ hadInt32ToDouble = true;
}
+ if (setLocal->op() == Flush || setLocal->op() == Phantom)
+ setLocal = m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock);
+
+ if (hadInt32ToDouble)
+ ASSERT(setLocal->child1()->child1() == m_currentNode);
+ else
+ ASSERT(setLocal->child1() == m_currentNode);
+ ASSERT(setLocal->containsMovHint());
+ ASSERT(setLocal->codeOrigin == m_currentNode->codeOrigin);
- markCellCard(m_jit, ownerGPR, scratch1, scratch2);
-
- if (hadCellCheck)
- rhsNotCell.link(&m_jit);
-#endif
+ Node* nextNode = m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock + 1);
+ if (nextNode->op() == Jump && nextNode->codeOrigin == m_currentNode->codeOrigin) {
+ // We're at an inlined return. Use a backward speculation instead.
+ return;
+ }
+ ASSERT(nextNode->codeOrigin != m_currentNode->codeOrigin);
+
+ OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
+ exit.m_codeOrigin = nextNode->codeOrigin;
+
+ exit.m_lastSetOperand = setLocal->local();
+ exit.m_valueRecoveryOverride = adoptRef(
+ new ValueRecoveryOverride(setLocal->local(), valueRecovery));
}
-void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
+void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
{
- UNUSED_PARAM(ownerGPR);
- UNUSED_PARAM(value);
- UNUSED_PARAM(scratch1);
- UNUSED_PARAM(scratch2);
- UNUSED_PARAM(useKind);
-
- if (Heap::isMarked(value))
- return;
-
-#if ENABLE(WRITE_BARRIER_PROFILING)
- JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
-#endif
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ backwardSpeculationCheck(kind, jsValueSource, node, jumpToFail);
+ convertLastOSRExitToForward(valueRecovery);
+}
-#if ENABLE(GGC)
- GPRTemporary temp1;
- GPRTemporary temp2;
- if (scratch1 == InvalidGPRReg) {
- GPRTemporary scratchGPR(this);
- temp1.adopt(scratchGPR);
- scratch1 = temp1.gpr();
- }
- if (scratch2 == InvalidGPRReg) {
- GPRTemporary scratchGPR(this);
- temp2.adopt(scratchGPR);
- scratch2 = temp2.gpr();
- }
+void SpeculativeJIT::forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, Node* node, const MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
+{
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ backwardSpeculationCheck(kind, jsValueSource, node, jumpsToFail);
+ convertLastOSRExitToForward(valueRecovery);
+}
- markCellCard(m_jit, ownerGPR, scratch1, scratch2);
+void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Node* node)
+{
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLogF("SpeculativeJIT was terminated.\n");
#endif
+ if (!m_compileOkay)
+ return;
+ speculationCheck(kind, jsValueRegs, node, m_jit.jump());
+ m_compileOkay = false;
}
-void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch)
+void SpeculativeJIT::terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
{
- UNUSED_PARAM(owner);
- UNUSED_PARAM(valueGPR);
- UNUSED_PARAM(scratch);
- UNUSED_PARAM(useKind);
+ ASSERT(m_isCheckingArgumentTypes || m_canExit);
+ terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.node());
+}
- if (isKnownNotCell(valueUse.index()))
- return;
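+// Type checks filter the abstract value of the edge in addition to emitting
+// the speculation check, so code downstream of the check can rely on the
+// proven type.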
+void SpeculativeJIT::backwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
+{
+ ASSERT(needsTypeCheck(edge, typesPassedThrough));
+ m_state.forNode(edge).filter(typesPassedThrough);
+ backwardSpeculationCheck(BadType, source, edge.node(), jumpToFail);
+}
-#if ENABLE(WRITE_BARRIER_PROFILING)
- JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
-#endif
+void SpeculativeJIT::typeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail)
+{
+ backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
+ if (m_speculationDirection == ForwardSpeculation)
+ convertLastOSRExitToForward();
+}
-#if ENABLE(GGC)
- JITCompiler::Jump rhsNotCell;
- bool hadCellCheck = false;
- if (!isKnownCell(valueUse.index()) && !isCellPrediction(m_jit.getPrediction(valueUse.index()))) {
- hadCellCheck = true;
- rhsNotCell = m_jit.branchIfNotCell(valueGPR);
- }
-
- GPRTemporary temp;
- if (scratch == InvalidGPRReg) {
- GPRTemporary scratchGPR(this);
- temp.adopt(scratchGPR);
- scratch = temp.gpr();
- }
+void SpeculativeJIT::forwardTypeCheck(JSValueSource source, Edge edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
+{
+ backwardTypeCheck(source, edge, typesPassedThrough, jumpToFail);
+ convertLastOSRExitToForward(valueRecovery);
+}
- uint8_t* cardAddress = Heap::addressOfCardFor(owner);
- m_jit.move(JITCompiler::TrustedImmPtr(cardAddress), scratch);
- m_jit.store8(JITCompiler::TrustedImm32(1), JITCompiler::Address(scratch));
+void SpeculativeJIT::addSlowPathGenerator(PassOwnPtr<SlowPathGenerator> slowPathGenerator)
+{
+ m_slowPathGenerators.append(slowPathGenerator);
+}
- if (hadCellCheck)
- rhsNotCell.link(&m_jit);
+void SpeculativeJIT::runSlowPathGenerators()
+{
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLogF("Running %lu slow path generators.\n", m_slowPathGenerators.size());
#endif
+ for (unsigned i = 0; i < m_slowPathGenerators.size(); ++i)
+ m_slowPathGenerators[i]->generate(this);
}
-bool SpeculativeJIT::nonSpeculativeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
+// On Windows we need to wrap fmod; on other platforms we can call it directly.
+// On ARMv7 we assert that all function pointers have the low bit set (i.e. they point to Thumb code).
+#if CALLING_CONVENTION_IS_STDCALL || CPU(ARM_THUMB2)
+static double DFG_OPERATION fmodAsDFGOperation(double x, double y)
{
- unsigned branchIndexInBlock = detectPeepHoleBranch();
- if (branchIndexInBlock != UINT_MAX) {
- NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
+ return fmod(x, y);
+}
+#else
+#define fmodAsDFGOperation fmod
+#endif
- ASSERT(node.adjustedRefCount() == 1);
+void SpeculativeJIT::clearGenerationInfo()
+{
+ for (unsigned i = 0; i < m_generationInfo.size(); ++i)
+ m_generationInfo[i] = GenerationInfo();
+ m_gprs = RegisterBank<GPRInfo>();
+ m_fprs = RegisterBank<FPRInfo>();
+}
+
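+// Decides how a live register should be saved before a call and restored
+// afterwards without disturbing the register allocator's bookkeeping (a
+// "silent" spill/fill). Values that can be rematerialized from a constant or
+// refilled from an existing spill slot are not re-stored.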
+SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source)
+{
+ GenerationInfo& info = m_generationInfo[spillMe];
+ Node* node = info.node();
+ DataFormat registerFormat = info.registerFormat();
+ ASSERT(registerFormat != DataFormatNone);
+ ASSERT(registerFormat != DataFormatDouble);
- nonSpeculativePeepholeBranch(node, branchNodeIndex, cond, helperFunction);
-
- m_indexInBlock = branchIndexInBlock;
- m_compileIndex = branchNodeIndex;
+ SilentSpillAction spillAction;
+ SilentFillAction fillAction;
- return true;
+ if (!info.needsSpill())
+ spillAction = DoNothingForSpill;
+ else {
+#if USE(JSVALUE64)
+ ASSERT(info.gpr() == source);
+ if (registerFormat == DataFormatInteger)
+ spillAction = Store32Payload;
+ else if (registerFormat == DataFormatCell || registerFormat == DataFormatStorage)
+ spillAction = StorePtr;
+ else {
+ ASSERT(registerFormat & DataFormatJS);
+ spillAction = Store64;
+ }
+#elif USE(JSVALUE32_64)
+ if (registerFormat & DataFormatJS) {
+ ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
+ spillAction = source == info.tagGPR() ? Store32Tag : Store32Payload;
+ } else {
+ ASSERT(info.gpr() == source);
+ spillAction = Store32Payload;
+ }
+#endif
}
-
- nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
-
- return false;
+
+ if (registerFormat == DataFormatInteger) {
+ ASSERT(info.gpr() == source);
+ ASSERT(isJSInteger(info.registerFormat()));
+ if (node->hasConstant()) {
+ ASSERT(isInt32Constant(node));
+ fillAction = SetInt32Constant;
+ } else
+ fillAction = Load32Payload;
+ } else if (registerFormat == DataFormatBoolean) {
+#if USE(JSVALUE64)
+ RELEASE_ASSERT_NOT_REACHED();
+ fillAction = DoNothingForFill;
+#elif USE(JSVALUE32_64)
+ ASSERT(info.gpr() == source);
+ if (node->hasConstant()) {
+ ASSERT(isBooleanConstant(node));
+ fillAction = SetBooleanConstant;
+ } else
+ fillAction = Load32Payload;
+#endif
+ } else if (registerFormat == DataFormatCell) {
+ ASSERT(info.gpr() == source);
+ if (node->hasConstant()) {
+ JSValue value = valueOfJSConstant(node);
+ ASSERT_UNUSED(value, value.isCell());
+ fillAction = SetCellConstant;
+ } else {
+#if USE(JSVALUE64)
+ fillAction = LoadPtr;
+#else
+ fillAction = Load32Payload;
+#endif
+ }
+ } else if (registerFormat == DataFormatStorage) {
+ ASSERT(info.gpr() == source);
+ fillAction = LoadPtr;
+ } else {
+ ASSERT(registerFormat & DataFormatJS);
+#if USE(JSVALUE64)
+ ASSERT(info.gpr() == source);
+ if (node->hasConstant()) {
+ if (valueOfJSConstant(node).isCell())
+ fillAction = SetTrustedJSConstant;
+ else
+ fillAction = SetJSConstant;
+ } else if (info.spillFormat() == DataFormatInteger) {
+ ASSERT(registerFormat == DataFormatJSInteger);
+ fillAction = Load32PayloadBoxInt;
+ } else if (info.spillFormat() == DataFormatDouble) {
+ ASSERT(registerFormat == DataFormatJSDouble);
+ fillAction = LoadDoubleBoxDouble;
+ } else
+ fillAction = Load64;
+#else
+ ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
+ if (node->hasConstant())
+ fillAction = info.tagGPR() == source ? SetJSConstantTag : SetJSConstantPayload;
+ else if (info.payloadGPR() == source)
+ fillAction = Load32Payload;
+ else { // Fill the Tag
+ switch (info.spillFormat()) {
+ case DataFormatInteger:
+ ASSERT(registerFormat == DataFormatJSInteger);
+ fillAction = SetInt32Tag;
+ break;
+ case DataFormatCell:
+ ASSERT(registerFormat == DataFormatJSCell);
+ fillAction = SetCellTag;
+ break;
+ case DataFormatBoolean:
+ ASSERT(registerFormat == DataFormatJSBoolean);
+ fillAction = SetBooleanTag;
+ break;
+ default:
+ fillAction = Load32Tag;
+ break;
+ }
+ }
+#endif
+ }
+
+ return SilentRegisterSavePlan(spillAction, fillAction, node, source);
}
-
-bool SpeculativeJIT::nonSpeculativeStrictEq(Node& node, bool invert)
+
+SilentRegisterSavePlan SpeculativeJIT::silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source)
{
- unsigned branchIndexInBlock = detectPeepHoleBranch();
- if (branchIndexInBlock != UINT_MAX) {
- NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
+ GenerationInfo& info = m_generationInfo[spillMe];
+ Node* node = info.node();
+ ASSERT(info.registerFormat() == DataFormatDouble);
- ASSERT(node.adjustedRefCount() == 1);
-
- nonSpeculativePeepholeStrictEq(node, branchNodeIndex, invert);
-
- m_indexInBlock = branchIndexInBlock;
- m_compileIndex = branchNodeIndex;
+ SilentSpillAction spillAction;
+ SilentFillAction fillAction;
- return true;
+ if (!info.needsSpill())
+ spillAction = DoNothingForSpill;
+ else {
+ ASSERT(!node->hasConstant());
+ ASSERT(info.spillFormat() == DataFormatNone);
+ ASSERT(info.fpr() == source);
+ spillAction = StoreDouble;
}
+
+#if USE(JSVALUE64)
+ if (node->hasConstant()) {
+ ASSERT(isNumberConstant(node));
+ fillAction = SetDoubleConstant;
+ } else if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
+ // It was spilled previously, and not as a double, so filling it requires unboxing.
+ ASSERT(info.spillFormat() & DataFormatJS);
+ fillAction = LoadJSUnboxDouble;
+ } else
+ fillAction = LoadDouble;
+#elif USE(JSVALUE32_64)
+ ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
+ if (node->hasConstant()) {
+ ASSERT(isNumberConstant(node));
+ fillAction = SetDoubleConstant;
+ } else
+ fillAction = LoadDouble;
+#endif
+
+ return SilentRegisterSavePlan(spillAction, fillAction, node, source);
+}
- nonSpeculativeNonPeepholeStrictEq(node, invert);
-
- return false;
-}
-
-#ifndef NDEBUG
-static const char* dataFormatString(DataFormat format)
+void SpeculativeJIT::silentSpill(const SilentRegisterSavePlan& plan)
{
- // These values correspond to the DataFormat enum.
- const char* strings[] = {
- "[ ]",
- "[ i]",
- "[ d]",
- "[ c]",
- "Err!",
- "Err!",
- "Err!",
- "Err!",
- "[J ]",
- "[Ji]",
- "[Jd]",
- "[Jc]",
- "Err!",
- "Err!",
- "Err!",
- "Err!",
- };
- return strings[format];
+ switch (plan.spillAction()) {
+ case DoNothingForSpill:
+ break;
+ case Store32Tag:
+ m_jit.store32(plan.gpr(), JITCompiler::tagFor(plan.node()->virtualRegister()));
+ break;
+ case Store32Payload:
+ m_jit.store32(plan.gpr(), JITCompiler::payloadFor(plan.node()->virtualRegister()));
+ break;
+ case StorePtr:
+ m_jit.storePtr(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
+ break;
+#if USE(JSVALUE64)
+ case Store64:
+ m_jit.store64(plan.gpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
+ break;
+#endif
+ case StoreDouble:
+ m_jit.storeDouble(plan.fpr(), JITCompiler::addressFor(plan.node()->virtualRegister()));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
}
-
-void SpeculativeJIT::dump(const char* label)
+
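+// Refills a register according to its saved plan. canTrample is a scratch GPR
+// that the JSVALUE64 double-rematerialization cases are allowed to clobber.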
+void SpeculativeJIT::silentFill(const SilentRegisterSavePlan& plan, GPRReg canTrample)
{
- if (label)
- dataLog("<%s>\n", label);
-
- dataLog(" gprs:\n");
- m_gprs.dump();
- dataLog(" fprs:\n");
- m_fprs.dump();
- dataLog(" VirtualRegisters:\n");
- for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
- GenerationInfo& info = m_generationInfo[i];
- if (info.alive())
- dataLog(" % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
- else
- dataLog(" % 3d:[__][__]", i);
- if (info.registerFormat() == DataFormatDouble)
- dataLog(":fpr%d\n", info.fpr());
- else if (info.registerFormat() != DataFormatNone
#if USE(JSVALUE32_64)
- && !(info.registerFormat() & DataFormatJS)
+ UNUSED_PARAM(canTrample);
#endif
- ) {
- ASSERT(info.gpr() != InvalidGPRReg);
- dataLog(":%s\n", GPRInfo::debugName(info.gpr()));
- } else
- dataLog("\n");
+ switch (plan.fillAction()) {
+ case DoNothingForFill:
+ break;
+ case SetInt32Constant:
+ m_jit.move(Imm32(valueOfInt32Constant(plan.node())), plan.gpr());
+ break;
+ case SetBooleanConstant:
+ m_jit.move(TrustedImm32(valueOfBooleanConstant(plan.node())), plan.gpr());
+ break;
+ case SetCellConstant:
+ m_jit.move(TrustedImmPtr(valueOfJSConstant(plan.node()).asCell()), plan.gpr());
+ break;
+#if USE(JSVALUE64)
+ case SetTrustedJSConstant:
+ m_jit.move(valueOfJSConstantAsImm64(plan.node()).asTrustedImm64(), plan.gpr());
+ break;
+ case SetJSConstant:
+ m_jit.move(valueOfJSConstantAsImm64(plan.node()), plan.gpr());
+ break;
+ case SetDoubleConstant:
+ m_jit.move(Imm64(reinterpretDoubleToInt64(valueOfNumberConstant(plan.node()))), canTrample);
+ m_jit.move64ToDouble(canTrample, plan.fpr());
+ break;
+ case Load32PayloadBoxInt:
+ m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, plan.gpr());
+ break;
+ case LoadDoubleBoxDouble:
+ m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
+ m_jit.sub64(GPRInfo::tagTypeNumberRegister, plan.gpr());
+ break;
+ case LoadJSUnboxDouble:
+ m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), canTrample);
+ unboxDouble(canTrample, plan.fpr());
+ break;
+#else
+ case SetJSConstantTag:
+ m_jit.move(Imm32(valueOfJSConstant(plan.node()).tag()), plan.gpr());
+ break;
+ case SetJSConstantPayload:
+ m_jit.move(Imm32(valueOfJSConstant(plan.node()).payload()), plan.gpr());
+ break;
+ case SetInt32Tag:
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), plan.gpr());
+ break;
+ case SetCellTag:
+ m_jit.move(TrustedImm32(JSValue::CellTag), plan.gpr());
+ break;
+ case SetBooleanTag:
+ m_jit.move(TrustedImm32(JSValue::BooleanTag), plan.gpr());
+ break;
+ case SetDoubleConstant:
+ m_jit.loadDouble(addressOfDoubleConstant(plan.node()), plan.fpr());
+ break;
+#endif
+ case Load32Tag:
+ m_jit.load32(JITCompiler::tagFor(plan.node()->virtualRegister()), plan.gpr());
+ break;
+ case Load32Payload:
+ m_jit.load32(JITCompiler::payloadFor(plan.node()->virtualRegister()), plan.gpr());
+ break;
+ case LoadPtr:
+ m_jit.loadPtr(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
+ break;
+#if USE(JSVALUE64)
+ case Load64:
+ m_jit.load64(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.gpr());
+ break;
+#endif
+ case LoadDouble:
+ m_jit.loadDouble(JITCompiler::addressFor(plan.node()->virtualRegister()), plan.fpr());
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
}
- if (label)
- dataLog("</%s>\n", label);
}
-#endif
-
-
-#if DFG_ENABLE(CONSISTENCY_CHECK)
-void SpeculativeJIT::checkConsistency()
+
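+// Maps a typed array ArrayMode to the VM's cached descriptor, or returns 0 for
+// modes that are not typed arrays.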
+const TypedArrayDescriptor* SpeculativeJIT::typedArrayDescriptor(ArrayMode arrayMode)
{
- bool failed = false;
+ switch (arrayMode.type()) {
+ case Array::Int8Array:
+ return &m_jit.vm()->int8ArrayDescriptor();
+ case Array::Int16Array:
+ return &m_jit.vm()->int16ArrayDescriptor();
+ case Array::Int32Array:
+ return &m_jit.vm()->int32ArrayDescriptor();
+ case Array::Uint8Array:
+ return &m_jit.vm()->uint8ArrayDescriptor();
+ case Array::Uint8ClampedArray:
+ return &m_jit.vm()->uint8ClampedArrayDescriptor();
+ case Array::Uint16Array:
+ return &m_jit.vm()->uint16ArrayDescriptor();
+ case Array::Uint32Array:
+ return &m_jit.vm()->uint32ArrayDescriptor();
+ case Array::Float32Array:
+ return &m_jit.vm()->float32ArrayDescriptor();
+ case Array::Float64Array:
+ return &m_jit.vm()->float64ArrayDescriptor();
+ default:
+ return 0;
+ }
+}
- for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
- if (iter.isLocked()) {
- dataLog("DFG_CONSISTENCY_CHECK failed: gpr %s is locked.\n", iter.debugName());
- failed = true;
- }
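+// Both jumpSlowForUnwantedArrayMode helpers assume that tempGPR already holds
+// the cell's IndexingType byte; they return the jump(s) to take when it does
+// not match the shape that arrayMode requires.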
+JITCompiler::Jump SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode, IndexingType shape)
+{
+ switch (arrayMode.arrayClass()) {
+ case Array::OriginalArray: {
+ CRASH();
+ // I already know that VC++ takes unkindly to the expression "return Jump()",
+ // so I'm doing it this way in anticipation of someone eventually using VC++
+ // to compile the DFG.
+ JITCompiler::Jump result;
+ return result;
}
- for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
- if (iter.isLocked()) {
- dataLog("DFG_CONSISTENCY_CHECK failed: fpr %s is locked.\n", iter.debugName());
- failed = true;
- }
+
+ case Array::Array:
+ m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
+ return m_jit.branch32(
+ MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | shape));
+
+ default:
+ m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
+ return m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(shape));
}
+}
- for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
- VirtualRegister virtualRegister = (VirtualRegister)i;
- GenerationInfo& info = m_generationInfo[virtualRegister];
- if (!info.alive())
- continue;
- switch (info.registerFormat()) {
- case DataFormatNone:
- break;
- case DataFormatJS:
- case DataFormatJSInteger:
- case DataFormatJSDouble:
- case DataFormatJSCell:
- case DataFormatJSBoolean:
-#if USE(JSVALUE32_64)
- break;
-#endif
- case DataFormatInteger:
- case DataFormatCell:
- case DataFormatBoolean:
- case DataFormatStorage: {
- GPRReg gpr = info.gpr();
- ASSERT(gpr != InvalidGPRReg);
- if (m_gprs.name(gpr) != virtualRegister) {
- dataLog("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (gpr %s).\n", virtualRegister, GPRInfo::debugName(gpr));
- failed = true;
- }
- break;
- }
- case DataFormatDouble: {
- FPRReg fpr = info.fpr();
- ASSERT(fpr != InvalidFPRReg);
- if (m_fprs.name(fpr) != virtualRegister) {
- dataLog("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (fpr %s).\n", virtualRegister, FPRInfo::debugName(fpr));
- failed = true;
- }
- break;
- }
- }
- }
+JITCompiler::JumpList SpeculativeJIT::jumpSlowForUnwantedArrayMode(GPRReg tempGPR, ArrayMode arrayMode)
+{
+ JITCompiler::JumpList result;
+
+ switch (arrayMode.type()) {
+ case Array::Int32:
+ return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, Int32Shape);
- for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
- VirtualRegister virtualRegister = iter.name();
- if (virtualRegister == InvalidVirtualRegister)
- continue;
+ case Array::Double:
+ return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, DoubleShape);
- GenerationInfo& info = m_generationInfo[virtualRegister];
-#if USE(JSVALUE64)
- if (iter.regID() != info.gpr()) {
- dataLog("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
- failed = true;
- }
-#else
- if (!(info.registerFormat() & DataFormatJS)) {
- if (iter.regID() != info.gpr()) {
- dataLog("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
- failed = true;
- }
- } else {
- if (iter.regID() != info.tagGPR() && iter.regID() != info.payloadGPR()) {
- dataLog("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
- failed = true;
+ case Array::Contiguous:
+ return jumpSlowForUnwantedArrayMode(tempGPR, arrayMode, ContiguousShape);
+
+ case Array::ArrayStorage:
+ case Array::SlowPutArrayStorage: {
+ ASSERT(!arrayMode.isJSArrayWithOriginalStructure());
+
+ if (arrayMode.isJSArray()) {
+ if (arrayMode.isSlowPut()) {
+ result.append(
+ m_jit.branchTest32(
+ MacroAssembler::Zero, tempGPR, MacroAssembler::TrustedImm32(IsArray)));
+ m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
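+ // Unsigned range check: after the subtraction, every shape in
+ // [ArrayStorageShape, SlowPutArrayStorageShape] maps to a value that is
+ // not Above the difference of the two bounds.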
+ m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
+ result.append(
+ m_jit.branch32(
+ MacroAssembler::Above, tempGPR,
+ TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
+ break;
}
+ m_jit.and32(TrustedImm32(IsArray | IndexingShapeMask), tempGPR);
+ result.append(
+ m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(IsArray | ArrayStorageShape)));
+ break;
}
-#endif
- }
-
- for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
- VirtualRegister virtualRegister = iter.name();
- if (virtualRegister == InvalidVirtualRegister)
- continue;
-
- GenerationInfo& info = m_generationInfo[virtualRegister];
- if (iter.regID() != info.fpr()) {
- dataLog("DFG_CONSISTENCY_CHECK failed: name mismatch for fpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
- failed = true;
+ m_jit.and32(TrustedImm32(IndexingShapeMask), tempGPR);
+ if (arrayMode.isSlowPut()) {
+ m_jit.sub32(TrustedImm32(ArrayStorageShape), tempGPR);
+ result.append(
+ m_jit.branch32(
+ MacroAssembler::Above, tempGPR,
+ TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)));
+ break;
}
+ result.append(
+ m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(ArrayStorageShape)));
+ break;
}
-
- if (failed) {
- dump();
+ default:
CRASH();
+ break;
}
-}
-#endif
-
-GPRTemporary::GPRTemporary()
- : m_jit(0)
- , m_gpr(InvalidGPRReg)
-{
+
+ return result;
}
-GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
- : m_jit(jit)
- , m_gpr(InvalidGPRReg)
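+// Emits the non-converting array check for CheckArray: either an
+// indexing-shape test or a ClassInfo comparison, depending on the array mode.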
+void SpeculativeJIT::checkArray(Node* node)
{
- m_gpr = m_jit->allocate();
+ ASSERT(node->arrayMode().isSpecific());
+ ASSERT(!node->arrayMode().doesConversion());
+
+ SpeculateCellOperand base(this, node->child1());
+ GPRReg baseReg = base.gpr();
+
+ const TypedArrayDescriptor* result = typedArrayDescriptor(node->arrayMode());
+
+ if (node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1()))) {
+ noResult(m_currentNode);
+ return;
+ }
+
+ const ClassInfo* expectedClassInfo = 0;
+
+ switch (node->arrayMode().type()) {
+ case Array::String:
+ expectedClassInfo = &JSString::s_info;
+ break;
+ case Array::Int32:
+ case Array::Double:
+ case Array::Contiguous:
+ case Array::ArrayStorage:
+ case Array::SlowPutArrayStorage: {
+ GPRTemporary temp(this);
+ GPRReg tempGPR = temp.gpr();
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, JSCell::structureOffset()), tempGPR);
+ m_jit.load8(MacroAssembler::Address(tempGPR, Structure::indexingTypeOffset()), tempGPR);
+ speculationCheck(
+ BadIndexingType, JSValueSource::unboxedCell(baseReg), 0,
+ jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
+
+ noResult(m_currentNode);
+ return;
+ }
+ case Array::Arguments:
+ expectedClassInfo = &Arguments::s_info;
+ break;
+ case Array::Int8Array:
+ case Array::Int16Array:
+ case Array::Int32Array:
+ case Array::Uint8Array:
+ case Array::Uint8ClampedArray:
+ case Array::Uint16Array:
+ case Array::Uint32Array:
+ case Array::Float32Array:
+ case Array::Float64Array:
+ expectedClassInfo = result->m_classInfo;
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+
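+ // Modes that are not distinguished by indexing shape are checked by
+ // comparing the structure's ClassInfo pointer instead.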
+ GPRTemporary temp(this);
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, JSCell::structureOffset()), temp.gpr());
+ speculationCheck(
+ Uncountable, JSValueRegs(), 0,
+ m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(temp.gpr(), Structure::classInfoOffset()),
+ MacroAssembler::TrustedImmPtr(expectedClassInfo)));
+
+ noResult(m_currentNode);
}
-GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
- : m_jit(jit)
- , m_gpr(InvalidGPRReg)
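+// Arrayify converts the base object's indexing type in place when it does not
+// already match: the inline path only performs the check, and the slow path
+// generator does the actual conversion.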
+void SpeculativeJIT::arrayify(Node* node, GPRReg baseReg, GPRReg propertyReg)
{
- m_gpr = m_jit->allocate(specific);
+ ASSERT(node->arrayMode().doesConversion());
+
+ GPRTemporary temp(this);
+ GPRTemporary structure;
+ GPRReg tempGPR = temp.gpr();
+ GPRReg structureGPR = InvalidGPRReg;
+
+ if (node->op() != ArrayifyToStructure) {
+ GPRTemporary realStructure(this);
+ structure.adopt(realStructure);
+ structureGPR = structure.gpr();
+ }
+
+ // We can skip all that comes next if we already have array storage.
+ MacroAssembler::JumpList slowPath;
+
+ if (node->op() == ArrayifyToStructure) {
+ slowPath.append(m_jit.branchWeakPtr(
+ JITCompiler::NotEqual,
+ JITCompiler::Address(baseReg, JSCell::structureOffset()),
+ node->structure()));
+ } else {
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, JSCell::structureOffset()), structureGPR);
+
+ m_jit.load8(
+ MacroAssembler::Address(structureGPR, Structure::indexingTypeOffset()), tempGPR);
+
+ slowPath.append(jumpSlowForUnwantedArrayMode(tempGPR, node->arrayMode()));
+ }
+
+ addSlowPathGenerator(adoptPtr(new ArrayifySlowPathGenerator(
+ slowPath, this, node, baseReg, propertyReg, tempGPR, structureGPR)));
+
+ noResult(m_currentNode);
}
-GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1)
- : m_jit(jit)
- , m_gpr(InvalidGPRReg)
+void SpeculativeJIT::arrayify(Node* node)
{
- if (m_jit->canReuse(op1.index()))
- m_gpr = m_jit->reuse(op1.gpr());
- else
- m_gpr = m_jit->allocate();
+ ASSERT(node->arrayMode().isSpecific());
+
+ SpeculateCellOperand base(this, node->child1());
+
+ if (!node->child2()) {
+ arrayify(node, base.gpr(), InvalidGPRReg);
+ return;
+ }
+
+ SpeculateIntegerOperand property(this, node->child2());
+
+ arrayify(node, base.gpr(), property.gpr());
}
-GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1, SpeculateIntegerOperand& op2)
- : m_jit(jit)
- , m_gpr(InvalidGPRReg)
+GPRReg SpeculativeJIT::fillStorage(Edge edge)
{
- if (m_jit->canReuse(op1.index()))
- m_gpr = m_jit->reuse(op1.gpr());
- else if (m_jit->canReuse(op2.index()))
- m_gpr = m_jit->reuse(op2.gpr());
- else
- m_gpr = m_jit->allocate();
-}
-
-GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateStrictInt32Operand& op1)
- : m_jit(jit)
- , m_gpr(InvalidGPRReg)
-{
- if (m_jit->canReuse(op1.index()))
- m_gpr = m_jit->reuse(op1.gpr());
- else
- m_gpr = m_jit->allocate();
-}
-
-GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1)
- : m_jit(jit)
- , m_gpr(InvalidGPRReg)
-{
- if (m_jit->canReuse(op1.index()))
- m_gpr = m_jit->reuse(op1.gpr());
- else
- m_gpr = m_jit->allocate();
+ VirtualRegister virtualRegister = edge->virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+
+ switch (info.registerFormat()) {
+ case DataFormatNone: {
+ if (info.spillFormat() == DataFormatStorage) {
+ GPRReg gpr = allocate();
+ m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
+ m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr);
+ info.fillStorage(*m_stream, gpr);
+ return gpr;
+ }
+
+ // Must be a cell; fill it as a cell and then return the pointer.
+ return fillSpeculateCell(edge);
+ }
+
+ case DataFormatStorage: {
+ GPRReg gpr = info.gpr();
+ m_gprs.lock(gpr);
+ return gpr;
+ }
+
+ default:
+ return fillSpeculateCell(edge);
+ }
}
-GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1, IntegerOperand& op2)
- : m_jit(jit)
- , m_gpr(InvalidGPRReg)
+void SpeculativeJIT::useChildren(Node* node)
{
- if (m_jit->canReuse(op1.index()))
- m_gpr = m_jit->reuse(op1.gpr());
- else if (m_jit->canReuse(op2.index()))
- m_gpr = m_jit->reuse(op2.gpr());
- else
- m_gpr = m_jit->allocate();
+ if (node->flags() & NodeHasVarArgs) {
+ for (unsigned childIdx = node->firstChild(); childIdx < node->firstChild() + node->numChildren(); childIdx++) {
+ if (!!m_jit.graph().m_varArgChildren[childIdx])
+ use(m_jit.graph().m_varArgChildren[childIdx]);
+ }
+ } else {
+ Edge child1 = node->child1();
+ if (!child1) {
+ ASSERT(!node->child2() && !node->child3());
+ return;
+ }
+ use(child1);
+
+ Edge child2 = node->child2();
+ if (!child2) {
+ ASSERT(!node->child3());
+ return;
+ }
+ use(child2);
+
+ Edge child3 = node->child3();
+ if (!child3)
+ return;
+ use(child3);
+ }
}
-GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateCellOperand& op1)
- : m_jit(jit)
- , m_gpr(InvalidGPRReg)
+void SpeculativeJIT::writeBarrier(MacroAssembler& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
{
- if (m_jit->canReuse(op1.index()))
- m_gpr = m_jit->reuse(op1.gpr());
- else
- m_gpr = m_jit->allocate();
-}
+ UNUSED_PARAM(jit);
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+ UNUSED_PARAM(useKind);
+ ASSERT(owner != scratch1);
+ ASSERT(owner != scratch2);
+ ASSERT(scratch1 != scratch2);
-GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateBooleanOperand& op1)
- : m_jit(jit)
- , m_gpr(InvalidGPRReg)
-{
- if (m_jit->canReuse(op1.index()))
- m_gpr = m_jit->reuse(op1.gpr());
- else
- m_gpr = m_jit->allocate();
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ JITCompiler::emitCount(jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
}
-#if USE(JSVALUE64)
-GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
- : m_jit(jit)
- , m_gpr(InvalidGPRReg)
-{
- if (m_jit->canReuse(op1.index()))
- m_gpr = m_jit->reuse(op1.gpr());
- else
- m_gpr = m_jit->allocate();
-}
-#else
-GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1, bool tag)
- : m_jit(jit)
- , m_gpr(InvalidGPRReg)
+void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
{
- if (!op1.isDouble() && m_jit->canReuse(op1.index()))
- m_gpr = m_jit->reuse(tag ? op1.tagGPR() : op1.payloadGPR());
- else
- m_gpr = m_jit->allocate();
-}
-#endif
+ UNUSED_PARAM(ownerGPR);
+ UNUSED_PARAM(valueGPR);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+ UNUSED_PARAM(useKind);
-GPRTemporary::GPRTemporary(SpeculativeJIT* jit, StorageOperand& op1)
- : m_jit(jit)
- , m_gpr(InvalidGPRReg)
-{
- if (m_jit->canReuse(op1.index()))
- m_gpr = m_jit->reuse(op1.gpr());
- else
- m_gpr = m_jit->allocate();
-}
+ if (isKnownNotCell(valueUse.node()))
+ return;
-void GPRTemporary::adopt(GPRTemporary& other)
-{
- ASSERT(!m_jit);
- ASSERT(m_gpr == InvalidGPRReg);
- ASSERT(other.m_jit);
- ASSERT(other.m_gpr != InvalidGPRReg);
- m_jit = other.m_jit;
- m_gpr = other.m_gpr;
- other.m_jit = 0;
- other.m_gpr = InvalidGPRReg;
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
}
-FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
- : m_jit(jit)
- , m_fpr(InvalidFPRReg)
+void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind useKind, GPRReg scratch1, GPRReg scratch2)
{
- m_fpr = m_jit->fprAllocate();
-}
+ UNUSED_PARAM(ownerGPR);
+ UNUSED_PARAM(value);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+ UNUSED_PARAM(useKind);
+
+ if (Heap::isMarked(value))
+ return;
-FPRTemporary::FPRTemporary(SpeculativeJIT* jit, DoubleOperand& op1)
- : m_jit(jit)
- , m_fpr(InvalidFPRReg)
-{
- if (m_jit->canReuse(op1.index()))
- m_fpr = m_jit->reuse(op1.fpr());
- else
- m_fpr = m_jit->fprAllocate();
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
}
-FPRTemporary::FPRTemporary(SpeculativeJIT* jit, DoubleOperand& op1, DoubleOperand& op2)
- : m_jit(jit)
- , m_fpr(InvalidFPRReg)
+void SpeculativeJIT::writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind useKind, GPRReg scratch)
{
- if (m_jit->canReuse(op1.index()))
- m_fpr = m_jit->reuse(op1.fpr());
- else if (m_jit->canReuse(op2.index()))
- m_fpr = m_jit->reuse(op2.fpr());
- else
- m_fpr = m_jit->fprAllocate();
-}
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(valueGPR);
+ UNUSED_PARAM(scratch);
+ UNUSED_PARAM(useKind);
-FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
- : m_jit(jit)
- , m_fpr(InvalidFPRReg)
-{
- if (m_jit->canReuse(op1.index()))
- m_fpr = m_jit->reuse(op1.fpr());
- else
- m_fpr = m_jit->fprAllocate();
-}
+ if (isKnownNotCell(valueUse.node()))
+ return;
-FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
- : m_jit(jit)
- , m_fpr(InvalidFPRReg)
-{
- if (m_jit->canReuse(op1.index()))
- m_fpr = m_jit->reuse(op1.fpr());
- else if (m_jit->canReuse(op2.index()))
- m_fpr = m_jit->reuse(op2.fpr());
- else
- m_fpr = m_jit->fprAllocate();
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ JITCompiler::emitCount(m_jit, WriteBarrierCounters::jitCounterFor(useKind));
+#endif
}
-#if USE(JSVALUE32_64)
-FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
- : m_jit(jit)
- , m_fpr(InvalidFPRReg)
+bool SpeculativeJIT::nonSpeculativeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
{
- if (op1.isDouble() && m_jit->canReuse(op1.index()))
- m_fpr = m_jit->reuse(op1.fpr());
- else
- m_fpr = m_jit->fprAllocate();
-}
-#endif
+ unsigned branchIndexInBlock = detectPeepHoleBranch();
+ if (branchIndexInBlock != UINT_MAX) {
+ Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
-void ValueSource::dump(FILE* out) const
-{
- switch (kind()) {
- case SourceNotSet:
- fprintf(out, "NotSet");
- break;
- case SourceIsDead:
- fprintf(out, "IsDead");
- break;
- case ValueInRegisterFile:
- fprintf(out, "InRegFile");
- break;
- case Int32InRegisterFile:
- fprintf(out, "Int32");
- break;
- case CellInRegisterFile:
- fprintf(out, "Cell");
- break;
- case BooleanInRegisterFile:
- fprintf(out, "Bool");
- break;
- case DoubleInRegisterFile:
- fprintf(out, "Double");
- break;
- case HaveNode:
- fprintf(out, "Node(%d)", m_nodeIndex);
- break;
+ ASSERT(node->adjustedRefCount() == 1);
+
+ nonSpeculativePeepholeBranch(node, branchNode, cond, helperFunction);
+
+ m_indexInBlock = branchIndexInBlock;
+ m_currentNode = branchNode;
+
+ return true;
}
-}
-
-void SpeculativeJIT::compilePeepHoleDoubleBranch(Node& node, NodeIndex branchNodeIndex, JITCompiler::DoubleCondition condition)
-{
- Node& branchNode = at(branchNodeIndex);
- BlockIndex taken = branchNode.takenBlockIndex();
- BlockIndex notTaken = branchNode.notTakenBlockIndex();
- SpeculateDoubleOperand op1(this, node.child1());
- SpeculateDoubleOperand op2(this, node.child2());
+ nonSpeculativeNonPeepholeCompare(node, cond, helperFunction);
- branchDouble(condition, op1.fpr(), op2.fpr(), taken);
- jump(notTaken);
+ return false;
}
-void SpeculativeJIT::compilePeepHoleObjectEquality(Node& node, NodeIndex branchNodeIndex, const ClassInfo* classInfo, PredictionChecker predictionCheck)
+bool SpeculativeJIT::nonSpeculativeStrictEq(Node* node, bool invert)
{
- Node& branchNode = at(branchNodeIndex);
- BlockIndex taken = branchNode.takenBlockIndex();
- BlockIndex notTaken = branchNode.notTakenBlockIndex();
+ unsigned branchIndexInBlock = detectPeepHoleBranch();
+ if (branchIndexInBlock != UINT_MAX) {
+ Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
- MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
+ ASSERT(node->adjustedRefCount() == 1);
+
+ nonSpeculativePeepholeStrictEq(node, branchNode, invert);
- if (taken == (m_block + 1)) {
- condition = MacroAssembler::NotEqual;
- BlockIndex tmp = taken;
- taken = notTaken;
- notTaken = tmp;
+ m_indexInBlock = branchIndexInBlock;
+ m_currentNode = branchNode;
+
+ return true;
}
-
- SpeculateCellOperand op1(this, node.child1());
- SpeculateCellOperand op2(this, node.child2());
-
- GPRReg op1GPR = op1.gpr();
- GPRReg op2GPR = op2.gpr();
- if (!predictionCheck(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node.child1().index(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
- if (!predictionCheck(m_state.forNode(node.child2()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node.child2().index(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op2GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
+ nonSpeculativeNonPeepholeStrictEq(node, invert);
- branchPtr(condition, op1GPR, op2GPR, taken);
- jump(notTaken);
+ return false;
}
-void SpeculativeJIT::compilePeepHoleIntegerBranch(Node& node, NodeIndex branchNodeIndex, JITCompiler::RelationalCondition condition)
+#ifndef NDEBUG
+static const char* dataFormatString(DataFormat format)
{
- Node& branchNode = at(branchNodeIndex);
- BlockIndex taken = branchNode.takenBlockIndex();
- BlockIndex notTaken = branchNode.notTakenBlockIndex();
-
- // The branch instruction will branch to the taken block.
- // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
- if (taken == (m_block + 1)) {
- condition = JITCompiler::invert(condition);
- BlockIndex tmp = taken;
- taken = notTaken;
- notTaken = tmp;
- }
-
- if (isInt32Constant(node.child1().index())) {
- int32_t imm = valueOfInt32Constant(node.child1().index());
- SpeculateIntegerOperand op2(this, node.child2());
- branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
- } else if (isInt32Constant(node.child2().index())) {
- SpeculateIntegerOperand op1(this, node.child1());
- int32_t imm = valueOfInt32Constant(node.child2().index());
- branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
- } else {
- SpeculateIntegerOperand op1(this, node.child1());
- SpeculateIntegerOperand op2(this, node.child2());
- branch32(condition, op1.gpr(), op2.gpr(), taken);
- }
-
- jump(notTaken);
-}
+ // These values correspond to the DataFormat enum.
+ const char* strings[] = {
+ "[ ]",
+ "[ i]",
+ "[ d]",
+ "[ c]",
+ "Err!",
+ "Err!",
+ "Err!",
+ "Err!",
+ "[J ]",
+ "[Ji]",
+ "[Jd]",
+ "[Jc]",
+ "Err!",
+ "Err!",
+ "Err!",
+ "Err!",
+ };
+ return strings[format];
+}
+
+void SpeculativeJIT::dump(const char* label)
+{
+ if (label)
+ dataLogF("<%s>\n", label);
+
+ dataLogF(" gprs:\n");
+ m_gprs.dump();
+ dataLogF(" fprs:\n");
+ m_fprs.dump();
+ dataLogF(" VirtualRegisters:\n");
+ for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
+ GenerationInfo& info = m_generationInfo[i];
+ if (info.alive())
+ dataLogF(" % 3d:%s%s", i, dataFormatString(info.registerFormat()), dataFormatString(info.spillFormat()));
+ else
+ dataLogF(" % 3d:[__][__]", i);
+ if (info.registerFormat() == DataFormatDouble)
+ dataLogF(":fpr%d\n", info.fpr());
+ else if (info.registerFormat() != DataFormatNone
+#if USE(JSVALUE32_64)
+ && !(info.registerFormat() & DataFormatJS)
+#endif
+ ) {
+ ASSERT(info.gpr() != InvalidGPRReg);
+ dataLogF(":%s\n", GPRInfo::debugName(info.gpr()));
+ } else
+ dataLogF("\n");
+ }
+ if (label)
+ dataLogF("</%s>\n", label);
+}
+#endif
+
+
+#if DFG_ENABLE(CONSISTENCY_CHECK)
+void SpeculativeJIT::checkConsistency()
+{
+ bool failed = false;
+
+ for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
+ if (iter.isLocked()) {
+ dataLogF("DFG_CONSISTENCY_CHECK failed: gpr %s is locked.\n", iter.debugName());
+ failed = true;
+ }
+ }
+ for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
+ if (iter.isLocked()) {
+ dataLogF("DFG_CONSISTENCY_CHECK failed: fpr %s is locked.\n", iter.debugName());
+ failed = true;
+ }
+ }
+
+ for (unsigned i = 0; i < m_generationInfo.size(); ++i) {
+ VirtualRegister virtualRegister = (VirtualRegister)i;
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ if (!info.alive())
+ continue;
+ switch (info.registerFormat()) {
+ case DataFormatNone:
+ break;
+ case DataFormatJS:
+ case DataFormatJSInteger:
+ case DataFormatJSDouble:
+ case DataFormatJSCell:
+ case DataFormatJSBoolean:
+#if USE(JSVALUE32_64)
+ break;
+#endif
+ case DataFormatInteger:
+ case DataFormatCell:
+ case DataFormatBoolean:
+ case DataFormatStorage: {
+ GPRReg gpr = info.gpr();
+ ASSERT(gpr != InvalidGPRReg);
+ if (m_gprs.name(gpr) != virtualRegister) {
+ dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (gpr %s).\n", virtualRegister, GPRInfo::debugName(gpr));
+ failed = true;
+ }
+ break;
+ }
+ case DataFormatDouble: {
+ FPRReg fpr = info.fpr();
+ ASSERT(fpr != InvalidFPRReg);
+ if (m_fprs.name(fpr) != virtualRegister) {
+ dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for virtual register %d (fpr %s).\n", virtualRegister, FPRInfo::debugName(fpr));
+ failed = true;
+ }
+ break;
+ }
+ case DataFormatOSRMarker:
+ case DataFormatDead:
+ case DataFormatArguments:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
+ VirtualRegister virtualRegister = iter.name();
+ if (virtualRegister == InvalidVirtualRegister)
+ continue;
+
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+#if USE(JSVALUE64)
+ if (iter.regID() != info.gpr()) {
+ dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ failed = true;
+ }
+#else
+ if (!(info.registerFormat() & DataFormatJS)) {
+ if (iter.regID() != info.gpr()) {
+ dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ failed = true;
+ }
+ } else {
+ if (iter.regID() != info.tagGPR() && iter.regID() != info.payloadGPR()) {
+ dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for gpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ failed = true;
+ }
+ }
+#endif
+ }
+
+ for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
+ VirtualRegister virtualRegister = iter.name();
+ if (virtualRegister == InvalidVirtualRegister)
+ continue;
+
+ GenerationInfo& info = m_generationInfo[virtualRegister];
+ if (iter.regID() != info.fpr()) {
+ dataLogF("DFG_CONSISTENCY_CHECK failed: name mismatch for fpr %s (virtual register %d).\n", iter.debugName(), virtualRegister);
+ failed = true;
+ }
+ }
+
+ if (failed) {
+ dump();
+ CRASH();
+ }
+}
+#endif
+
+GPRTemporary::GPRTemporary()
+ : m_jit(0)
+ , m_gpr(InvalidGPRReg)
+{
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, GPRReg specific)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ m_gpr = m_jit->allocate(specific);
+}
+
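+// The operand-taking constructors below all share one pattern: if this is the last
+// use of the operand's node (canReuse), recycle the operand's register as the
+// temporary; otherwise allocate a fresh one.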
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateIntegerOperand& op1, SpeculateIntegerOperand& op2)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else if (m_jit->canReuse(op2.node()))
+ m_gpr = m_jit->reuse(op2.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateStrictInt32Operand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, IntegerOperand& op1, IntegerOperand& op2)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else if (m_jit->canReuse(op2.node()))
+ m_gpr = m_jit->reuse(op2.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateCellOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, SpeculateBooleanOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
+#if USE(JSVALUE64)
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+#else
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1, bool tag)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (!op1.isDouble() && m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(tag ? op1.tagGPR() : op1.payloadGPR());
+ else
+ m_gpr = m_jit->allocate();
+}
+#endif
+
+GPRTemporary::GPRTemporary(SpeculativeJIT* jit, StorageOperand& op1)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else
+ m_gpr = m_jit->allocate();
+}
+
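+// Transfers ownership of another temporary's register to this (currently empty)
+// temporary, leaving the donor invalid.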
+void GPRTemporary::adopt(GPRTemporary& other)
+{
+ ASSERT(!m_jit);
+ ASSERT(m_gpr == InvalidGPRReg);
+ ASSERT(other.m_jit);
+ ASSERT(other.m_gpr != InvalidGPRReg);
+ m_jit = other.m_jit;
+ m_gpr = other.m_gpr;
+ other.m_jit = 0;
+ other.m_gpr = InvalidGPRReg;
+}
+
+FPRTemporary::FPRTemporary(SpeculativeJIT* jit)
+ : m_jit(jit)
+ , m_fpr(InvalidFPRReg)
+{
+ m_fpr = m_jit->fprAllocate();
+}
+
+FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1)
+ : m_jit(jit)
+ , m_fpr(InvalidFPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_fpr = m_jit->reuse(op1.fpr());
+ else
+ m_fpr = m_jit->fprAllocate();
+}
+
+FPRTemporary::FPRTemporary(SpeculativeJIT* jit, SpeculateDoubleOperand& op1, SpeculateDoubleOperand& op2)
+ : m_jit(jit)
+ , m_fpr(InvalidFPRReg)
+{
+ if (m_jit->canReuse(op1.node()))
+ m_fpr = m_jit->reuse(op1.fpr());
+ else if (m_jit->canReuse(op2.node()))
+ m_fpr = m_jit->reuse(op2.fpr());
+ else
+ m_fpr = m_jit->fprAllocate();
+}
+
+#if USE(JSVALUE32_64)
+FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1)
+ : m_jit(jit)
+ , m_fpr(InvalidFPRReg)
+{
+ if (op1.isDouble() && m_jit->canReuse(op1.node()))
+ m_fpr = m_jit->reuse(op1.fpr());
+ else
+ m_fpr = m_jit->fprAllocate();
+}
+#endif
+
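+// The compilePeepHole* helpers below fuse a comparison with the Branch node that
+// immediately follows it, so the comparison result never has to be materialized.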
+void SpeculativeJIT::compilePeepHoleDoubleBranch(Node* node, Node* branchNode, JITCompiler::DoubleCondition condition)
+{
+ BlockIndex taken = branchNode->takenBlockIndex();
+ BlockIndex notTaken = branchNode->notTakenBlockIndex();
+
+ SpeculateDoubleOperand op1(this, node->child1());
+ SpeculateDoubleOperand op2(this, node->child2());
+
+ branchDouble(condition, op1.fpr(), op2.fpr(), taken);
+ jump(notTaken);
+}
+
+void SpeculativeJIT::compilePeepHoleObjectEquality(Node* node, Node* branchNode)
+{
+ BlockIndex taken = branchNode->takenBlockIndex();
+ BlockIndex notTaken = branchNode->notTakenBlockIndex();
+
+ MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
+
+ if (taken == nextBlock()) {
+ condition = MacroAssembler::NotEqual;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ SpeculateCellOperand op1(this, node->child1());
+ SpeculateCellOperand op2(this, node->child2());
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+
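+ // If the masquerades-as-undefined watchpoint is still intact, register it and
+ // only rule out strings; otherwise we must also load each operand's structure
+ // and check the MasqueradesAsUndefined type-info flag.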
+ if (m_jit.graph().globalObjectFor(node->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
+ m_jit.graph().globalObjectFor(node->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
+ if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
+ speculationCheck(
+ BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op1GPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ }
+ if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
+ speculationCheck(
+ BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(op2GPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ }
+ } else {
+ GPRTemporary structure(this);
+ GPRReg structureGPR = structure.gpr();
+
+ m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR);
+ if (m_state.forNode(node->child1()).m_type & ~SpecObject) {
+ speculationCheck(
+ BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ }
+ speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
+ m_jit.branchTest8(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
+ MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
+
+ m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR);
+ if (m_state.forNode(node->child2()).m_type & ~SpecObject) {
+ speculationCheck(
+ BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ structureGPR,
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ }
+ speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
+ m_jit.branchTest8(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()),
+ MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
+ }
+
+ branchPtr(condition, op1GPR, op2GPR, taken);
+ jump(notTaken);
+}
+
+void SpeculativeJIT::compilePeepHoleBooleanBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
+{
+ BlockIndex taken = branchNode->takenBlockIndex();
+ BlockIndex notTaken = branchNode->notTakenBlockIndex();
+
+ // The branch instruction will branch to the taken block.
+ // If the taken block is next, swap taken with notTaken and invert the branch condition so we can fall through.
+ if (taken == nextBlock()) {
+ condition = JITCompiler::invert(condition);
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
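+ // A constant boolean operand can be folded into an immediate holding its
+ // encoded JSValue bits, truncated to 32 bits.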
+ if (isBooleanConstant(node->child1().node())) {
+ bool imm = valueOfBooleanConstant(node->child1().node());
+ SpeculateBooleanOperand op2(this, node->child2());
+ branch32(condition, JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), op2.gpr(), taken);
+ } else if (isBooleanConstant(node->child2().node())) {
+ SpeculateBooleanOperand op1(this, node->child1());
+ bool imm = valueOfBooleanConstant(node->child2().node());
+ branch32(condition, op1.gpr(), JITCompiler::Imm32(static_cast<int32_t>(JSValue::encode(jsBoolean(imm)))), taken);
+ } else {
+ SpeculateBooleanOperand op1(this, node->child1());
+ SpeculateBooleanOperand op2(this, node->child2());
+ branch32(condition, op1.gpr(), op2.gpr(), taken);
+ }
+
+ jump(notTaken);
+}
+
+void SpeculativeJIT::compilePeepHoleIntegerBranch(Node* node, Node* branchNode, JITCompiler::RelationalCondition condition)
+{
+ BlockIndex taken = branchNode->takenBlockIndex();
+ BlockIndex notTaken = branchNode->notTakenBlockIndex();
+
+ // The branch instruction will branch to the taken block.
+ // If the taken block is next, swap taken with notTaken and invert the branch condition so we can fall through.
+ if (taken == nextBlock()) {
+ condition = JITCompiler::invert(condition);
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
+
+ if (isInt32Constant(node->child1().node())) {
+ int32_t imm = valueOfInt32Constant(node->child1().node());
+ SpeculateIntegerOperand op2(this, node->child2());
+ branch32(condition, JITCompiler::Imm32(imm), op2.gpr(), taken);
+ } else if (isInt32Constant(node->child2().node())) {
+ SpeculateIntegerOperand op1(this, node->child1());
+ int32_t imm = valueOfInt32Constant(node->child2().node());
+ branch32(condition, op1.gpr(), JITCompiler::Imm32(imm), taken);
+ } else {
+ SpeculateIntegerOperand op1(this, node->child1());
+ SpeculateIntegerOperand op2(this, node->child2());
+ branch32(condition, op1.gpr(), op2.gpr(), taken);
+ }
+
+ jump(notTaken);
+}
// Returns true if the compare is fused with a subsequent branch.
-bool SpeculativeJIT::compilePeepHoleBranch(Node& node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
+bool SpeculativeJIT::compilePeepHoleBranch(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
{
// Fused compare & branch.
unsigned branchIndexInBlock = detectPeepHoleBranch();
if (branchIndexInBlock != UINT_MAX) {
- NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
+ Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
// detectPeepHoleBranch currently only permits the branch to be the very next node,
// so there can be no intervening nodes that also reference the compare.
- ASSERT(node.adjustedRefCount() == 1);
-
- if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())))
- compilePeepHoleIntegerBranch(node, branchNodeIndex, condition);
- else if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2())))
- compilePeepHoleDoubleBranch(node, branchNodeIndex, doubleCondition);
- else if (node.op() == CompareEq) {
- if (Node::shouldSpeculateFinalObject(
- at(node.child1()), at(node.child2()))) {
- compilePeepHoleObjectEquality(
- node, branchNodeIndex, &JSFinalObject::s_info,
- isFinalObjectPrediction);
- } else if (Node::shouldSpeculateArray(
- at(node.child1()), at(node.child2()))) {
- compilePeepHoleObjectEquality(
- node, branchNodeIndex, &JSArray::s_info,
- isArrayPrediction);
- } else if (at(node.child1()).shouldSpeculateFinalObject()
- && at(node.child2()).shouldSpeculateFinalObjectOrOther()) {
- compilePeepHoleObjectToObjectOrOtherEquality(
- node.child1(), node.child2(), branchNodeIndex,
- &JSFinalObject::s_info, isFinalObjectPrediction);
- } else if (at(node.child1()).shouldSpeculateFinalObjectOrOther()
- && at(node.child2()).shouldSpeculateFinalObject()) {
- compilePeepHoleObjectToObjectOrOtherEquality(
- node.child2(), node.child1(), branchNodeIndex,
- &JSFinalObject::s_info, isFinalObjectPrediction);
- } else if (at(node.child1()).shouldSpeculateArray()
- && at(node.child2()).shouldSpeculateArrayOrOther()) {
- compilePeepHoleObjectToObjectOrOtherEquality(
- node.child1(), node.child2(), branchNodeIndex,
- &JSArray::s_info, isArrayPrediction);
- } else if (at(node.child1()).shouldSpeculateArrayOrOther()
- && at(node.child2()).shouldSpeculateArray()) {
- compilePeepHoleObjectToObjectOrOtherEquality(
- node.child2(), node.child1(), branchNodeIndex,
- &JSArray::s_info, isArrayPrediction);
- } else {
- nonSpeculativePeepholeBranch(node, branchNodeIndex, condition, operation);
+ ASSERT(node->adjustedRefCount() == 1);
+
+ if (node->isBinaryUseKind(Int32Use))
+ compilePeepHoleIntegerBranch(node, branchNode, condition);
+ else if (node->isBinaryUseKind(NumberUse))
+ compilePeepHoleDoubleBranch(node, branchNode, doubleCondition);
+ else if (node->op() == CompareEq) {
+ if (node->isBinaryUseKind(StringUse)) {
+ // Use non-peephole comparison, for now.
+ return false;
+ }
+ if (node->isBinaryUseKind(BooleanUse))
+ compilePeepHoleBooleanBranch(node, branchNode, condition);
+ else if (node->isBinaryUseKind(ObjectUse))
+ compilePeepHoleObjectEquality(node, branchNode);
+ else if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse)
+ compilePeepHoleObjectToObjectOrOtherEquality(node->child1(), node->child2(), branchNode);
+ else if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse)
+ compilePeepHoleObjectToObjectOrOtherEquality(node->child2(), node->child1(), branchNode);
+ else {
+ nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
return true;
}
} else {
- nonSpeculativePeepholeBranch(node, branchNodeIndex, condition, operation);
+ nonSpeculativePeepholeBranch(node, branchNode, condition, operation);
return true;
}
- use(node.child1());
- use(node.child2());
+ use(node->child1());
+ use(node->child2());
m_indexInBlock = branchIndexInBlock;
- m_compileIndex = branchNodeIndex;
+ m_currentNode = branchNode;
return true;
}
return false;
}
-void SpeculativeJIT::compileMovHint(Node& node)
+void SpeculativeJIT::noticeOSRBirth(Node* node)
{
- ASSERT(node.op() == SetLocal);
+ if (!node->hasVirtualRegister())
+ return;
+
+ VirtualRegister virtualRegister = node->virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
- setNodeIndexForOperand(node.child1().index(), node.local());
- m_lastSetOperand = node.local();
+ info.noticeOSRBirth(*m_stream, node, virtualRegister);
+}
+
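+// Records, for OSR exit, that the bytecode variable 'local' now holds the value
+// produced by this node's child; the child (and, through UInt32ToNumber, its
+// child) is given the chance to be born into the OSR tracking stream here.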
+void SpeculativeJIT::compileMovHint(Node* node)
+{
+ ASSERT(node->containsMovHint() && node->op() != ZombieHint);
+
+ m_lastSetOperand = node->local();
+
+ Node* child = node->child1().node();
+ noticeOSRBirth(child);
+
+ if (child->op() == UInt32ToNumber)
+ noticeOSRBirth(child->child1().node());
+
+ m_stream->appendAndLog(VariableEvent::movHint(MinifiedID(child), node->local()));
+}
+
+void SpeculativeJIT::compileMovHintAndCheck(Node* node)
+{
+ compileMovHint(node);
+ speculate(node, node->child1());
+ noResult(node);
+}
+
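+// For an inlined call frame, computes a ValueRecovery for each argument (including
+// 'this') so that the Arguments machinery can reconstruct them if they are needed.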
+void SpeculativeJIT::compileInlineStart(Node* node)
+{
+ InlineCallFrame* inlineCallFrame = node->codeOrigin.inlineCallFrame;
+ int argumentCountIncludingThis = inlineCallFrame->arguments.size();
+ unsigned argumentPositionStart = node->argumentPositionStart();
+ CodeBlock* codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
+ for (int i = 0; i < argumentCountIncludingThis; ++i) {
+ ValueRecovery recovery;
+ if (codeBlock->isCaptured(argumentToOperand(i)))
+ recovery = ValueRecovery::alreadyInJSStack();
+ else {
+ ArgumentPosition& argumentPosition =
+ m_jit.graph().m_argumentPositions[argumentPositionStart + i];
+ ValueSource valueSource;
+ if (!argumentPosition.shouldUnboxIfPossible())
+ valueSource = ValueSource(ValueInJSStack);
+ else if (argumentPosition.shouldUseDoubleFormat())
+ valueSource = ValueSource(DoubleInJSStack);
+ else if (isInt32Speculation(argumentPosition.prediction()))
+ valueSource = ValueSource(Int32InJSStack);
+ else if (isCellSpeculation(argumentPosition.prediction()))
+ valueSource = ValueSource(CellInJSStack);
+ else if (isBooleanSpeculation(argumentPosition.prediction()))
+ valueSource = ValueSource(BooleanInJSStack);
+ else
+ valueSource = ValueSource(ValueInJSStack);
+ recovery = computeValueRecoveryFor(valueSource);
+ }
+ // The recovery should refer either to something that has already been
+ // stored into the stack at the right place, or to a constant, since the
+ // Arguments code isn't smart enough to handle anything else. The exception
+ // is the 'this' argument, which we don't really need to be able to recover.
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLogF("\nRecovery for argument %d: ", i);
+ recovery.dump(WTF::dataFile());
+#endif
+ inlineCallFrame->arguments[i] = recovery;
+ }
}
void SpeculativeJIT::compile(BasicBlock& block)
if (!block.isReachable)
return;
+
+ if (!block.cfaHasVisited) {
+ // Don't generate code for basic blocks that are unreachable according to CFA.
+ // But to be sure that nobody has generated a jump to this block, drop in a
+ // breakpoint here.
+#if !ASSERT_DISABLED
+ m_jit.breakpoint();
+#endif
+ return;
+ }
m_blockHeads[m_block] = m_jit.label();
#if DFG_ENABLE(JIT_BREAK_ON_EVERY_BLOCK)
m_jit.breakpoint();
#endif
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLogF("Setting up state for block #%u: ", m_block);
+#endif
+
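+ // Each block starts from a clean slate in the variable event stream; the
+ // per-variable setLocal events below re-establish the tracked state.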
+ m_stream->appendAndLog(VariableEvent::reset());
+
m_jit.jitAssertHasValidCallFrame();
ASSERT(m_arguments.size() == block.variablesAtHead.numberOfArguments());
for (size_t i = 0; i < m_arguments.size(); ++i) {
- NodeIndex nodeIndex = block.variablesAtHead.argument(i);
- if (nodeIndex == NoNode || m_jit.graph().argumentIsCaptured(i))
- m_arguments[i] = ValueSource(ValueInRegisterFile);
- else
- m_arguments[i] = ValueSource::forPrediction(at(nodeIndex).variableAccessData()->prediction());
+ ValueSource valueSource = ValueSource(ValueInJSStack);
+ m_arguments[i] = valueSource;
+ m_stream->appendAndLog(VariableEvent::setLocal(argumentToOperand(i), valueSource.dataFormat()));
}
m_state.reset();
ASSERT(m_variables.size() == block.variablesAtHead.numberOfLocals());
for (size_t i = 0; i < m_variables.size(); ++i) {
- NodeIndex nodeIndex = block.variablesAtHead.local(i);
- if ((nodeIndex == NoNode || !at(nodeIndex).refCount()) && !m_jit.graph().localIsCaptured(i))
- m_variables[i] = ValueSource(SourceIsDead);
- else if (m_jit.graph().localIsCaptured(i))
- m_variables[i] = ValueSource(ValueInRegisterFile);
- else if (at(nodeIndex).variableAccessData()->shouldUseDoubleFormat())
- m_variables[i] = ValueSource(DoubleInRegisterFile);
+ Node* node = block.variablesAtHead.local(i);
+ ValueSource valueSource;
+ if (!node)
+ valueSource = ValueSource(SourceIsDead);
+ else if (node->variableAccessData()->isArgumentsAlias())
+ valueSource = ValueSource(ArgumentsSource);
+ else if (!node->refCount())
+ valueSource = ValueSource(SourceIsDead);
+ else if (!node->variableAccessData()->shouldUnboxIfPossible())
+ valueSource = ValueSource(ValueInJSStack);
+ else if (node->variableAccessData()->shouldUseDoubleFormat())
+ valueSource = ValueSource(DoubleInJSStack);
else
- m_variables[i] = ValueSource::forPrediction(at(nodeIndex).variableAccessData()->prediction());
+ valueSource = ValueSource::forSpeculation(node->variableAccessData()->argumentAwarePrediction());
+ m_variables[i] = valueSource;
+ // FIXME: Don't emit SetLocal(Dead). https://bugs.webkit.org/show_bug.cgi?id=108019
+ m_stream->appendAndLog(VariableEvent::setLocal(i, valueSource.dataFormat()));
}
m_lastSetOperand = std::numeric_limits<int>::max();
verificationSucceeded.link(&m_jit);
}
- for (m_indexInBlock = 0; m_indexInBlock < block.size(); ++m_indexInBlock) {
- m_compileIndex = block[m_indexInBlock];
- Node& node = at(m_compileIndex);
- m_codeOriginForOSR = node.codeOrigin;
- if (!node.shouldGenerate()) {
#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x ", (int)m_compileIndex, node.codeOrigin.bytecodeIndex, m_jit.debugOffset());
+ dataLogF("\n");
#endif
- switch (node.op()) {
- case SetLocal:
- compileMovHint(node);
- break;
- case InlineStart: {
- InlineCallFrame* inlineCallFrame = node.codeOrigin.inlineCallFrame;
- int argumentCountIncludingThis = inlineCallFrame->arguments.size();
- for (int i = 0; i < argumentCountIncludingThis; ++i) {
- ValueRecovery recovery = computeValueRecoveryFor(m_variables[inlineCallFrame->stackOffset + CallFrame::argumentOffsetIncludingThis(i)]);
- // The recovery should refer either to something that has already been
- // stored into the register file at the right place, or to a constant,
- // since the Arguments code isn't smart enough to handle anything else.
- // The exception is the this argument, which we don't really need to be
- // able to recover.
+ for (m_indexInBlock = 0; m_indexInBlock < block.size(); ++m_indexInBlock) {
+ m_currentNode = block[m_indexInBlock];
+#if !ASSERT_DISABLED
+ m_canExit = m_currentNode->canExit();
+#endif
+ bool shouldExecuteEffects = m_state.startExecuting(m_currentNode);
+ m_jit.setForNode(m_currentNode);
+ m_codeOriginForOSR = m_currentNode->codeOrigin;
+ if (!m_currentNode->shouldGenerate()) {
#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("\nRecovery for argument %d: ", i);
- recovery.dump(WTF::dataFile());
+ dataLogF("SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x ", m_currentNode->index(), m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
#endif
- ASSERT(!i || (recovery.isAlreadyInRegisterFile() || recovery.isConstant()));
- inlineCallFrame->arguments[i] = recovery;
- }
+ switch (m_currentNode->op()) {
+ case JSConstant:
+ m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
break;
- }
+ case WeakJSConstant:
+ m_jit.addWeakReference(m_currentNode->weakConstant());
+ m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
+ break;
+
+ case SetLocal:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+
+ case MovHint:
+ compileMovHint(m_currentNode);
+ break;
+
+ case ZombieHint: {
+ m_lastSetOperand = m_currentNode->local();
+ m_stream->appendAndLog(VariableEvent::setLocal(m_currentNode->local(), DataFormatDead));
+ break;
+ }
+
default:
+ if (belongsInMinifiedGraph(m_currentNode->op()))
+ m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
break;
}
} else {
+ if (verboseCompilationEnabled()) {
+ dataLogF(
+ "SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x",
+ (int)m_currentNode->index(),
+ m_currentNode->codeOrigin.bytecodeIndex, m_jit.debugOffset());
#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("SpeculativeJIT generating Node @%d (bc#%u) at JIT offset 0x%x ", (int)m_compileIndex, node.codeOrigin.bytecodeIndex, m_jit.debugOffset());
+ dataLog(" ");
+#else
+ dataLog("\n");
#endif
+ }
#if DFG_ENABLE(JIT_BREAK_ON_EVERY_NODE)
m_jit.breakpoint();
#endif
#if DFG_ENABLE(XOR_DEBUG_AID)
- m_jit.xorPtr(JITCompiler::TrustedImm32(m_compileIndex), GPRInfo::regT0);
- m_jit.xorPtr(JITCompiler::TrustedImm32(m_compileIndex), GPRInfo::regT0);
+ m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
+ m_jit.xorPtr(JITCompiler::TrustedImm32(m_currentNode->index()), GPRInfo::regT0);
#endif
checkConsistency();
- compile(node);
+
+ m_speculationDirection = (m_currentNode->flags() & NodeExitsForward) ? ForwardSpeculation : BackwardSpeculation;
+
+ compile(m_currentNode);
if (!m_compileOkay) {
m_compileOkay = true;
clearGenerationInfo();
return;
}
+ if (belongsInMinifiedGraph(m_currentNode->op())) {
+ m_minifiedGraph->append(MinifiedNode::fromNode(m_currentNode));
+ noticeOSRBirth(m_currentNode);
+ }
+
#if DFG_ENABLE(DEBUG_VERBOSE)
- if (node.hasResult()) {
- GenerationInfo& info = m_generationInfo[node.virtualRegister()];
- dataLog("-> %s, vr#%d", dataFormatToString(info.registerFormat()), (int)node.virtualRegister());
+ if (m_currentNode->hasResult()) {
+ GenerationInfo& info = m_generationInfo[m_currentNode->virtualRegister()];
+ dataLogF("-> %s, vr#%d", dataFormatToString(info.registerFormat()), (int)m_currentNode->virtualRegister());
if (info.registerFormat() != DataFormatNone) {
if (info.registerFormat() == DataFormatDouble)
- dataLog(", %s", FPRInfo::debugName(info.fpr()));
+ dataLogF(", %s", FPRInfo::debugName(info.fpr()));
#if USE(JSVALUE32_64)
else if (info.registerFormat() & DataFormatJS)
- dataLog(", %s %s", GPRInfo::debugName(info.tagGPR()), GPRInfo::debugName(info.payloadGPR()));
+ dataLogF(", %s %s", GPRInfo::debugName(info.tagGPR()), GPRInfo::debugName(info.payloadGPR()));
#endif
else
- dataLog(", %s", GPRInfo::debugName(info.gpr()));
+ dataLogF(", %s", GPRInfo::debugName(info.gpr()));
}
- dataLog(" ");
+ dataLogF(" ");
} else
- dataLog(" ");
+ dataLogF(" ");
#endif
}
-#if DFG_ENABLE(VERBOSE_VALUE_RECOVERIES)
- for (size_t i = 0; i < m_arguments.size(); ++i)
- computeValueRecoveryFor(argumentToOperand(i)).dump(stderr);
-
- dataLog(" : ");
-
- for (int operand = 0; operand < (int)m_variables.size(); ++operand)
- computeValueRecoveryFor(operand).dump(stderr);
-#endif
-
#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("\n");
+ dataLogF("\n");
#endif
// Make sure that the abstract state is rematerialized for the next node.
- m_state.execute(m_indexInBlock);
+ if (shouldExecuteEffects)
+ m_state.executeEffects(m_indexInBlock);
- if (node.shouldGenerate())
+ if (m_currentNode->shouldGenerate())
checkConsistency();
}
// we need to check that they are correct on function entry.
void SpeculativeJIT::checkArgumentTypes()
{
- ASSERT(!m_compileIndex);
+ ASSERT(!m_currentNode);
+ m_isCheckingArgumentTypes = true;
+ m_speculationDirection = BackwardSpeculation;
m_codeOriginForOSR = CodeOrigin(0);
for (size_t i = 0; i < m_arguments.size(); ++i)
- m_arguments[i] = ValueSource(ValueInRegisterFile);
+ m_arguments[i] = ValueSource(ValueInJSStack);
for (size_t i = 0; i < m_variables.size(); ++i)
- m_variables[i] = ValueSource(ValueInRegisterFile);
+ m_variables[i] = ValueSource(ValueInJSStack);
for (int i = 0; i < m_jit.codeBlock()->numParameters(); ++i) {
- NodeIndex nodeIndex = m_jit.graph().m_arguments[i];
- Node& node = at(nodeIndex);
- ASSERT(node.op() == SetArgument);
- if (!node.shouldGenerate()) {
+ Node* node = m_jit.graph().m_arguments[i];
+ ASSERT(node->op() == SetArgument);
+ if (!node->shouldGenerate()) {
// The argument is dead. We don't do any checks for such arguments.
continue;
}
- VariableAccessData* variableAccessData = node.variableAccessData();
+ VariableAccessData* variableAccessData = node->variableAccessData();
+ if (!variableAccessData->isProfitableToUnbox())
+ continue;
+
VirtualRegister virtualRegister = variableAccessData->local();
- PredictedType predictedType = variableAccessData->prediction();
+ SpeculatedType predictedType = variableAccessData->prediction();
JSValueSource valueSource = JSValueSource(JITCompiler::addressFor(virtualRegister));
#if USE(JSVALUE64)
- if (isInt32Prediction(predictedType))
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
- else if (isArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
- } else if (isBooleanPrediction(predictedType)) {
+ if (isInt32Speculation(predictedType))
+ speculationCheck(BadType, valueSource, node, m_jit.branch64(MacroAssembler::Below, JITCompiler::addressFor(virtualRegister), GPRInfo::tagTypeNumberRegister));
+ else if (isBooleanSpeculation(predictedType)) {
GPRTemporary temp(this);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
- } else if (isInt8ArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int8ArrayDescriptor().m_classInfo)));
- } else if (isInt16ArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int16ArrayDescriptor().m_classInfo)));
- } else if (isInt32ArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int32ArrayDescriptor().m_classInfo)));
- } else if (isUint8ArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ArrayDescriptor().m_classInfo)));
- } else if (isUint8ClampedArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ClampedArrayDescriptor().m_classInfo)));
- } else if (isUint16ArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint16ArrayDescriptor().m_classInfo)));
- } else if (isUint32ArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint32ArrayDescriptor().m_classInfo)));
- } else if (isFloat32ArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->float32ArrayDescriptor().m_classInfo)));
- } else if (isFloat64ArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, temp.gpr(), GPRInfo::tagMaskRegister));
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->float64ArrayDescriptor().m_classInfo)));
- }
+ m_jit.load64(JITCompiler::addressFor(virtualRegister), temp.gpr());
+ m_jit.xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), temp.gpr());
+ speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, temp.gpr(), TrustedImm32(static_cast<int32_t>(~1))));
+ } else if (isCellSpeculation(predictedType))
+ speculationCheck(BadType, valueSource, node, m_jit.branchTest64(MacroAssembler::NonZero, JITCompiler::addressFor(virtualRegister), GPRInfo::tagMaskRegister));
#else
- if (isInt32Prediction(predictedType))
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
- else if (isArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
- m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
- } else if (isBooleanPrediction(predictedType))
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
- else if (isInt8ArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
- m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int8ArrayDescriptor().m_classInfo)));
- } else if (isInt16ArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
- m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int16ArrayDescriptor().m_classInfo)));
- } else if (isInt32ArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
- m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->int32ArrayDescriptor().m_classInfo)));
- } else if (isUint8ArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
- m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ArrayDescriptor().m_classInfo)));
- } else if (isUint8ClampedArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
- m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint8ClampedArrayDescriptor().m_classInfo)));
- } else if (isUint16ArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
- m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint16ArrayDescriptor().m_classInfo)));
- } else if (isUint32ArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
- m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->uint32ArrayDescriptor().m_classInfo)));
- } else if (isFloat32ArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
- m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->float32ArrayDescriptor().m_classInfo)));
- } else if (isFloat64ArrayPrediction(predictedType)) {
- GPRTemporary temp(this);
- m_jit.load32(JITCompiler::tagFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, temp.gpr(), TrustedImm32(JSValue::CellTag)));
- m_jit.load32(JITCompiler::payloadFor(virtualRegister), temp.gpr());
- speculationCheck(BadType, valueSource, nodeIndex, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(temp.gpr(), JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(m_jit.globalData()->float64ArrayDescriptor().m_classInfo)));
- }
+ if (isInt32Speculation(predictedType))
+ speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
+ else if (isBooleanSpeculation(predictedType))
+ speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
+ else if (isCellSpeculation(predictedType))
+ speculationCheck(BadType, valueSource, node, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
#endif
}
+ m_isCheckingArgumentTypes = false;
}
bool SpeculativeJIT::compile()
if (DFG_ENABLE_EDGE_CODE_VERIFICATION)
m_jit.move(TrustedImm32(0), GPRInfo::regT0);
- ASSERT(!m_compileIndex);
- for (m_block = 0; m_block < m_jit.graph().m_blocks.size(); ++m_block)
- compile(*m_jit.graph().m_blocks[m_block]);
+ ASSERT(!m_currentNode);
+ for (m_block = 0; m_block < m_jit.graph().m_blocks.size(); ++m_block) {
+ m_jit.setForBlock(m_block);
+ BasicBlock* block = m_jit.graph().m_blocks[m_block].get();
+ if (block)
+ compile(*block);
+ }
linkBranches();
return true;
}
void SpeculativeJIT::createOSREntries()
{
for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().m_blocks.size(); ++blockIndex) {
- BasicBlock& block = *m_jit.graph().m_blocks[blockIndex];
- if (!block.isOSRTarget)
+ BasicBlock* block = m_jit.graph().m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ if (!block->isOSRTarget)
continue;
// Currently we only need to create OSR entry trampolines when using edge code
{
unsigned osrEntryIndex = 0;
for (BlockIndex blockIndex = 0; blockIndex < m_jit.graph().m_blocks.size(); ++blockIndex) {
- BasicBlock& block = *m_jit.graph().m_blocks[blockIndex];
- if (block.isOSRTarget)
- m_jit.noticeOSREntry(block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
+ BasicBlock* block = m_jit.graph().m_blocks[blockIndex].get();
+ if (!block)
+ continue;
+ if (!block->isOSRTarget)
+ continue;
+ m_jit.noticeOSREntry(*block, m_osrEntryHeads[osrEntryIndex++], linkBuffer);
+ }
+ ASSERT(osrEntryIndex == m_osrEntryHeads.size());
+}
+
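+// With the variable event stream now doing most of the bookkeeping, this only has
+// to handle the easy cases: values already flushed to the JSStack, and constants.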
+ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource)
+{
+ if (valueSource.isInJSStack())
+ return valueSource.valueRecovery();
+
+ ASSERT(valueSource.kind() == HaveNode);
+ Node* node = valueSource.id().node(m_jit.graph());
+ if (isConstant(node))
+ return ValueRecovery::constant(valueOfJSConstant(node));
+
+ return ValueRecovery();
+}
+
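+// Stores a double into a double-shaped array. The value is type-checked to be a
+// real number, since NaN is how holes are represented in double storage, and the
+// in-bounds and out-of-bounds cases are handled according to the node's ArrayMode.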
+void SpeculativeJIT::compileDoublePutByVal(Node* node, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property)
+{
+ Edge child3 = m_jit.graph().varArgChild(node, 2);
+ Edge child4 = m_jit.graph().varArgChild(node, 3);
+
+ ArrayMode arrayMode = node->arrayMode();
+
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+
+ SpeculateDoubleOperand value(this, child3);
+
+ FPRReg valueReg = value.fpr();
+
+ DFG_TYPE_CHECK(
+ JSValueRegs(), child3, SpecRealNumber,
+ m_jit.branchDouble(
+ MacroAssembler::DoubleNotEqualOrUnordered, valueReg, valueReg));
+
+ if (!m_compileOkay)
+ return;
+
+ StorageOperand storage(this, child4);
+ GPRReg storageReg = storage.gpr();
+
+ if (node->op() == PutByValAlias) {
+ // Store the value to the array.
+ m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
+
+ noResult(m_currentNode);
+ return;
}
- ASSERT(osrEntryIndex == m_osrEntryHeads.size());
-}
+
+ GPRTemporary temporary;
+ GPRReg temporaryReg = temporaryRegisterForPutByVal(temporary, node);
-ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource)
-{
- switch (valueSource.kind()) {
- case SourceIsDead:
- return ValueRecovery::constant(jsUndefined());
-
- case ValueInRegisterFile:
- return ValueRecovery::alreadyInRegisterFile();
-
- case Int32InRegisterFile:
- return ValueRecovery::alreadyInRegisterFileAsUnboxedInt32();
-
- case CellInRegisterFile:
- return ValueRecovery::alreadyInRegisterFileAsUnboxedCell();
-
- case BooleanInRegisterFile:
- return ValueRecovery::alreadyInRegisterFileAsUnboxedBoolean();
-
- case DoubleInRegisterFile:
- return ValueRecovery::alreadyInRegisterFileAsUnboxedDouble();
-
- case HaveNode: {
- if (isConstant(valueSource.nodeIndex()))
- return ValueRecovery::constant(valueOfJSConstant(valueSource.nodeIndex()));
-
- Node* nodePtr = &at(valueSource.nodeIndex());
- if (!nodePtr->shouldGenerate()) {
- // It's legitimately dead. As in, nobody will ever use this node, or operand,
- // ever. Set it to Undefined to make the GC happy after the OSR.
- return ValueRecovery::constant(jsUndefined());
- }
-
- GenerationInfo* infoPtr = &m_generationInfo[nodePtr->virtualRegister()];
- if (!infoPtr->alive() || infoPtr->nodeIndex() != valueSource.nodeIndex()) {
- // Try to see if there is an alternate node that would contain the value we want.
- // There are four possibilities:
- //
- // Int32ToDouble: We can use this in place of the original node, but
- // we'd rather not; so we use it only if it is the only remaining
- // live version.
- //
- // ValueToInt32: If the only remaining live version of the value is
- // ValueToInt32, then we can use it.
- //
- // UInt32ToNumber: If the only live version of the value is a UInt32ToNumber
- // then the only remaining uses are ones that want a properly formed number
- // rather than a UInt32 intermediate.
- //
- // The reverse of the above: This node could be a UInt32ToNumber, but its
- // alternative is still alive. This means that the only remaining uses of
- // the number would be fine with a UInt32 intermediate.
- //
- // DoubleAsInt32: Same as UInt32ToNumber.
- //
-
- bool found = false;
-
- if (nodePtr->op() == UInt32ToNumber || nodePtr->op() == DoubleAsInt32) {
- NodeIndex nodeIndex = nodePtr->child1().index();
- nodePtr = &at(nodeIndex);
- infoPtr = &m_generationInfo[nodePtr->virtualRegister()];
- if (infoPtr->alive() && infoPtr->nodeIndex() == nodeIndex)
- found = true;
- }
+ MacroAssembler::Jump slowCase;
+
+ if (arrayMode.isInBounds()) {
+ speculationCheck(
+ StoreToHoleOrOutOfBounds, JSValueRegs(), 0,
+ m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
+ } else {
+ MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
- if (!found) {
- NodeIndex int32ToDoubleIndex = NoNode;
- NodeIndex valueToInt32Index = NoNode;
- NodeIndex uint32ToNumberIndex = NoNode;
- NodeIndex doubleAsInt32Index = NoNode;
-
- for (unsigned virtualRegister = 0; virtualRegister < m_generationInfo.size(); ++virtualRegister) {
- GenerationInfo& info = m_generationInfo[virtualRegister];
- if (!info.alive())
- continue;
- if (info.nodeIndex() == NoNode)
- continue;
- Node& node = at(info.nodeIndex());
- if (node.child1Unchecked() != valueSource.nodeIndex())
- continue;
- switch (node.op()) {
- case Int32ToDouble:
- int32ToDoubleIndex = info.nodeIndex();
- break;
- case ValueToInt32:
- valueToInt32Index = info.nodeIndex();
- break;
- case UInt32ToNumber:
- uint32ToNumberIndex = info.nodeIndex();
- break;
- case DoubleAsInt32:
- doubleAsInt32Index = info.nodeIndex();
- default:
- break;
- }
- }
-
- NodeIndex nodeIndexToUse;
- if (doubleAsInt32Index != NoNode)
- nodeIndexToUse = doubleAsInt32Index;
- else if (int32ToDoubleIndex != NoNode)
- nodeIndexToUse = int32ToDoubleIndex;
- else if (valueToInt32Index != NoNode)
- nodeIndexToUse = valueToInt32Index;
- else if (uint32ToNumberIndex != NoNode)
- nodeIndexToUse = uint32ToNumberIndex;
- else
- nodeIndexToUse = NoNode;
-
- if (nodeIndexToUse != NoNode) {
- nodePtr = &at(nodeIndexToUse);
- infoPtr = &m_generationInfo[nodePtr->virtualRegister()];
- ASSERT(infoPtr->alive() && infoPtr->nodeIndex() == nodeIndexToUse);
- found = true;
- }
- }
+ slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
- if (!found)
- return ValueRecovery::constant(jsUndefined());
- }
+ if (!arrayMode.isOutOfBounds())
+ speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
+
+ m_jit.add32(TrustedImm32(1), propertyReg, temporaryReg);
+ m_jit.store32(temporaryReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
+
+ inBounds.link(&m_jit);
+ }
- ASSERT(infoPtr->alive());
+ m_jit.storeDouble(valueReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight));
- if (infoPtr->registerFormat() != DataFormatNone) {
- if (infoPtr->registerFormat() == DataFormatDouble)
- return ValueRecovery::inFPR(infoPtr->fpr());
-#if USE(JSVALUE32_64)
- if (infoPtr->registerFormat() & DataFormatJS)
- return ValueRecovery::inPair(infoPtr->tagGPR(), infoPtr->payloadGPR());
-#endif
- return ValueRecovery::inGPR(infoPtr->gpr(), infoPtr->registerFormat());
- }
- if (infoPtr->spillFormat() != DataFormatNone)
- return ValueRecovery::displacedInRegisterFile(static_cast<VirtualRegister>(nodePtr->virtualRegister()), infoPtr->spillFormat());
+ base.use();
+ property.use();
+ value.use();
+ storage.use();
- ASSERT_NOT_REACHED();
- return ValueRecovery();
- }
-
- default:
- ASSERT_NOT_REACHED();
- return ValueRecovery();
+ if (arrayMode.isOutOfBounds()) {
+ addSlowPathGenerator(
+ slowPathCall(
+ slowCase, this,
+ m_jit.codeBlock()->isStrictMode() ? operationPutDoubleByValBeyondArrayBoundsStrict : operationPutDoubleByValBeyondArrayBoundsNonStrict,
+ NoResult, baseReg, propertyReg, valueReg));
}
+
+ noResult(m_currentNode, UseChildrenCalledExplicitly);
}
-void SpeculativeJIT::compileGetCharCodeAt(Node& node)
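+// Implements charCodeAt for a string already known to be a string: fails
+// speculation on out-of-range indices and returns the character as an int32.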
+void SpeculativeJIT::compileGetCharCodeAt(Node* node)
{
- ASSERT(node.child3() == NoNode);
- SpeculateCellOperand string(this, node.child1());
- SpeculateStrictInt32Operand index(this, node.child2());
- StorageOperand storage(this, node.child3());
+ SpeculateCellOperand string(this, node->child1());
+ SpeculateStrictInt32Operand index(this, node->child2());
+ StorageOperand storage(this, node->child3());
GPRReg stringReg = string.gpr();
GPRReg indexReg = index.gpr();
GPRReg storageReg = storage.gpr();
- if (!isStringPrediction(m_state.forNode(node.child1()).m_type)) {
- ASSERT(!(at(node.child1()).prediction() & PredictString));
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
- noResult(m_compileIndex);
- return;
- }
+ ASSERT(speculationChecked(m_state.forNode(node->child1()).m_type, SpecString));
// Unsigned comparison so we can filter out negative indices and indices that are too large.
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
+ speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, indexReg, MacroAssembler::Address(stringReg, JSString::offsetOfLength())));
GPRTemporary scratch(this);
GPRReg scratchReg = scratch.gpr();
cont8Bit.link(&m_jit);
- integerResult(scratchReg, m_compileIndex);
+ integerResult(scratchReg, m_currentNode);
}
-void SpeculativeJIT::compileGetByValOnString(Node& node)
+void SpeculativeJIT::compileGetByValOnString(Node* node)
{
- SpeculateCellOperand base(this, node.child1());
- SpeculateStrictInt32Operand property(this, node.child2());
- StorageOperand storage(this, node.child3());
+ SpeculateCellOperand base(this, node->child1());
+ SpeculateStrictInt32Operand property(this, node->child2());
+ StorageOperand storage(this, node->child3());
GPRReg baseReg = base.gpr();
GPRReg propertyReg = property.gpr();
GPRReg storageReg = storage.gpr();
- if (!isStringPrediction(m_state.forNode(node.child1()).m_type)) {
- ASSERT(!(at(node.child1()).prediction() & PredictString));
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
- noResult(m_compileIndex);
- return;
- }
+ ASSERT(ArrayMode(Array::String).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
// Unsigned comparison so we can filter out negative indices and indices that are too large.
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSString::offsetOfLength())));
+ speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSString::offsetOfLength())));
GPRTemporary scratch(this);
GPRReg scratchReg = scratch.gpr();
m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo, 0), scratchReg);
// We only support characters in the Latin-1 range (below 0x100).
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100)));
+ speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, scratchReg, TrustedImm32(0x100)));
// 8-bit string values don't need the range check.
cont8Bit.link(&m_jit);
GPRTemporary smallStrings(this);
GPRReg smallStringsReg = smallStrings.gpr();
- m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.globalData()->smallStrings.singleCharacterStrings()), smallStringsReg);
+ m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, scratchReg, MacroAssembler::ScalePtr, 0), scratchReg);
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
- cellResult(scratchReg, m_compileIndex);
+ speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
+ cellResult(scratchReg, m_currentNode);
}
-GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(NodeIndex nodeIndex)
+void SpeculativeJIT::compileFromCharCode(Node* node)
{
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("checkGeneratedTypeForToInt32@%d ", nodeIndex);
-#endif
- Node& node = at(nodeIndex);
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
-
- if (info.registerFormat() == DataFormatNone) {
- if (node.hasConstant()) {
- if (isInt32Constant(nodeIndex))
- return GeneratedOperandInteger;
+ SpeculateStrictInt32Operand property(this, node->child1());
+ GPRReg propertyReg = property.gpr();
+ GPRTemporary smallStrings(this);
+ GPRTemporary scratch(this);
+ GPRReg scratchReg = scratch.gpr();
+ GPRReg smallStringsReg = smallStrings.gpr();
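+
+ // Char codes below 0xff hit the VM's lazily-filled single-character string
+ // cache; larger codes, or a cache slot that is still null, take the slow
+ // path to operationStringFromCharCode.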
- if (isNumberConstant(nodeIndex))
- return GeneratedOperandDouble;
+ JITCompiler::JumpList slowCases;
+ slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, TrustedImm32(0xff)));
+ m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.vm()->smallStrings.singleCharacterStrings()), smallStringsReg);
+ m_jit.loadPtr(MacroAssembler::BaseIndex(smallStringsReg, propertyReg, MacroAssembler::ScalePtr, 0), scratchReg);
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
- return GeneratedOperandTypeUnknown;
- }
+ slowCases.append(m_jit.branchTest32(MacroAssembler::Zero, scratchReg));
+ addSlowPathGenerator(slowPathCall(slowCases, this, operationStringFromCharCode, scratchReg, propertyReg));
+ cellResult(scratchReg, m_currentNode);
+}
- if (info.spillFormat() == DataFormatDouble)
- return GeneratedOperandDouble;
- }
+GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(Node* node)
+{
+#if DFG_ENABLE(DEBUG_VERBOSE)
+ dataLogF("checkGeneratedTypeForToInt32@%d ", node->index());
+#endif
+ VirtualRegister virtualRegister = node->virtualRegister();
+ GenerationInfo& info = m_generationInfo[virtualRegister];
switch (info.registerFormat()) {
- case DataFormatBoolean: // This type never occurs.
case DataFormatStorage:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
+ case DataFormatBoolean:
case DataFormatCell:
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
return GeneratedOperandTypeUnknown;
case DataFormatNone:
case DataFormatJSDouble:
case DataFormatDouble:
return GeneratedOperandDouble;
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return GeneratedOperandTypeUnknown;
}
-
- ASSERT_NOT_REACHED();
- return GeneratedOperandTypeUnknown;
}
-void SpeculativeJIT::compileValueToInt32(Node& node)
+void SpeculativeJIT::compileValueToInt32(Node* node)
{
- if (at(node.child1()).shouldSpeculateInteger()) {
- SpeculateIntegerOperand op1(this, node.child1());
+ switch (node->child1().useKind()) {
+ case Int32Use: {
+ SpeculateIntegerOperand op1(this, node->child1());
GPRTemporary result(this, op1);
m_jit.move(op1.gpr(), result.gpr());
- integerResult(result.gpr(), m_compileIndex, op1.format());
+ integerResult(result.gpr(), node, op1.format());
return;
}
- if (at(node.child1()).shouldSpeculateNumber()) {
- switch (checkGeneratedTypeForToInt32(node.child1().index())) {
+ case NumberUse:
+ case NotCellUse: {
+ switch (checkGeneratedTypeForToInt32(node->child1().node())) {
case GeneratedOperandInteger: {
- SpeculateIntegerOperand op1(this, node.child1());
+ SpeculateIntegerOperand op1(this, node->child1(), ManualOperandSpeculation);
GPRTemporary result(this, op1);
m_jit.move(op1.gpr(), result.gpr());
- integerResult(result.gpr(), m_compileIndex, op1.format());
+ integerResult(result.gpr(), node, op1.format());
return;
}
case GeneratedOperandDouble: {
GPRTemporary result(this);
- DoubleOperand op1(this, node.child1());
+ SpeculateDoubleOperand op1(this, node->child1(), ManualOperandSpeculation);
FPRReg fpr = op1.fpr();
GPRReg gpr = result.gpr();
- JITCompiler::Jump truncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateSuccessful);
-
- silentSpillAllRegisters(gpr);
- callOperation(toInt32, gpr, fpr);
- silentFillAllRegisters(gpr);
+ JITCompiler::Jump notTruncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateFailed);
+
+ addSlowPathGenerator(slowPathCall(notTruncatedToInteger, this, toInt32, gpr, fpr));
- truncatedToInteger.link(&m_jit);
- integerResult(gpr, m_compileIndex);
+ integerResult(gpr, node);
return;
}
case GeneratedOperandJSValue: {
GPRTemporary result(this);
#if USE(JSVALUE64)
- JSValueOperand op1(this, node.child1());
+ JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
GPRReg gpr = op1.gpr();
GPRReg resultGpr = result.gpr();
FPRTemporary tempFpr(this);
FPRReg fpr = tempFpr.fpr();
- JITCompiler::Jump isInteger = m_jit.branchPtr(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::Jump isInteger = m_jit.branch64(MacroAssembler::AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
+ JITCompiler::JumpList converted;
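+
+ // JSVALUE64 number encoding: an int32 immediate has all tagTypeNumber bits
+ // set (hence the unsigned >= test above), a double has at least one of
+ // them set, and a value with none set is not a number at all.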
- speculationCheck(BadType, JSValueRegs(gpr), node.child1().index(), m_jit.branchTestPtr(MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
+ if (node->child1().useKind() == NumberUse) {
+ DFG_TYPE_CHECK(
+ JSValueRegs(gpr), node->child1(), SpecNumber,
+ m_jit.branchTest64(
+ MacroAssembler::Zero, gpr, GPRInfo::tagTypeNumberRegister));
+ } else {
+ JITCompiler::Jump isNumber = m_jit.branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
+
+ DFG_TYPE_CHECK(
+ JSValueRegs(gpr), node->child1(), ~SpecCell,
+ m_jit.branchTest64(
+ JITCompiler::Zero, gpr, GPRInfo::tagMaskRegister));
+
+ // It's not a cell, so true turns into 1 and all else turns into 0.
+ m_jit.compare64(JITCompiler::Equal, gpr, TrustedImm32(ValueTrue), resultGpr);
+ converted.append(m_jit.jump());
+
+ isNumber.link(&m_jit);
+ }
// First, if we get here we have a double encoded as a JSValue
m_jit.move(gpr, resultGpr);
callOperation(toInt32, resultGpr, fpr);
silentFillAllRegisters(resultGpr);
- JITCompiler::Jump converted = m_jit.jump();
+ converted.append(m_jit.jump());
isInteger.link(&m_jit);
m_jit.zeroExtend32ToPtr(gpr, resultGpr);
converted.link(&m_jit);
#else
- Node& childNode = at(node.child1().index());
- VirtualRegister virtualRegister = childNode.virtualRegister();
+ Node* childNode = node->child1().node();
+ VirtualRegister virtualRegister = childNode->virtualRegister();
GenerationInfo& info = m_generationInfo[virtualRegister];
- JSValueOperand op1(this, node.child1());
+ JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
GPRReg payloadGPR = op1.payloadGPR();
GPRReg resultGpr = result.gpr();
+
+ JITCompiler::JumpList converted;
if (info.registerFormat() == DataFormatJSInteger)
m_jit.move(payloadGPR, resultGpr);
JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
- speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), node.child1().index(), m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)));
+ if (node->child1().useKind() == NumberUse) {
+ DFG_TYPE_CHECK(
+ JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecNumber,
+ m_jit.branch32(
+ MacroAssembler::AboveOrEqual, tagGPR,
+ TrustedImm32(JSValue::LowestTag)));
+ } else {
+ JITCompiler::Jump isNumber = m_jit.branch32(MacroAssembler::Below, tagGPR, TrustedImm32(JSValue::LowestTag));
+
+ DFG_TYPE_CHECK(
+ JSValueRegs(tagGPR, payloadGPR), node->child1(), ~SpecCell,
+ m_jit.branch32(
+ JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::CellTag)));
+
+ // It's not a cell, so true turns into 1 and all else turns into 0.
+ JITCompiler::Jump isBoolean = m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::BooleanTag));
+ m_jit.move(TrustedImm32(0), resultGpr);
+ converted.append(m_jit.jump());
+
+ isBoolean.link(&m_jit);
+ m_jit.move(payloadGPR, resultGpr);
+ converted.append(m_jit.jump());
+
+ isNumber.link(&m_jit);
+ }
unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
callOperation(toInt32, resultGpr, fpr);
silentFillAllRegisters(resultGpr);
- JITCompiler::Jump converted = m_jit.jump();
+ converted.append(m_jit.jump());
isInteger.link(&m_jit);
m_jit.move(payloadGPR, resultGpr);
converted.link(&m_jit);
}
#endif
- integerResult(resultGpr, m_compileIndex);
+ integerResult(resultGpr, node);
return;
}
case GeneratedOperandTypeUnknown:
- ASSERT_NOT_REACHED();
- break;
+ RELEASE_ASSERT(!m_compileOkay);
+ return;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return;
+ }
+
+ case BooleanUse: {
+ SpeculateBooleanOperand op1(this, node->child1());
+ GPRTemporary result(this, op1);
+
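+ // On 64-bit, booleans are the immediates ValueFalse/ValueTrue (0x6/0x7),
+ // so the low bit is exactly the boolean's integer value; the 32-bit
+ // payload encoding (0 or 1) is likewise covered by masking with 1.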
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.and32(JITCompiler::TrustedImm32(1), result.gpr());
+
+ integerResult(result.gpr(), node);
+ return;
+ }
+
+ default:
+ ASSERT(!m_compileOkay);
+ return;
+ }
+}
+
+void SpeculativeJIT::compileUInt32ToNumber(Node* node)
+{
+ if (!nodeCanSpeculateInteger(node->arithNodeFlags())) {
+ // We know that this sometimes produces doubles, so produce a double every
+ // time. This at least spares subsequent code from weird conditionals.
+
+ IntegerOperand op1(this, node->child1());
+ FPRTemporary result(this);
+
+ GPRReg inputGPR = op1.gpr();
+ FPRReg outputFPR = result.fpr();
+
+ m_jit.convertInt32ToDouble(inputGPR, outputFPR);
+
+ JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
+ m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
+ positive.link(&m_jit);
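+
+ // Worked example: uint32 0xffffffff reads as int32 -1; converting yields
+ // -1.0 and adding 2^32 restores the intended 4294967295.0.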
+
+ doubleResult(outputFPR, node);
+ return;
+ }
+
+ IntegerOperand op1(this, node->child1());
+ // For the benefit of OSR exit, force these to be in different registers. In
+ // reality the OSR exit compiler could find cases where you have uint32(%r1)
+ // followed by int32(%r1) and then use different registers, but that seems
+ // like too much effort.
+ GPRTemporary result(this);
+
+ m_jit.move(op1.gpr(), result.gpr());
+
+ // Test the operand is positive. This is a very special speculation check - we actually
+ // use roll-forward speculation here, where if this fails, we jump to the baseline
+ // instruction that follows us, rather than the one we're executing right now. We have
+ // to do this because by this point, the original values necessary to compile whatever
+ // operation the UInt32ToNumber originated from might be dead.
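+ // For example, if this UInt32ToNumber was emitted for something like
+ // "x >>> 0", a failed check exits to the bytecode *after* the shift, with
+ // the uint32 result recovered from the GPR via ValueRecovery::uint32InGPR.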
+ forwardSpeculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, result.gpr(), TrustedImm32(0)), ValueRecovery::uint32InGPR(result.gpr()));
+
+ integerResult(result.gpr(), node, op1.format());
+}
+
+void SpeculativeJIT::compileDoubleAsInt32(Node* node)
+{
+ SpeculateDoubleOperand op1(this, node->child1());
+ FPRTemporary scratch(this);
+ GPRTemporary result(this);
+
+ FPRReg valueFPR = op1.fpr();
+ FPRReg scratchFPR = scratch.fpr();
+ GPRReg resultGPR = result.gpr();
+
+ JITCompiler::JumpList failureCases;
+ bool negZeroCheck = !nodeCanIgnoreNegativeZero(node->arithNodeFlags());
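+ // -0.0 truncates to the same int32 as +0.0, so when negative zero is
+ // observable we make the converter treat -0.0 as a failure case too.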
+ m_jit.branchConvertDoubleToInt32(valueFPR, resultGPR, failureCases, scratchFPR, negZeroCheck);
+ forwardSpeculationCheck(Overflow, JSValueRegs(), 0, failureCases, ValueRecovery::inFPR(valueFPR));
+
+ integerResult(resultGPR, node);
+}
+
+void SpeculativeJIT::compileInt32ToDouble(Node* node)
+{
+ ASSERT(!isInt32Constant(node->child1().node())); // This should have been constant folded.
+
+ if (isInt32Speculation(m_state.forNode(node->child1()).m_type)) {
+ SpeculateIntegerOperand op1(this, node->child1(), ManualOperandSpeculation);
+ FPRTemporary result(this);
+ m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
+ doubleResult(result.fpr(), node);
+ return;
+ }
+
+ JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
+ FPRTemporary result(this);
+
+#if USE(JSVALUE64)
+ GPRTemporary temp(this);
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg tempGPR = temp.gpr();
+ FPRReg resultFPR = result.fpr();
+
+ JITCompiler::Jump isInteger = m_jit.branch64(
+ MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
+
+ if (needsTypeCheck(node->child1(), SpecNumber)) {
+ if (node->op() == ForwardInt32ToDouble) {
+ forwardTypeCheck(
+ JSValueRegs(op1GPR), node->child1(), SpecNumber,
+ m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister),
+ ValueRecovery::inGPR(op1GPR, DataFormatJS));
+ } else {
+ backwardTypeCheck(
+ JSValueRegs(op1GPR), node->child1(), SpecNumber,
+ m_jit.branchTest64(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
+ }
+ }
+
+ m_jit.move(op1GPR, tempGPR);
+ unboxDouble(tempGPR, resultFPR);
+ JITCompiler::Jump done = m_jit.jump();
+
+ isInteger.link(&m_jit);
+ m_jit.convertInt32ToDouble(op1GPR, resultFPR);
+ done.link(&m_jit);
+#else
+ FPRTemporary temp(this);
+
+ GPRReg op1TagGPR = op1.tagGPR();
+ GPRReg op1PayloadGPR = op1.payloadGPR();
+ FPRReg tempFPR = temp.fpr();
+ FPRReg resultFPR = result.fpr();
+
+ JITCompiler::Jump isInteger = m_jit.branch32(
+ MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
+
+ if (needsTypeCheck(node->child1(), SpecNumber)) {
+ if (node->op() == ForwardInt32ToDouble) {
+ forwardTypeCheck(
+ JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecNumber,
+ m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)),
+ ValueRecovery::inPair(op1TagGPR, op1PayloadGPR));
+ } else {
+ backwardTypeCheck(
+ JSValueRegs(op1TagGPR, op1PayloadGPR), node->child1(), SpecNumber,
+ m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
}
}
- if (at(node.child1()).shouldSpeculateBoolean()) {
- SpeculateBooleanOperand op1(this, node.child1());
- GPRTemporary result(this, op1);
-
- m_jit.and32(JITCompiler::TrustedImm32(1), op1.gpr());
-
- integerResult(op1.gpr(), m_compileIndex);
+ unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
+ JITCompiler::Jump done = m_jit.jump();
+
+ isInteger.link(&m_jit);
+ m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
+ done.link(&m_jit);
+#endif
+
+ doubleResult(resultFPR, node);
+}
+
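+// Clamps to [0, 255] with round-half-up: after adding 0.5, NaN and
+// non-positive sums collapse to 0 and sums above 255 clamp to 255; the
+// caller truncates the rest, so e.g. 254.5 rounds to 255 and NaN maps to 0.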
+static double clampDoubleToByte(double d)
+{
+ d += 0.5;
+ if (!(d > 0))
+ d = 0;
+ else if (d > 255)
+ d = 255;
+ return d;
+}
+
+static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
+{
+ MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
+ MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
+ jit.xorPtr(result, result);
+ MacroAssembler::Jump clamped = jit.jump();
+ tooBig.link(&jit);
+ jit.move(JITCompiler::TrustedImm32(255), result);
+ clamped.link(&jit);
+ inBounds.link(&jit);
+}
+
+static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
+{
+ // Unordered compare so we pick up NaN
+ static const double zero = 0;
+ static const double byteMax = 255;
+ static const double half = 0.5;
+ jit.loadDouble(&zero, scratch);
+ MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
+ jit.loadDouble(&byteMax, scratch);
+ MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
+
+ jit.loadDouble(&half, scratch);
+ // FIXME: This should probably just use a floating point round!
+ // https://bugs.webkit.org/show_bug.cgi?id=72054
+ jit.addDouble(source, scratch);
+ jit.truncateDoubleToInt32(scratch, result);
+ MacroAssembler::Jump truncatedInt = jit.jump();
+
+ tooSmall.link(&jit);
+ jit.xorPtr(result, result);
+ MacroAssembler::Jump zeroed = jit.jump();
+
+ tooBig.link(&jit);
+ jit.move(JITCompiler::TrustedImm32(255), result);
+
+ truncatedInt.link(&jit);
+ zeroed.link(&jit);
+}
+
+void SpeculativeJIT::compileGetByValOnIntTypedArray(const TypedArrayDescriptor& descriptor, Node* node, size_t elementSize, TypedArraySignedness signedness)
+{
+ SpeculateCellOperand base(this, node->child1());
+ SpeculateStrictInt32Operand property(this, node->child2());
+ StorageOperand storage(this, node->child3());
+
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+ GPRReg storageReg = storage.gpr();
+
+ GPRTemporary result(this);
+ GPRReg resultReg = result.gpr();
+
+ ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
+
+ speculationCheck(
+ Uncountable, JSValueRegs(), 0,
+ m_jit.branch32(
+ MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, descriptor.m_lengthOffset)));
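+
+ // The (elementSize, signedness) pairs map onto the typed array flavors:
+ // (1, signed) Int8Array; (1, unsigned) Uint8Array and Uint8ClampedArray;
+ // (2, either) Int16/Uint16Array; (4, either) Int32/Uint32Array.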
+ switch (elementSize) {
+ case 1:
+ if (signedness == SignedTypedArray)
+ m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
+ else
+ m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
+ break;
+ case 2:
+ if (signedness == SignedTypedArray)
+ m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
+ else
+ m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
+ break;
+ case 4:
+ m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
+ break;
+ default:
+ CRASH();
+ }
+ if (elementSize < 4 || signedness == SignedTypedArray) {
+ integerResult(resultReg, node);
+ return;
+ }
+
+ ASSERT(elementSize == 4 && signedness == UnsignedTypedArray);
+ if (node->shouldSpeculateInteger()) {
+ forwardSpeculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)), ValueRecovery::uint32InGPR(resultReg));
+ integerResult(resultReg, node);
return;
}
- // Do it the safe way.
- nonSpeculativeValueToInt32(node);
- return;
+ FPRTemporary fresult(this);
+ m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
+ JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
+ m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
+ positive.link(&m_jit);
+ doubleResult(fresult.fpr(), node);
}
-void SpeculativeJIT::compileUInt32ToNumber(Node& node)
+void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node* node, size_t elementSize, TypedArraySignedness signedness, TypedArrayRounding rounding)
{
- if (!nodeCanSpeculateInteger(node.arithNodeFlags())) {
- // We know that this sometimes produces doubles. So produce a double every
- // time. This at least allows subsequent code to not have weird conditionals.
-
- IntegerOperand op1(this, node.child1());
- FPRTemporary result(this);
-
- GPRReg inputGPR = op1.gpr();
- FPRReg outputFPR = result.fpr();
-
- m_jit.convertInt32ToDouble(inputGPR, outputFPR);
+ StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
+ GPRReg storageReg = storage.gpr();
+
+ Edge valueUse = m_jit.graph().varArgChild(node, 2);
+
+ GPRTemporary value;
+ GPRReg valueGPR;
+
+ if (valueUse->isConstant()) {
+ JSValue jsValue = valueOfJSConstant(valueUse.node());
+ if (!jsValue.isNumber()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ noResult(node);
+ return;
+ }
+ double d = jsValue.asNumber();
+ if (rounding == ClampRounding) {
+ ASSERT(elementSize == 1);
+ d = clampDoubleToByte(d);
+ }
+ GPRTemporary scratch(this);
+ GPRReg scratchReg = scratch.gpr();
+ m_jit.move(Imm32(toInt32(d)), scratchReg);
+ value.adopt(scratch);
+ valueGPR = scratchReg;
+ } else {
+ switch (valueUse.useKind()) {
+ case Int32Use: {
+ SpeculateIntegerOperand valueOp(this, valueUse);
+ GPRTemporary scratch(this);
+ GPRReg scratchReg = scratch.gpr();
+ m_jit.move(valueOp.gpr(), scratchReg);
+ if (rounding == ClampRounding) {
+ ASSERT(elementSize == 1);
+ compileClampIntegerToByte(m_jit, scratchReg);
+ }
+ value.adopt(scratch);
+ valueGPR = scratchReg;
+ break;
+ }
- JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, inputGPR, TrustedImm32(0));
- m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), outputFPR);
- positive.link(&m_jit);
+ case NumberUse: {
+ if (rounding == ClampRounding) {
+ ASSERT(elementSize == 1);
+ SpeculateDoubleOperand valueOp(this, valueUse);
+ GPRTemporary result(this);
+ FPRTemporary floatScratch(this);
+ FPRReg fpr = valueOp.fpr();
+ GPRReg gpr = result.gpr();
+ compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
+ value.adopt(result);
+ valueGPR = gpr;
+ } else {
+ SpeculateDoubleOperand valueOp(this, valueUse);
+ GPRTemporary result(this);
+ FPRReg fpr = valueOp.fpr();
+ GPRReg gpr = result.gpr();
+ MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
+ m_jit.xorPtr(gpr, gpr);
+ MacroAssembler::Jump fixed = m_jit.jump();
+ notNaN.link(&m_jit);
+
+ MacroAssembler::Jump failed;
+ if (signedness == SignedTypedArray)
+ failed = m_jit.branchTruncateDoubleToInt32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
+ else
+ failed = m_jit.branchTruncateDoubleToUint32(fpr, gpr, MacroAssembler::BranchIfTruncateFailed);
+
+ addSlowPathGenerator(slowPathCall(failed, this, toInt32, gpr, fpr));
+
+ fixed.link(&m_jit);
+ value.adopt(result);
+ valueGPR = gpr;
+ }
+ break;
+ }
- doubleResult(outputFPR, m_compileIndex);
- return;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ }
+
+ ASSERT_UNUSED(valueGPR, valueGPR != property);
+ ASSERT(valueGPR != base);
+ ASSERT(valueGPR != storageReg);
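+
+ // A typed array store to an out-of-bounds index is silently dropped, so for
+ // PutByVal we branch over the store; the alias form arrives here with the
+ // index already proven in bounds and needs no check.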
+ MacroAssembler::Jump outOfBounds;
+ if (node->op() == PutByVal)
+ outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(base, descriptor.m_lengthOffset));
+
+ switch (elementSize) {
+ case 1:
+ m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
+ break;
+ case 2:
+ m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
+ break;
+ case 4:
+ m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
+ break;
+ default:
+ CRASH();
}
+ if (node->op() == PutByVal)
+ outOfBounds.link(&m_jit);
+ noResult(node);
+}
- IntegerOperand op1(this, node.child1());
- GPRTemporary result(this, op1);
+void SpeculativeJIT::compileGetByValOnFloatTypedArray(const TypedArrayDescriptor& descriptor, Node* node, size_t elementSize)
+{
+ SpeculateCellOperand base(this, node->child1());
+ SpeculateStrictInt32Operand property(this, node->child2());
+ StorageOperand storage(this, node->child3());
- // Test the operand is positive. This is a very special speculation check - we actually
- // use roll-forward speculation here, where if this fails, we jump to the baseline
- // instruction that follows us, rather than the one we're executing right now. We have
- // to do this because by this point, the original values necessary to compile whatever
- // operation the UInt32ToNumber originated from might be dead.
- forwardSpeculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, op1.gpr(), TrustedImm32(0)), ValueRecovery::uint32InGPR(op1.gpr()));
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+ GPRReg storageReg = storage.gpr();
- m_jit.move(op1.gpr(), result.gpr());
- integerResult(result.gpr(), m_compileIndex, op1.format());
+ ASSERT(node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
+
+ FPRTemporary result(this);
+ FPRReg resultReg = result.fpr();
+ speculationCheck(
+ Uncountable, JSValueRegs(), 0,
+ m_jit.branch32(
+ MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, descriptor.m_lengthOffset)));
+ switch (elementSize) {
+ case 4:
+ m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
+ m_jit.convertFloatToDouble(resultReg, resultReg);
+ break;
+ case 8: {
+ m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
+ break;
+ }
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
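+ // Purify NaN: the backing store may hold any NaN bit pattern, but on
+ // 64-bit an arbitrary NaN could collide with the tagged-value encoding,
+ // so every NaN is canonicalized to the quiet QNaN before boxing.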
+ MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, resultReg, resultReg);
+ static const double NaN = QNaN;
+ m_jit.loadDouble(&NaN, resultReg);
+ notNaN.link(&m_jit);
+
+ doubleResult(resultReg, node);
}
-void SpeculativeJIT::compileDoubleAsInt32(Node& node)
+void SpeculativeJIT::compilePutByValForFloatTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node* node, size_t elementSize)
{
- SpeculateDoubleOperand op1(this, node.child1());
- FPRTemporary scratch(this);
- GPRTemporary result(this);
+ StorageOperand storage(this, m_jit.graph().varArgChild(node, 3));
+ GPRReg storageReg = storage.gpr();
- FPRReg valueFPR = op1.fpr();
- FPRReg scratchFPR = scratch.fpr();
- GPRReg resultGPR = result.gpr();
+ Edge baseUse = m_jit.graph().varArgChild(node, 0);
+ Edge valueUse = m_jit.graph().varArgChild(node, 2);
- JITCompiler::JumpList failureCases;
- m_jit.branchConvertDoubleToInt32(valueFPR, resultGPR, failureCases, scratchFPR);
- forwardSpeculationCheck(Overflow, JSValueRegs(), NoNode, failureCases, ValueRecovery::inFPR(valueFPR));
+ SpeculateDoubleOperand valueOp(this, valueUse);
+ FPRTemporary scratch(this);
+ FPRReg valueFPR = valueOp.fpr();
+ FPRReg scratchFPR = scratch.fpr();
- integerResult(resultGPR, m_compileIndex);
+ ASSERT_UNUSED(baseUse, node->arrayMode().alreadyChecked(m_jit.graph(), node, m_state.forNode(baseUse)));
+
+ MacroAssembler::Jump outOfBounds;
+ if (node->op() == PutByVal)
+ outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(base, descriptor.m_lengthOffset));
+
+ switch (elementSize) {
+ case 4: {
+ m_jit.moveDouble(valueFPR, scratchFPR);
+ m_jit.convertDoubleToFloat(valueFPR, scratchFPR);
+ m_jit.storeFloat(scratchFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
+ break;
+ }
+ case 8:
+ m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ if (node->op() == PutByVal)
+ outOfBounds.link(&m_jit);
+ noResult(node);
}
-void SpeculativeJIT::compileInt32ToDouble(Node& node)
+void SpeculativeJIT::compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg)
{
+ // Check that prototype is an object.
+ m_jit.loadPtr(MacroAssembler::Address(prototypeReg, JSCell::structureOffset()), scratchReg);
+ speculationCheck(BadType, JSValueRegs(), 0, m_jit.branchIfNotObject(scratchReg));
+
+ // Initialize scratchReg with the value being checked.
+ m_jit.move(valueReg, scratchReg);
+
+ // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
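+ // E.g. for "x instanceof Foo" with x created by "new Foo", the first load
+ // of structure->prototype yields Foo.prototype and the loop exits at once;
+ // a non-cell prototype (null) falls through to the false result.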
+ MacroAssembler::Label loop(&m_jit);
+ m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
#if USE(JSVALUE64)
- // On JSVALUE64 we have a way of loading double constants in a more direct manner
- // than a int->double conversion. On 32_64, unfortunately, we currently don't have
- // any such mechanism - though we could have it, if we just provisioned some memory
- // in CodeBlock for the double form of integer constants.
- if (at(node.child1()).hasConstant()) {
- ASSERT(isInt32Constant(node.child1().index()));
- FPRTemporary result(this);
- GPRTemporary temp(this);
- m_jit.move(MacroAssembler::ImmPtr(reinterpret_cast<void*>(reinterpretDoubleToIntptr(valueOfNumberConstant(node.child1().index())))), temp.gpr());
- m_jit.movePtrToDouble(temp.gpr(), result.fpr());
- doubleResult(result.fpr(), m_compileIndex);
- return;
- }
+ m_jit.load64(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
+ MacroAssembler::Jump isInstance = m_jit.branch64(MacroAssembler::Equal, scratchReg, prototypeReg);
+ m_jit.branchTest64(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
+#else
+ m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
+ MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
+ m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
#endif
- if (isInt32Prediction(m_state.forNode(node.child1()).m_type)) {
- SpeculateIntegerOperand op1(this, node.child1());
- FPRTemporary result(this);
- m_jit.convertInt32ToDouble(op1.gpr(), result.fpr());
- doubleResult(result.fpr(), m_compileIndex);
+ // No match - result is false.
+#if USE(JSVALUE64)
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
+#else
+ m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
+#endif
+ MacroAssembler::Jump putResult = m_jit.jump();
+
+ isInstance.link(&m_jit);
+#if USE(JSVALUE64)
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(true))), scratchReg);
+#else
+ m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
+#endif
+
+ putResult.link(&m_jit);
+}
+
+void SpeculativeJIT::compileInstanceOf(Node* node)
+{
+ if (node->child1().useKind() == UntypedUse) {
+ // It might not be a cell. Speculate less aggressively.
+ // Or: it might only be used once (i.e. by us), so we get zero benefit
+ // from speculating any more aggressively than we absolutely need to.
+
+ JSValueOperand value(this, node->child1());
+ SpeculateCellOperand prototype(this, node->child2());
+ GPRTemporary scratch(this);
+
+ GPRReg prototypeReg = prototype.gpr();
+ GPRReg scratchReg = scratch.gpr();
+
+#if USE(JSVALUE64)
+ GPRReg valueReg = value.gpr();
+ MacroAssembler::Jump isCell = m_jit.branchTest64(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
+ m_jit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(false))), scratchReg);
+#else
+ GPRReg valueTagReg = value.tagGPR();
+ GPRReg valueReg = value.payloadGPR();
+ MacroAssembler::Jump isCell = m_jit.branch32(MacroAssembler::Equal, valueTagReg, TrustedImm32(JSValue::CellTag));
+ m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
+#endif
+
+ MacroAssembler::Jump done = m_jit.jump();
+
+ isCell.link(&m_jit);
+
+ compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
+
+ done.link(&m_jit);
+
+#if USE(JSVALUE64)
+ jsValueResult(scratchReg, node, DataFormatJSBoolean);
+#else
+ booleanResult(scratchReg, node);
+#endif
return;
}
- JSValueOperand op1(this, node.child1());
- FPRTemporary result(this);
+ SpeculateCellOperand value(this, node->child1());
+ SpeculateCellOperand prototype(this, node->child2());
+
+ GPRTemporary scratch(this);
+ GPRReg valueReg = value.gpr();
+ GPRReg prototypeReg = prototype.gpr();
+ GPRReg scratchReg = scratch.gpr();
+
+ compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
+
#if USE(JSVALUE64)
- GPRTemporary temp(this);
+ jsValueResult(scratchReg, node, DataFormatJSBoolean);
+#else
+ booleanResult(scratchReg, node);
+#endif
+}
+
+void SpeculativeJIT::compileSoftModulo(Node* node)
+{
+ // In the fast path, the dividend value could be the final result
+ // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
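+ // (For example, 3 % 10 produces the dividend, 3, unchanged.)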
+ SpeculateStrictInt32Operand op1(this, node->child1());
+#if CPU(X86) || CPU(X86_64)
+ if (isInt32Constant(node->child2().node())) {
+ int32_t divisor = valueOfInt32Constant(node->child2().node());
+ if (divisor) {
+ GPRReg op1Gpr = op1.gpr();
+
+ GPRTemporary eax(this, X86Registers::eax);
+ GPRTemporary edx(this, X86Registers::edx);
+ GPRTemporary scratch(this);
+ GPRReg scratchGPR = scratch.gpr();
+
+ GPRReg op1SaveGPR;
+ if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
+ op1SaveGPR = allocate();
+ ASSERT(op1Gpr != op1SaveGPR);
+ m_jit.move(op1Gpr, op1SaveGPR);
+ } else
+ op1SaveGPR = op1Gpr;
+ ASSERT(op1SaveGPR != X86Registers::eax);
+ ASSERT(op1SaveGPR != X86Registers::edx);
+
+ m_jit.move(op1Gpr, eax.gpr());
+ m_jit.move(TrustedImm32(divisor), scratchGPR);
+ if (divisor == -1)
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, eax.gpr(), TrustedImm32(-2147483647-1)));
+ m_jit.assembler().cdq();
+ m_jit.assembler().idivl_r(scratchGPR);
+ if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
+ // Check that we're not about to create negative zero.
+ JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
+ speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
+ numeratorPositive.link(&m_jit);
+ }
+ if (op1SaveGPR != op1Gpr)
+ unlock(op1SaveGPR);
+
+ integerResult(edx.gpr(), node);
+ return;
+ }
+ }
+#elif CPU(ARM64)
+ if (isInt32Constant(node->child2().node())) {
+ int32_t divisor = valueOfInt32Constant(node->child2().node());
+ if (divisor > 0 && hasOneBitSet(divisor)) { // If the divisor is a power of two, just mask.
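+ // Worked example for divisor 8: dividend -13 is negated to 13 by the
+ // first cneg (the flags still say LT), 13 & 7 gives 5, and the second
+ // cneg restores the sign, yielding -5, JS's truncated remainder.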
+ GPRReg dividendGPR = op1.gpr();
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.assembler().cmp<32>(dividendGPR, UInt12(0));
+ m_jit.assembler().cneg<32>(resultGPR, dividendGPR, ARM64Assembler::ConditionLT);
+ m_jit.and32(TrustedImm32(divisor - 1), resultGPR);
+ m_jit.assembler().cneg<32>(resultGPR, resultGPR, ARM64Assembler::ConditionLT);
+
+ if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
+ // Check that we're not about to create negative zero.
+ JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
+ speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
+ numeratorPositive.link(&m_jit);
+ }
+ integerResult(resultGPR, node);
+ return;
+ }
+ }
+#elif CPU(APPLE_ARMV7S) || CPU(ARM_THUMB2)
+ if (isInt32Constant(node->child2().node())) {
+ int32_t divisor = valueOfInt32Constant(node->child2().node());
+ if (divisor > 0 && hasOneBitSet(divisor)) { // If the divisor is a power of two, just mask.
+ GPRReg dividendGPR = op1.gpr();
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.assembler().cmp(dividendGPR, ARMThumbImmediate::makeEncodedImm(0));
+ m_jit.assembler().it(ARMv7Assembler::ConditionLT, false);
+ m_jit.assembler().neg(resultGPR, dividendGPR);
+ m_jit.assembler().mov(resultGPR, dividendGPR);
+ m_jit.and32(TrustedImm32(divisor - 1), resultGPR);
+ m_jit.assembler().it(ARMv7Assembler::ConditionLT);
+ m_jit.assembler().neg(resultGPR, resultGPR);
+
+ if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
+ // Check that we're not about to create negative zero.
+ JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
+ speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, resultGPR));
+ numeratorPositive.link(&m_jit);
+ }
+ integerResult(resultGPR, node);
+ return;
+ }
+ }
+#endif
+ SpeculateIntegerOperand op2(this, node->child2());
+#if CPU(X86) || CPU(X86_64)
+ GPRTemporary eax(this, X86Registers::eax);
+ GPRTemporary edx(this, X86Registers::edx);
GPRReg op1GPR = op1.gpr();
- GPRReg tempGPR = temp.gpr();
- FPRReg resultFPR = result.fpr();
-
- JITCompiler::Jump isInteger = m_jit.branchPtr(
- MacroAssembler::AboveOrEqual, op1GPR, GPRInfo::tagTypeNumberRegister);
-
- speculationCheck(
- BadType, JSValueRegs(op1GPR), node.child1(),
- m_jit.branchTestPtr(MacroAssembler::Zero, op1GPR, GPRInfo::tagTypeNumberRegister));
+ GPRReg op2GPR = op2.gpr();
- m_jit.move(op1GPR, tempGPR);
- unboxDouble(tempGPR, resultFPR);
- JITCompiler::Jump done = m_jit.jump();
+ GPRReg op2TempGPR;
+ GPRReg temp;
+ GPRReg op1SaveGPR;
- isInteger.link(&m_jit);
- m_jit.convertInt32ToDouble(op1GPR, resultFPR);
- done.link(&m_jit);
-#else
- FPRTemporary temp(this);
+ if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
+ op2TempGPR = allocate();
+ temp = op2TempGPR;
+ } else {
+ op2TempGPR = InvalidGPRReg;
+ if (op1GPR == X86Registers::eax)
+ temp = X86Registers::edx;
+ else
+ temp = X86Registers::eax;
+ }
- GPRReg op1TagGPR = op1.tagGPR();
- GPRReg op1PayloadGPR = op1.payloadGPR();
- FPRReg tempFPR = temp.fpr();
- FPRReg resultFPR = result.fpr();
+ if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
+ op1SaveGPR = allocate();
+ ASSERT(op1GPR != op1SaveGPR);
+ m_jit.move(op1GPR, op1SaveGPR);
+ } else
+ op1SaveGPR = op1GPR;
- JITCompiler::Jump isInteger = m_jit.branch32(
- MacroAssembler::Equal, op1TagGPR, TrustedImm32(JSValue::Int32Tag));
+ ASSERT(temp != op1GPR);
+ ASSERT(temp != op2GPR);
+ ASSERT(op1SaveGPR != X86Registers::eax);
+ ASSERT(op1SaveGPR != X86Registers::edx);
- speculationCheck(
- BadType, JSValueRegs(op1TagGPR, op1PayloadGPR), node.child1(),
- m_jit.branch32(MacroAssembler::AboveOrEqual, op1TagGPR, TrustedImm32(JSValue::LowestTag)));
+ m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
- unboxDouble(op1TagGPR, op1PayloadGPR, resultFPR, tempFPR);
- JITCompiler::Jump done = m_jit.jump();
+ JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
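+
+ // temp now holds divisor + 1, so the unsigned "Above 1" test fails exactly
+ // for divisor == 0 (temp == 1) and divisor == -1 (temp == 0), the two
+ // denominators that can make idiv fault or overflow; those are re-checked
+ // explicitly on the slow path below.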
- isInteger.link(&m_jit);
- m_jit.convertInt32ToDouble(op1PayloadGPR, resultFPR);
- done.link(&m_jit);
-#endif
+ JITCompiler::Jump done;
+ // FIXME: if the node is not used as a number then we can do this more easily.
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
- doubleResult(resultFPR, m_compileIndex);
-}
-
-static double clampDoubleToByte(double d)
-{
- d += 0.5;
- if (!(d > 0))
- d = 0;
- else if (d > 255)
- d = 255;
- return d;
-}
-
-static void compileClampIntegerToByte(JITCompiler& jit, GPRReg result)
-{
- MacroAssembler::Jump inBounds = jit.branch32(MacroAssembler::BelowOrEqual, result, JITCompiler::TrustedImm32(0xff));
- MacroAssembler::Jump tooBig = jit.branch32(MacroAssembler::GreaterThan, result, JITCompiler::TrustedImm32(0xff));
- jit.xorPtr(result, result);
- MacroAssembler::Jump clamped = jit.jump();
- tooBig.link(&jit);
- jit.move(JITCompiler::TrustedImm32(255), result);
- clamped.link(&jit);
- inBounds.link(&jit);
-}
+ safeDenominator.link(&m_jit);
+
+ if (op2TempGPR != InvalidGPRReg) {
+ m_jit.move(op2GPR, op2TempGPR);
+ op2GPR = op2TempGPR;
+ }
+
+ m_jit.move(op1GPR, eax.gpr());
+ m_jit.assembler().cdq();
+ m_jit.assembler().idivl_r(op2GPR);
+
+ if (op2TempGPR != InvalidGPRReg)
+ unlock(op2TempGPR);
-static void compileClampDoubleToByte(JITCompiler& jit, GPRReg result, FPRReg source, FPRReg scratch)
-{
- // Unordered compare so we pick up NaN
- static const double zero = 0;
- static const double byteMax = 255;
- static const double half = 0.5;
- jit.loadDouble(&zero, scratch);
- MacroAssembler::Jump tooSmall = jit.branchDouble(MacroAssembler::DoubleLessThanOrEqualOrUnordered, source, scratch);
- jit.loadDouble(&byteMax, scratch);
- MacroAssembler::Jump tooBig = jit.branchDouble(MacroAssembler::DoubleGreaterThan, source, scratch);
-
- jit.loadDouble(&half, scratch);
- // FIXME: This should probably just use a floating point round!
- // https://bugs.webkit.org/show_bug.cgi?id=72054
- jit.addDouble(source, scratch);
- jit.truncateDoubleToInt32(scratch, result);
- MacroAssembler::Jump truncatedInt = jit.jump();
-
- tooSmall.link(&jit);
- jit.xorPtr(result, result);
- MacroAssembler::Jump zeroed = jit.jump();
-
- tooBig.link(&jit);
- jit.move(JITCompiler::TrustedImm32(255), result);
+ if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
+ // Check that we're not about to create negative zero.
+ JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
+ speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
+ numeratorPositive.link(&m_jit);
+ }
- truncatedInt.link(&jit);
- zeroed.link(&jit);
-
-}
+ if (op1SaveGPR != op1GPR)
+ unlock(op1SaveGPR);
+
+ integerResult(edx.gpr(), node);
+#elif CPU(ARM64)
+ GPRTemporary temp(this);
+ GPRTemporary quotientThenRemainder(this);
+ GPRTemporary multiplyAnswer(this);
+ GPRReg dividendGPR = op1.gpr();
+ GPRReg divisorGPR = op2.gpr();
+ GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
+ GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
-void SpeculativeJIT::compileGetTypedArrayLength(const TypedArrayDescriptor& descriptor, Node& node, bool needsSpeculationCheck)
-{
- SpeculateCellOperand base(this, node.child1());
- GPRTemporary result(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg resultGPR = result.gpr();
-
- if (needsSpeculationCheck)
- speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
-
- m_jit.load32(MacroAssembler::Address(baseGPR, descriptor.m_lengthOffset), resultGPR);
-
- integerResult(resultGPR, m_compileIndex);
-}
+ m_jit.assembler().sdiv<32>(quotientThenRemainderGPR, dividendGPR, divisorGPR);
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
+ m_jit.assembler().sub<32>(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
-void SpeculativeJIT::compileGetByValOnIntTypedArray(const TypedArrayDescriptor& descriptor, Node& node, size_t elementSize, TypedArraySpeculationRequirements speculationRequirements, TypedArraySignedness signedness)
-{
- SpeculateCellOperand base(this, node.child1());
- SpeculateStrictInt32Operand property(this, node.child2());
- StorageOperand storage(this, node.child3());
+ // If the user cares about negative zero, then speculate that we're not about
+ // to produce negative zero.
+ if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
+ // Check that we're not about to create negative zero.
+ JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
+ speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
+ numeratorPositive.link(&m_jit);
+ }
- GPRReg baseReg = base.gpr();
- GPRReg propertyReg = property.gpr();
- GPRReg storageReg = storage.gpr();
+ integerResult(quotientThenRemainderGPR, node);
+#elif CPU(APPLE_ARMV7S)
+ GPRTemporary temp(this);
+ GPRTemporary quotientThenRemainder(this);
+ GPRTemporary multiplyAnswer(this);
+ GPRReg dividendGPR = op1.gpr();
+ GPRReg divisorGPR = op2.gpr();
+ GPRReg quotientThenRemainderGPR = quotientThenRemainder.gpr();
+ GPRReg multiplyAnswerGPR = multiplyAnswer.gpr();
- GPRTemporary result(this);
- GPRReg resultReg = result.gpr();
+ m_jit.assembler().sdiv(quotientThenRemainderGPR, dividendGPR, divisorGPR);
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotientThenRemainderGPR, divisorGPR, multiplyAnswerGPR));
+ m_jit.assembler().sub(quotientThenRemainderGPR, dividendGPR, multiplyAnswerGPR);
- if (speculationRequirements != NoTypedArrayTypeSpecCheck) {
- ASSERT_NOT_REACHED();
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
- noResult(m_compileIndex);
- return;
+ // If the user cares about negative zero, then speculate that we're not about
+ // to produce negative zero.
+ if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
+ // Check that we're not about to create negative zero.
+ JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, dividendGPR, TrustedImm32(0));
+ speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, quotientThenRemainderGPR));
+ numeratorPositive.link(&m_jit);
}
- MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(baseReg, descriptor.m_lengthOffset));
- m_jit.xorPtr(resultReg, resultReg);
- MacroAssembler::Jump outOfBounds = m_jit.jump();
- inBounds.link(&m_jit);
- switch (elementSize) {
- case 1:
- if (signedness == SignedTypedArray)
- m_jit.load8Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
- else
- m_jit.load8(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesOne), resultReg);
- break;
- case 2:
- if (signedness == SignedTypedArray)
- m_jit.load16Signed(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
- else
- m_jit.load16(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesTwo), resultReg);
- break;
- case 4:
- m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
- break;
- default:
- ASSERT_NOT_REACHED();
- }
- outOfBounds.link(&m_jit);
- if (elementSize < 4 || signedness == SignedTypedArray) {
- integerResult(resultReg, m_compileIndex);
- return;
- }
+ integerResult(quotientThenRemainderGPR, node);
+#else // not an architecture that can do integer division
+ // Do this the *safest* way possible: call out to a C function that will do the modulo,
+ // and then attempt to convert back.
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
- ASSERT(elementSize == 4 && signedness == UnsignedTypedArray);
- if (node.shouldSpeculateInteger()) {
- forwardSpeculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, resultReg, TrustedImm32(0)), ValueRecovery::uint32InGPR(resultReg));
- integerResult(resultReg, m_compileIndex);
- return;
+ FPRResult result(this);
+
+ flushRegisters();
+ callOperation(operationFModOnInts, result.fpr(), op1GPR, op2GPR);
+
+ FPRTemporary scratch(this);
+ GPRTemporary intResult(this);
+ JITCompiler::JumpList failureCases;
+ m_jit.branchConvertDoubleToInt32(result.fpr(), intResult.gpr(), failureCases, scratch.fpr(), false);
+ speculationCheck(Overflow, JSValueRegs(), 0, failureCases);
+ if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
+ // Check that we're not about to create negative zero.
+ JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1GPR, TrustedImm32(0));
+ speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, intResult.gpr()));
+ numeratorPositive.link(&m_jit);
}
- FPRTemporary fresult(this);
- m_jit.convertInt32ToDouble(resultReg, fresult.fpr());
- JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, resultReg, TrustedImm32(0));
- m_jit.addDouble(JITCompiler::AbsoluteAddress(&AssemblyHelpers::twoToThe32), fresult.fpr());
- positive.link(&m_jit);
- doubleResult(fresult.fpr(), m_compileIndex);
+ integerResult(intResult.gpr(), node);
+#endif // CPU(X86) || CPU(X86_64)
}
-void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node& node, size_t elementSize, TypedArraySpeculationRequirements speculationRequirements, TypedArraySignedness signedness, TypedArrayRounding rounding)
+void SpeculativeJIT::compileAdd(Node* node)
{
- Edge baseUse = node.child1();
- Edge valueUse = node.child3();
-
- if (speculationRequirements != NoTypedArrayTypeSpecCheck)
- speculationCheck(BadType, JSValueSource::unboxedCell(base), baseUse, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(base, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
- GPRTemporary value;
- GPRReg valueGPR;
-
- if (at(valueUse).isConstant()) {
- JSValue jsValue = valueOfJSConstant(valueUse.index());
- if (!jsValue.isNumber()) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
- noResult(m_compileIndex);
+ switch (node->binaryUseKind()) {
+ case Int32Use: {
+ if (isNumberConstant(node->child1().node())) {
+ int32_t imm1 = valueOfInt32Constant(node->child1().node());
+ SpeculateIntegerOperand op2(this, node->child2());
+ GPRTemporary result(this);
+
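+ // If all uses of the result truncate to int32 (e.g. it only feeds bitops),
+ // wraparound is unobservable and a plain add32 suffices; otherwise we must
+ // speculate that the add does not overflow.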
+ if (nodeCanTruncateInteger(node->arithNodeFlags())) {
+ m_jit.move(op2.gpr(), result.gpr());
+ m_jit.add32(Imm32(imm1), result.gpr());
+ } else
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
+
+ integerResult(result.gpr(), node);
return;
}
- double d = jsValue.asNumber();
- if (rounding == ClampRounding) {
- ASSERT(elementSize == 1);
- d = clampDoubleToByte(d);
- }
- GPRTemporary scratch(this);
- GPRReg scratchReg = scratch.gpr();
- m_jit.move(Imm32(toInt32(d)), scratchReg);
- value.adopt(scratch);
- valueGPR = scratchReg;
- } else if (at(valueUse).shouldSpeculateInteger()) {
- SpeculateIntegerOperand valueOp(this, valueUse);
- GPRTemporary scratch(this);
- GPRReg scratchReg = scratch.gpr();
- m_jit.move(valueOp.gpr(), scratchReg);
- if (rounding == ClampRounding) {
- ASSERT(elementSize == 1);
- compileClampIntegerToByte(m_jit, scratchReg);
+
+ if (isNumberConstant(node->child2().node())) {
+ SpeculateIntegerOperand op1(this, node->child1());
+ int32_t imm2 = valueOfInt32Constant(node->child2().node());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node->arithNodeFlags())) {
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.add32(Imm32(imm2), result.gpr());
+ } else
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
+
+ integerResult(result.gpr(), node);
+ return;
}
- value.adopt(scratch);
- valueGPR = scratchReg;
- } else if (rounding == ClampRounding) {
- ASSERT(elementSize == 1);
- SpeculateDoubleOperand valueOp(this, valueUse);
- GPRTemporary result(this);
- FPRTemporary floatScratch(this);
- FPRReg fpr = valueOp.fpr();
- GPRReg gpr = result.gpr();
- compileClampDoubleToByte(m_jit, gpr, fpr, floatScratch.fpr());
- value.adopt(result);
- valueGPR = gpr;
- } else {
- SpeculateDoubleOperand valueOp(this, valueUse);
- GPRTemporary result(this);
- FPRReg fpr = valueOp.fpr();
- GPRReg gpr = result.gpr();
- MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, fpr, fpr);
- m_jit.xorPtr(gpr, gpr);
- MacroAssembler::Jump fixed = m_jit.jump();
- notNaN.link(&m_jit);
-
- MacroAssembler::Jump done;
- if (signedness == SignedTypedArray)
- done = m_jit.branchTruncateDoubleToInt32(fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful);
- else
- done = m_jit.branchTruncateDoubleToUint32(fpr, gpr, MacroAssembler::BranchIfTruncateSuccessful);
+
+ SpeculateIntegerOperand op1(this, node->child1());
+ SpeculateIntegerOperand op2(this, node->child2());
+ GPRTemporary result(this, op1, op2);
- silentSpillAllRegisters(gpr);
- callOperation(toInt32, gpr, fpr);
- silentFillAllRegisters(gpr);
+ GPRReg gpr1 = op1.gpr();
+ GPRReg gpr2 = op2.gpr();
+ GPRReg gprResult = result.gpr();
- done.link(&m_jit);
- fixed.link(&m_jit);
- value.adopt(result);
- valueGPR = gpr;
- }
- ASSERT_UNUSED(valueGPR, valueGPR != property);
- ASSERT(valueGPR != base);
- GPRTemporary storage(this);
- GPRReg storageReg = storage.gpr();
- ASSERT(valueGPR != storageReg);
- m_jit.loadPtr(MacroAssembler::Address(base, descriptor.m_storageOffset), storageReg);
- MacroAssembler::Jump outOfBounds;
- if (speculationRequirements != NoTypedArraySpecCheck)
- outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(base, descriptor.m_lengthOffset));
+ if (nodeCanTruncateInteger(node->arithNodeFlags())) {
+ if (gpr1 == gprResult)
+ m_jit.add32(gpr2, gprResult);
+ else {
+ m_jit.move(gpr2, gprResult);
+ m_jit.add32(gpr1, gprResult);
+ }
+ } else {
+ MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
+
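+ // If the destination aliases an operand, a failed branchAdd32 has already
+ // clobbered that operand, so we attach a SpeculationRecovery telling the
+ // OSR exit ramp to subtract the other operand back out first.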
+ if (gpr1 == gprResult)
+ speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
+ else if (gpr2 == gprResult)
+ speculationCheck(Overflow, JSValueRegs(), 0, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
+ else
+ speculationCheck(Overflow, JSValueRegs(), 0, check);
+ }
- switch (elementSize) {
- case 1:
- m_jit.store8(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesOne));
- break;
- case 2:
- m_jit.store16(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesTwo));
- break;
- case 4:
- m_jit.store32(value.gpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
- break;
- default:
- ASSERT_NOT_REACHED();
+ integerResult(gprResult, node);
+ return;
}
- if (speculationRequirements != NoTypedArraySpecCheck)
- outOfBounds.link(&m_jit);
- noResult(m_compileIndex);
-}
+
+ case NumberUse: {
+ SpeculateDoubleOperand op1(this, node->child1());
+ SpeculateDoubleOperand op2(this, node->child2());
+ FPRTemporary result(this, op1, op2);
-void SpeculativeJIT::compileGetByValOnFloatTypedArray(const TypedArrayDescriptor& descriptor, Node& node, size_t elementSize, TypedArraySpeculationRequirements speculationRequirements)
-{
- SpeculateCellOperand base(this, node.child1());
- SpeculateStrictInt32Operand property(this, node.child2());
- StorageOperand storage(this, node.child3());
+ FPRReg reg1 = op1.fpr();
+ FPRReg reg2 = op2.fpr();
+ m_jit.addDouble(reg1, reg2, result.fpr());
- GPRReg baseReg = base.gpr();
- GPRReg propertyReg = property.gpr();
- GPRReg storageReg = storage.gpr();
-
- if (speculationRequirements != NoTypedArrayTypeSpecCheck) {
- ASSERT_NOT_REACHED();
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
- noResult(m_compileIndex);
+ doubleResult(result.fpr(), node);
return;
}
-
- FPRTemporary result(this);
- FPRReg resultReg = result.fpr();
- ASSERT(speculationRequirements != NoTypedArraySpecCheck);
- MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(baseReg, descriptor.m_lengthOffset));
- static const double zero = 0;
- m_jit.loadDouble(&zero, resultReg);
- MacroAssembler::Jump outOfBounds = m_jit.jump();
- inBounds.link(&m_jit);
- switch (elementSize) {
- case 4:
- m_jit.loadFloat(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesFour), resultReg);
- m_jit.convertFloatToDouble(resultReg, resultReg);
- break;
- case 8: {
- m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), resultReg);
- MacroAssembler::Jump notNaN = m_jit.branchDouble(MacroAssembler::DoubleEqual, resultReg, resultReg);
- static const double NaN = std::numeric_limits<double>::quiet_NaN();
- m_jit.loadDouble(&NaN, resultReg);
- notNaN.link(&m_jit);
- break;
+
+ case UntypedUse: {
+ RELEASE_ASSERT(node->op() == ValueAdd);
+ compileValueAdd(node);
+ return;
}
+
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
}
- outOfBounds.link(&m_jit);
- doubleResult(resultReg, m_compileIndex);
}
-void SpeculativeJIT::compilePutByValForFloatTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node& node, size_t elementSize, TypedArraySpeculationRequirements speculationRequirements)
+void SpeculativeJIT::compileMakeRope(Node* node)
{
- Edge baseUse = node.child1();
- Edge valueUse = node.child3();
-
- SpeculateDoubleOperand valueOp(this, valueUse);
-
- if (speculationRequirements != NoTypedArrayTypeSpecCheck)
- speculationCheck(BadType, JSValueSource::unboxedCell(base), baseUse.index(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(base, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
+ ASSERT(node->child1().useKind() == KnownStringUse);
+ ASSERT(node->child2().useKind() == KnownStringUse);
+ ASSERT(!node->child3() || node->child3().useKind() == KnownStringUse);
+ SpeculateCellOperand op1(this, node->child1());
+ SpeculateCellOperand op2(this, node->child2());
+ SpeculateCellOperand op3(this, node->child3());
GPRTemporary result(this);
+ GPRTemporary allocator(this);
+ GPRTemporary scratch(this);
- GPRTemporary storage(this);
- GPRReg storageReg = storage.gpr();
+ GPRReg opGPRs[3];
+ unsigned numOpGPRs;
+ opGPRs[0] = op1.gpr();
+ opGPRs[1] = op2.gpr();
+ if (node->child3()) {
+ opGPRs[2] = op3.gpr();
+ numOpGPRs = 3;
+ } else {
+ opGPRs[2] = InvalidGPRReg;
+ numOpGPRs = 2;
+ }
+ GPRReg resultGPR = result.gpr();
+ GPRReg allocatorGPR = allocator.gpr();
+ GPRReg scratchGPR = scratch.gpr();
- m_jit.loadPtr(MacroAssembler::Address(base, descriptor.m_storageOffset), storageReg);
- MacroAssembler::Jump outOfBounds;
- if (speculationRequirements != NoTypedArraySpecCheck)
- outOfBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, property, MacroAssembler::Address(base, descriptor.m_lengthOffset));
+ JITCompiler::JumpList slowPath;
+ MarkedAllocator& markedAllocator = m_jit.vm()->heap.allocatorForObjectWithImmortalStructureDestructor(sizeof(JSRopeString));
+ m_jit.move(TrustedImmPtr(&markedAllocator), allocatorGPR);
+ emitAllocateJSCell(resultGPR, allocatorGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()), scratchGPR, slowPath);
+
+ m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSString::offsetOfValue()));
+ for (unsigned i = 0; i < numOpGPRs; ++i)
+ m_jit.storePtr(opGPRs[i], JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
+ for (unsigned i = numOpGPRs; i < JSRopeString::s_maxInternalRopeLength; ++i)
+ m_jit.storePtr(TrustedImmPtr(0), JITCompiler::Address(resultGPR, JSRopeString::offsetOfFibers() + sizeof(WriteBarrier<JSString>) * i));
+ m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfFlags()), scratchGPR);
+ m_jit.load32(JITCompiler::Address(opGPRs[0], JSString::offsetOfLength()), allocatorGPR);
+ for (unsigned i = 1; i < numOpGPRs; ++i) {
+ m_jit.and32(JITCompiler::Address(opGPRs[i], JSString::offsetOfFlags()), scratchGPR);
+ m_jit.add32(JITCompiler::Address(opGPRs[i], JSString::offsetOfLength()), allocatorGPR);
+ }
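+ // The rope is 8-bit only if every fiber is 8-bit (hence the flag AND in the loop
+ // above), and its length is the sum of the fiber lengths.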
+ m_jit.and32(JITCompiler::TrustedImm32(JSString::Is8Bit), scratchGPR);
+ m_jit.store32(scratchGPR, JITCompiler::Address(resultGPR, JSString::offsetOfFlags()));
+ m_jit.store32(allocatorGPR, JITCompiler::Address(resultGPR, JSString::offsetOfLength()));
- switch (elementSize) {
- case 4: {
- FPRTemporary scratch(this);
- m_jit.moveDouble(valueOp.fpr(), scratch.fpr());
- m_jit.convertDoubleToFloat(valueOp.fpr(), scratch.fpr());
- m_jit.storeFloat(scratch.fpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesFour));
+ switch (numOpGPRs) {
+ case 2:
+ addSlowPathGenerator(slowPathCall(
+ slowPath, this, operationMakeRope2, resultGPR, opGPRs[0], opGPRs[1]));
break;
- }
- case 8:
- m_jit.storeDouble(valueOp.fpr(), MacroAssembler::BaseIndex(storageReg, property, MacroAssembler::TimesEight));
+ case 3:
+ addSlowPathGenerator(slowPathCall(
+ slowPath, this, operationMakeRope3, resultGPR, opGPRs[0], opGPRs[1], opGPRs[2]));
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
}
- if (speculationRequirements != NoTypedArraySpecCheck)
- outOfBounds.link(&m_jit);
- noResult(m_compileIndex);
+
+ cellResult(resultGPR, node);
}
-void SpeculativeJIT::compileInstanceOfForObject(Node&, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchReg)
+void SpeculativeJIT::compileArithSub(Node* node)
{
- // Check that prototype is an object.
- m_jit.loadPtr(MacroAssembler::Address(prototypeReg, JSCell::structureOffset()), scratchReg);
- speculationCheck(BadType, JSValueRegs(), NoNode, m_jit.branchIfNotObject(scratchReg));
-
- // Initialize scratchReg with the value being checked.
- m_jit.move(valueReg, scratchReg);
-
- // Walk up the prototype chain of the value (in scratchReg), comparing to prototypeReg.
- MacroAssembler::Label loop(&m_jit);
- m_jit.loadPtr(MacroAssembler::Address(scratchReg, JSCell::structureOffset()), scratchReg);
-#if USE(JSVALUE64)
- m_jit.loadPtr(MacroAssembler::Address(scratchReg, Structure::prototypeOffset()), scratchReg);
-#else
- m_jit.load32(MacroAssembler::Address(scratchReg, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), scratchReg);
-#endif
- MacroAssembler::Jump isInstance = m_jit.branchPtr(MacroAssembler::Equal, scratchReg, prototypeReg);
-#if USE(JSVALUE64)
- m_jit.branchTestPtr(MacroAssembler::Zero, scratchReg, GPRInfo::tagMaskRegister).linkTo(loop, &m_jit);
-#else
- m_jit.branchTest32(MacroAssembler::NonZero, scratchReg).linkTo(loop, &m_jit);
-#endif
-
- // No match - result is false.
-#if USE(JSVALUE64)
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), scratchReg);
-#else
- m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
-#endif
- MacroAssembler::Jump putResult = m_jit.jump();
-
- isInstance.link(&m_jit);
-#if USE(JSVALUE64)
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(true))), scratchReg);
+ switch (node->binaryUseKind()) {
+ case Int32Use: {
+ if (isNumberConstant(node->child2().node())) {
+ SpeculateIntegerOperand op1(this, node->child1());
+ int32_t imm2 = valueOfInt32Constant(node->child2().node());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node->arithNodeFlags())) {
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.sub32(Imm32(imm2), result.gpr());
+ } else {
+#if ENABLE(JIT_CONSTANT_BLINDING)
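+ // With constant blinding, the blinded immediate has to be rematerialized, so this
+ // branchSub32 overload takes an extra scratch register.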
+ GPRTemporary scratch(this);
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
#else
- m_jit.move(MacroAssembler::TrustedImm32(1), scratchReg);
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
#endif
-
- putResult.link(&m_jit);
-}
+ }
-void SpeculativeJIT::compileInstanceOf(Node& node)
-{
- if ((!!(at(node.child1()).prediction() & ~PredictCell)
- && !!(m_state.forNode(node.child1()).m_type & ~PredictCell))
- || at(node.child1()).adjustedRefCount() == 1) {
- // It might not be a cell. Speculate less aggressively.
- // Or: it might only be used once (i.e. by us), so we get zero benefit
- // from speculating any more aggressively than we absolutely need to.
-
- JSValueOperand value(this, node.child1());
- SpeculateCellOperand prototype(this, node.child3());
- GPRTemporary scratch(this);
+ integerResult(result.gpr(), node);
+ return;
+ }
+
+ if (isNumberConstant(node->child1().node())) {
+ int32_t imm1 = valueOfInt32Constant(node->child1().node());
+ SpeculateIntegerOperand op2(this, node->child2());
+ GPRTemporary result(this);
+
+ m_jit.move(Imm32(imm1), result.gpr());
+ if (nodeCanTruncateInteger(node->arithNodeFlags()))
+ m_jit.sub32(op2.gpr(), result.gpr());
+ else
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
+
+ integerResult(result.gpr(), node);
+ return;
+ }
+
+ SpeculateIntegerOperand op1(this, node->child1());
+ SpeculateIntegerOperand op2(this, node->child2());
+ GPRTemporary result(this);
+
+ if (nodeCanTruncateInteger(node->arithNodeFlags())) {
+ m_jit.move(op1.gpr(), result.gpr());
+ m_jit.sub32(op2.gpr(), result.gpr());
+ } else
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
+
+ integerResult(result.gpr(), node);
+ return;
+ }
- GPRReg prototypeReg = prototype.gpr();
- GPRReg scratchReg = scratch.gpr();
+ case NumberUse: {
+ SpeculateDoubleOperand op1(this, node->child1());
+ SpeculateDoubleOperand op2(this, node->child2());
+ FPRTemporary result(this, op1);
+
+ FPRReg reg1 = op1.fpr();
+ FPRReg reg2 = op2.fpr();
+ m_jit.subDouble(reg1, reg2, result.fpr());
+
+ doubleResult(result.fpr(), node);
+ return;
+ }
-#if USE(JSVALUE64)
- GPRReg valueReg = value.gpr();
- MacroAssembler::Jump isCell = m_jit.branchTestPtr(MacroAssembler::Zero, valueReg, GPRInfo::tagMaskRegister);
- m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsBoolean(false))), scratchReg);
-#else
- GPRReg valueTagReg = value.tagGPR();
- GPRReg valueReg = value.payloadGPR();
- MacroAssembler::Jump isCell = m_jit.branch32(MacroAssembler::Equal, valueTagReg, TrustedImm32(JSValue::CellTag));
- m_jit.move(MacroAssembler::TrustedImm32(0), scratchReg);
-#endif
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return;
+ }
+}
- MacroAssembler::Jump done = m_jit.jump();
+void SpeculativeJIT::compileArithNegate(Node* node)
+{
+ switch (node->child1().useKind()) {
+ case Int32Use: {
+ SpeculateIntegerOperand op1(this, node->child1());
+ GPRTemporary result(this);
+
+ m_jit.move(op1.gpr(), result.gpr());
+
+ if (nodeCanTruncateInteger(node->arithNodeFlags()))
+ m_jit.neg32(result.gpr());
+ else {
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
+ if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags()))
+ speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branchTest32(MacroAssembler::Zero, result.gpr()));
+ }
+
+ integerResult(result.gpr(), node);
+ return;
+ }
- isCell.link(&m_jit);
+ case NumberUse: {
+ SpeculateDoubleOperand op1(this, node->child1());
+ FPRTemporary result(this);
- compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
+ m_jit.negateDouble(op1.fpr(), result.fpr());
- done.link(&m_jit);
-
-#if USE(JSVALUE64)
- jsValueResult(scratchReg, m_compileIndex, DataFormatJSBoolean);
-#else
- booleanResult(scratchReg, m_compileIndex);
-#endif
+ doubleResult(result.fpr(), node);
return;
}
-
- SpeculateCellOperand value(this, node.child1());
- // Base unused since we speculate default InstanceOf behaviour in CheckHasInstance.
- SpeculateCellOperand prototype(this, node.child3());
-
- GPRTemporary scratch(this);
-
- GPRReg valueReg = value.gpr();
- GPRReg prototypeReg = prototype.gpr();
- GPRReg scratchReg = scratch.gpr();
-
- compileInstanceOfForObject(node, valueReg, prototypeReg, scratchReg);
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return;
+ }
+}
+void SpeculativeJIT::compileArithIMul(Node* node)
+{
+ SpeculateIntegerOperand op1(this, node->child1());
+ SpeculateIntegerOperand op2(this, node->child2());
+ GPRTemporary result(this);
-#if USE(JSVALUE64)
- jsValueResult(scratchReg, m_compileIndex, DataFormatJSBoolean);
-#else
- booleanResult(scratchReg, m_compileIndex);
-#endif
+ GPRReg reg1 = op1.gpr();
+ GPRReg reg2 = op2.gpr();
+
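+ // Math.imul semantics: the result is always truncated to int32, so no overflow
+ // or negative-zero checks are required here.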
+ m_jit.move(reg1, result.gpr());
+ m_jit.mul32(reg2, result.gpr());
+ integerResult(result.gpr(), node);
+ return;
}
-void SpeculativeJIT::compileSoftModulo(Node& node)
+void SpeculativeJIT::compileArithMul(Node* node)
{
- // In the fast path, the dividend value could be the final result
- // (in case of |dividend| < |divisor|), so we speculate it as strict int32.
- SpeculateStrictInt32Operand op1(this, node.child1());
-#if CPU(X86) || CPU(X86_64)
- if (isInt32Constant(node.child2().index())) {
- int32_t divisor = valueOfInt32Constant(node.child2().index());
- if (divisor) {
- GPRReg op1Gpr = op1.gpr();
-
- GPRTemporary eax(this, X86Registers::eax);
- GPRTemporary edx(this, X86Registers::edx);
- GPRTemporary scratch(this);
- GPRReg scratchGPR = scratch.gpr();
+ switch (node->binaryUseKind()) {
+ case Int32Use: {
+ SpeculateIntegerOperand op1(this, node->child1());
+ SpeculateIntegerOperand op2(this, node->child2());
+ GPRTemporary result(this);
- GPRReg op1SaveGPR;
- if (op1Gpr == X86Registers::eax || op1Gpr == X86Registers::edx) {
- op1SaveGPR = allocate();
- ASSERT(op1Gpr != op1SaveGPR);
- m_jit.move(op1Gpr, op1SaveGPR);
- } else
- op1SaveGPR = op1Gpr;
- ASSERT(op1SaveGPR != X86Registers::eax);
- ASSERT(op1SaveGPR != X86Registers::edx);
+ GPRReg reg1 = op1.gpr();
+ GPRReg reg2 = op2.gpr();
- m_jit.move(op1Gpr, eax.gpr());
- m_jit.move(TrustedImm32(divisor), scratchGPR);
- if (divisor == -1)
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(JITCompiler::Equal, eax.gpr(), TrustedImm32(-2147483647-1)));
- m_jit.assembler().cdq();
- m_jit.assembler().idivl_r(scratchGPR);
- // Check that we're not about to create negative zero.
- // FIXME: if the node use doesn't care about neg zero, we can do this more easily.
- JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
- numeratorPositive.link(&m_jit);
+ // We can perform truncated multiplications if we get to this point, because if the
+ // fixup phase could not prove that it would be safe, it would have turned us into
+ // a double multiplication.
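+ // (Int32 multiply-and-wrap can differ from a double multiply truncated afterward
+ // once the true product needs more than 53 bits of precision, which is why the
+ // fixup phase's proof matters here.)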
+ if (nodeCanTruncateInteger(node->arithNodeFlags())) {
+ m_jit.move(reg1, result.gpr());
+ m_jit.mul32(reg2, result.gpr());
+ } else {
+ speculationCheck(
+ Overflow, JSValueRegs(), 0,
+ m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
+ }
- if (op1SaveGPR != op1Gpr)
- unlock(op1SaveGPR);
-
- integerResult(edx.gpr(), m_compileIndex);
- return;
+ // Check for negative zero, if the users of this node care about such things.
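+ // For example, -1 * 0 produces -0 as a double, which int32 cannot represent;
+ // since 1 / -0 is -Infinity, consumers can observe the difference.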
+ if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
+ MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
+ speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
+ speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
+ resultNonZero.link(&m_jit);
}
+
+ integerResult(result.gpr(), node);
+ return;
}
-#endif
+
+ case NumberUse: {
+ SpeculateDoubleOperand op1(this, node->child1());
+ SpeculateDoubleOperand op2(this, node->child2());
+ FPRTemporary result(this, op1, op2);
+
+ FPRReg reg1 = op1.fpr();
+ FPRReg reg2 = op2.fpr();
+
+ m_jit.mulDouble(reg1, reg2, result.fpr());
+
+ doubleResult(result.fpr(), node);
+ return;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return;
+ }
+}
- SpeculateIntegerOperand op2(this, node.child2());
#if CPU(X86) || CPU(X86_64)
+void SpeculativeJIT::compileIntegerArithDivForX86(Node* node)
+{
+ SpeculateIntegerOperand op1(this, node->child1());
+ SpeculateIntegerOperand op2(this, node->child2());
GPRTemporary eax(this, X86Registers::eax);
GPRTemporary edx(this, X86Registers::edx);
GPRReg op1GPR = op1.gpr();
GPRReg op2GPR = op2.gpr();
GPRReg op2TempGPR;
GPRReg temp;
- GPRReg op1SaveGPR;
-
if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
    op2TempGPR = allocate();
    temp = op2TempGPR;
} else {
    op2TempGPR = InvalidGPRReg;
    if (op1GPR == X86Registers::eax)
        temp = X86Registers::edx;
    else
        temp = X86Registers::eax;
}
- if (op1GPR == X86Registers::eax || op1GPR == X86Registers::edx) {
- op1SaveGPR = allocate();
- ASSERT(op1GPR != op1SaveGPR);
- m_jit.move(op1GPR, op1SaveGPR);
- } else
- op1SaveGPR = op1GPR;
-
ASSERT(temp != op1GPR);
- ASSERT(temp != op2GPR);
- ASSERT(op1SaveGPR != X86Registers::eax);
- ASSERT(op1SaveGPR != X86Registers::edx);
+ ASSERT(temp != op2GPR);
m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
- JITCompiler::Jump done;
- // FIXME: if the node is not used as number then we can do this more easily.
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
+ JITCompiler::JumpList done;
+ if (nodeUsedAsNumber(node->arithNodeFlags())) {
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
+ } else {
+ // This is the case where we convert the result to an int after we're done, and we
+ // already know that the denominator is either -1 or 0. So, if the denominator is
+ // zero, then the result should be zero. If the denominator is not zero (i.e. it's
+ // -1) and the numerator is -2^31 then the result should be -2^31. Otherwise we
+ // are happy to fall through to a normal division, since we're just dividing
+ // something by negative 1.
+
+ JITCompiler::Jump notZero = m_jit.branchTest32(JITCompiler::NonZero, op2GPR);
+ m_jit.move(TrustedImm32(0), eax.gpr());
+ done.append(m_jit.jump());
+
+ notZero.link(&m_jit);
+ JITCompiler::Jump notNeg2ToThe31 =
+ m_jit.branch32(JITCompiler::NotEqual, op1GPR, TrustedImm32(-2147483647-1));
+ m_jit.move(op1GPR, eax.gpr());
+ done.append(m_jit.jump());
+
+ notNeg2ToThe31.link(&m_jit);
+ }
safeDenominator.link(&m_jit);
-
+
+ // If the user cares about negative zero, then speculate that we're not about
+ // to produce negative zero.
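+ // (Integer division only yields -0 when the numerator is 0 and the divisor is
+ // negative, e.g. 0 / -4.)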
+ if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
+ MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
+ speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
+ numeratorNonZero.link(&m_jit);
+ }
+
if (op2TempGPR != InvalidGPRReg) {
m_jit.move(op2GPR, op2TempGPR);
    op2GPR = op2TempGPR;
}

m_jit.move(op1GPR, eax.gpr());
m_jit.assembler().cdq();
m_jit.assembler().idivl_r(op2GPR);

if (op2TempGPR != InvalidGPRReg)
unlock(op2TempGPR);
- // Check that we're not about to create negative zero.
- // FIXME: if the node use doesn't care about neg zero, we can do this more easily.
- JITCompiler::Jump numeratorPositive = m_jit.branch32(JITCompiler::GreaterThanOrEqual, op1SaveGPR, TrustedImm32(0));
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::Zero, edx.gpr()));
- numeratorPositive.link(&m_jit);
-
- if (op1SaveGPR != op1GPR)
- unlock(op1SaveGPR);
+ // Check that there was no remainder. If there had been, then we'd be obligated to
+ // produce a double result instead.
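+ // (e.g. 7 / 2 is 3.5 in JS; the int32 fast path is only valid when the remainder
+ // left in edx by idiv is zero.)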
+ if (nodeUsedAsNumber(node->arithNodeFlags()))
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
+ else
+ done.link(&m_jit);
- integerResult(edx.gpr(), m_compileIndex);
-#else // CPU(X86) || CPU(X86_64) --> so not X86
- // Do this the *safest* way possible: call out to a C function that will do the modulo,
- // and then attempt to convert back.
+ integerResult(eax.gpr(), node);
+}
+#elif CPU(ARM64)
+void SpeculativeJIT::compileIntegerArithDivForARM64(Node* node)
+{
+ SpeculateIntegerOperand op1(this, node->child1());
+ SpeculateIntegerOperand op2(this, node->child2());
GPRReg op1GPR = op1.gpr();
GPRReg op2GPR = op2.gpr();
-
- FPRResult result(this);
-
- flushRegisters();
- callOperation(operationFModOnInts, result.fpr(), op1GPR, op2GPR);
-
- FPRTemporary scratch(this);
- GPRTemporary intResult(this);
- JITCompiler::JumpList failureCases;
- m_jit.branchConvertDoubleToInt32(result.fpr(), intResult.gpr(), failureCases, scratch.fpr());
- speculationCheck(Overflow, JSValueRegs(), NoNode, failureCases);
-
- integerResult(intResult.gpr(), m_compileIndex);
-#endif // CPU(X86) || CPU(X86_64)
+ GPRTemporary quotient(this);
+ GPRTemporary multiplyAnswer(this);
+
+ // If the user cares about negative zero, then speculate that we're not about
+ // to produce negative zero.
+ if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
+ MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
+ speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
+ numeratorNonZero.link(&m_jit);
+ }
+
+ m_jit.assembler().sdiv<32>(quotient.gpr(), op1GPR, op2GPR);
+
+ // Check that there was no remainder. If there had been, then we'd be obligated to
+ // produce a double result instead.
+ if (nodeUsedAsNumber(node->arithNodeFlags())) {
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
+ }
+
+ integerResult(quotient.gpr(), node);
+}
+#elif CPU(APPLE_ARMV7S)
+void SpeculativeJIT::compileIntegerArithDivForARMv7s(Node* node)
+{
+ SpeculateIntegerOperand op1(this, node->child1());
+ SpeculateIntegerOperand op2(this, node->child2());
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+ GPRTemporary quotient(this);
+ GPRTemporary multiplyAnswer(this);
+
+ // If the user cares about negative zero, then speculate that we're not about
+ // to produce negative zero.
+ if (!nodeCanIgnoreNegativeZero(node->arithNodeFlags())) {
+ MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
+ speculationCheck(NegativeZero, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
+ numeratorNonZero.link(&m_jit);
+ }
+
+ m_jit.assembler().sdiv(quotient.gpr(), op1GPR, op2GPR);
+
+ // Check that there was no remainder. If there had been, then we'd be obligated to
+ // produce a double result instead.
+ if (nodeUsedAsNumber(node->arithNodeFlags())) {
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branchMul32(JITCompiler::Overflow, quotient.gpr(), op2GPR, multiplyAnswer.gpr()));
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(JITCompiler::NotEqual, multiplyAnswer.gpr(), op1GPR));
+ }
+
+ integerResult(quotient.gpr(), node);
}
+#endif
-void SpeculativeJIT::compileAdd(Node& node)
+void SpeculativeJIT::compileArithMod(Node* node)
{
- if (m_jit.graph().addShouldSpeculateInteger(node)) {
- if (isNumberConstant(node.child1().index())) {
- int32_t imm1 = valueOfNumberConstantAsInt32(node.child1().index());
- SpeculateIntegerOperand op2(this, node.child2());
- GPRTemporary result(this);
+ switch (node->binaryUseKind()) {
+ case Int32Use: {
+ compileSoftModulo(node);
+ return;
+ }
+
+ case NumberUse: {
+ SpeculateDoubleOperand op1(this, node->child1());
+ SpeculateDoubleOperand op2(this, node->child2());
+
+ FPRReg op1FPR = op1.fpr();
+ FPRReg op2FPR = op2.fpr();
+
+ flushRegisters();
+
+ FPRResult result(this);
+
+ callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
+
+ doubleResult(result.fpr(), node);
+ return;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return;
+ }
+}
- if (nodeCanTruncateInteger(node.arithNodeFlags())) {
- m_jit.move(op2.gpr(), result.gpr());
- m_jit.add32(Imm32(imm1), result.gpr());
- } else
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), Imm32(imm1), result.gpr()));
+// Returns true if the compare is fused with a subsequent branch.
+bool SpeculativeJIT::compare(Node* node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
+{
+ if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
+ return true;
- integerResult(result.gpr(), m_compileIndex);
- return;
+ if (node->isBinaryUseKind(Int32Use)) {
+ compileIntegerCompare(node, condition);
+ return false;
+ }
+
+ if (node->isBinaryUseKind(NumberUse)) {
+ compileDoubleCompare(node, doubleCondition);
+ return false;
+ }
+
+ if (node->op() == CompareEq) {
+ if (node->isBinaryUseKind(StringUse)) {
+ compileStringEquality(node);
+ return false;
+ }
+
+ if (node->isBinaryUseKind(BooleanUse)) {
+ compileBooleanCompare(node, condition);
+ return false;
}
-
- if (isNumberConstant(node.child2().index())) {
- SpeculateIntegerOperand op1(this, node.child1());
- int32_t imm2 = valueOfNumberConstantAsInt32(node.child2().index());
- GPRTemporary result(this);
-
- if (nodeCanTruncateInteger(node.arithNodeFlags())) {
- m_jit.move(op1.gpr(), result.gpr());
- m_jit.add32(Imm32(imm2), result.gpr());
- } else
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchAdd32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
- integerResult(result.gpr(), m_compileIndex);
- return;
+ if (node->isBinaryUseKind(ObjectUse)) {
+ compileObjectEquality(node);
+ return false;
}
-
- SpeculateIntegerOperand op1(this, node.child1());
- SpeculateIntegerOperand op2(this, node.child2());
- GPRTemporary result(this, op1, op2);
+
+ if (node->child1().useKind() == ObjectUse && node->child2().useKind() == ObjectOrOtherUse) {
+ compileObjectToObjectOrOtherEquality(node->child1(), node->child2());
+ return false;
+ }
+
+ if (node->child1().useKind() == ObjectOrOtherUse && node->child2().useKind() == ObjectUse) {
+ compileObjectToObjectOrOtherEquality(node->child2(), node->child1());
+ return false;
+ }
+ }
+
+ nonSpeculativeNonPeepholeCompare(node, condition, operation);
+ return false;
+}
- GPRReg gpr1 = op1.gpr();
- GPRReg gpr2 = op2.gpr();
- GPRReg gprResult = result.gpr();
+bool SpeculativeJIT::compileStrictEqForConstant(Node* node, Edge value, JSValue constant)
+{
+ JSValueOperand op1(this, value);
+
+ // FIXME: This code is wrong for the case that the constant is null or undefined,
+ // and the value is an object that MasqueradesAsUndefined.
+ // https://bugs.webkit.org/show_bug.cgi?id=109487
+
+ unsigned branchIndexInBlock = detectPeepHoleBranch();
+ if (branchIndexInBlock != UINT_MAX) {
+ Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
+ BlockIndex taken = branchNode->takenBlockIndex();
+ BlockIndex notTaken = branchNode->notTakenBlockIndex();
+ MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
+
+ // The branch instruction will branch to the taken block.
+ // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
+ if (taken == nextBlock()) {
+ condition = MacroAssembler::NotEqual;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
- if (nodeCanTruncateInteger(node.arithNodeFlags())) {
- if (gpr1 == gprResult)
- m_jit.add32(gpr2, gprResult);
- else {
- m_jit.move(gpr2, gprResult);
- m_jit.add32(gpr1, gprResult);
- }
+#if USE(JSVALUE64)
+ branch64(condition, op1.gpr(), MacroAssembler::TrustedImm64(JSValue::encode(constant)), taken);
+#else
+ GPRReg payloadGPR = op1.payloadGPR();
+ GPRReg tagGPR = op1.tagGPR();
+ if (condition == MacroAssembler::Equal) {
+ // Drop down if not equal, go elsewhere if equal.
+ MacroAssembler::Jump notEqual = m_jit.branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()));
+ branch32(MacroAssembler::Equal, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken);
+ notEqual.link(&m_jit);
} else {
- MacroAssembler::Jump check = m_jit.branchAdd32(MacroAssembler::Overflow, gpr1, gpr2, gprResult);
-
- if (gpr1 == gprResult)
- speculationCheck(Overflow, JSValueRegs(), NoNode, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr2));
- else if (gpr2 == gprResult)
- speculationCheck(Overflow, JSValueRegs(), NoNode, check, SpeculationRecovery(SpeculativeAdd, gprResult, gpr1));
- else
- speculationCheck(Overflow, JSValueRegs(), NoNode, check);
+ // Drop down if equal, go elsewhere if not equal.
+ branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()), taken);
+ branch32(MacroAssembler::NotEqual, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken);
+ }
+#endif
+
+ jump(notTaken);
+
+ use(node->child1());
+ use(node->child2());
+ m_indexInBlock = branchIndexInBlock;
+ m_currentNode = branchNode;
+ return true;
+ }
+
+ GPRTemporary result(this);
+
+#if USE(JSVALUE64)
+ GPRReg op1GPR = op1.gpr();
+ GPRReg resultGPR = result.gpr();
+ m_jit.move(MacroAssembler::TrustedImm64(ValueFalse), resultGPR);
+ MacroAssembler::Jump notEqual = m_jit.branch64(MacroAssembler::NotEqual, op1GPR, MacroAssembler::TrustedImm64(JSValue::encode(constant)));
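+ // ValueTrue is ValueFalse with the low bit set, so OR-ing in 1 flips the boxed
+ // result from false to true.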
+ m_jit.or32(MacroAssembler::TrustedImm32(1), resultGPR);
+ notEqual.link(&m_jit);
+ jsValueResult(resultGPR, node, DataFormatJSBoolean);
+#else
+ GPRReg op1PayloadGPR = op1.payloadGPR();
+ GPRReg op1TagGPR = op1.tagGPR();
+ GPRReg resultGPR = result.gpr();
+ m_jit.move(TrustedImm32(0), resultGPR);
+ MacroAssembler::JumpList notEqual;
+ notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, MacroAssembler::Imm32(constant.tag())));
+ notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1PayloadGPR, MacroAssembler::Imm32(constant.payload())));
+ m_jit.move(TrustedImm32(1), resultGPR);
+ notEqual.link(&m_jit);
+ booleanResult(resultGPR, node);
+#endif
+
+ return false;
+}
+
+bool SpeculativeJIT::compileStrictEq(Node* node)
+{
+ switch (node->binaryUseKind()) {
+ case BooleanUse: {
+ unsigned branchIndexInBlock = detectPeepHoleBranch();
+ if (branchIndexInBlock != UINT_MAX) {
+ Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
+ compilePeepHoleBooleanBranch(node, branchNode, MacroAssembler::Equal);
+ use(node->child1());
+ use(node->child2());
+ m_indexInBlock = branchIndexInBlock;
+ m_currentNode = branchNode;
+ return true;
+ }
+ compileBooleanCompare(node, MacroAssembler::Equal);
+ return false;
+ }
+
+ case Int32Use: {
+ unsigned branchIndexInBlock = detectPeepHoleBranch();
+ if (branchIndexInBlock != UINT_MAX) {
+ Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
+ compilePeepHoleIntegerBranch(node, branchNode, MacroAssembler::Equal);
+ use(node->child1());
+ use(node->child2());
+ m_indexInBlock = branchIndexInBlock;
+ m_currentNode = branchNode;
+ return true;
+ }
+ compileIntegerCompare(node, MacroAssembler::Equal);
+ return false;
+ }
+
+ case NumberUse: {
+ unsigned branchIndexInBlock = detectPeepHoleBranch();
+ if (branchIndexInBlock != UINT_MAX) {
+ Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
+ compilePeepHoleDoubleBranch(node, branchNode, MacroAssembler::DoubleEqual);
+ use(node->child1());
+ use(node->child2());
+ m_indexInBlock = branchIndexInBlock;
+ m_currentNode = branchNode;
+ return true;
+ }
+ compileDoubleCompare(node, MacroAssembler::DoubleEqual);
+ return false;
+ }
+
+ case StringUse: {
+ compileStringEquality(node);
+ return false;
+ }
+
+ case ObjectUse: {
+ unsigned branchIndexInBlock = detectPeepHoleBranch();
+ if (branchIndexInBlock != UINT_MAX) {
+ Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
+ compilePeepHoleObjectEquality(node, branchNode);
+ use(node->child1());
+ use(node->child2());
+ m_indexInBlock = branchIndexInBlock;
+ m_currentNode = branchNode;
+ return true;
}
+ compileObjectEquality(node);
+ return false;
+ }
+
+ case UntypedUse: {
+ return nonSpeculativeStrictEq(node);
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return false;
+ }
+}
+
+void SpeculativeJIT::compileBooleanCompare(Node* node, MacroAssembler::RelationalCondition condition)
+{
+ SpeculateBooleanOperand op1(this, node->child1());
+ SpeculateBooleanOperand op2(this, node->child2());
+ GPRTemporary result(this);
+
+ m_jit.compare32(condition, op1.gpr(), op2.gpr(), result.gpr());
+
+ // If we add a DataFormatBool, we should use it here.
+#if USE(JSVALUE32_64)
+ booleanResult(result.gpr(), node);
+#else
+ m_jit.or32(TrustedImm32(ValueFalse), result.gpr());
+ jsValueResult(result.gpr(), m_currentNode, DataFormatJSBoolean);
+#endif
+}
+
+void SpeculativeJIT::compileStringEquality(Node* node)
+{
+ SpeculateCellOperand left(this, node->child1());
+ SpeculateCellOperand right(this, node->child2());
+ GPRTemporary length(this);
+ GPRTemporary leftTemp(this);
+ GPRTemporary rightTemp(this);
+ GPRTemporary leftTemp2(this, left);
+ GPRTemporary rightTemp2(this, right);
+
+ GPRReg leftGPR = left.gpr();
+ GPRReg rightGPR = right.gpr();
+ GPRReg lengthGPR = length.gpr();
+ GPRReg leftTempGPR = leftTemp.gpr();
+ GPRReg rightTempGPR = rightTemp.gpr();
+ GPRReg leftTemp2GPR = leftTemp2.gpr();
+ GPRReg rightTemp2GPR = rightTemp2.gpr();
+
+ JITCompiler::JumpList trueCase;
+ JITCompiler::JumpList falseCase;
+ JITCompiler::JumpList slowCase;
+
+ DFG_TYPE_CHECK(
+ JSValueSource::unboxedCell(leftGPR), node->child1(), SpecString, m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(leftGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+
+ // It's safe to branch around the type check below, since proving that the values are
+ // equal does indeed prove that the right value is a string.
+ trueCase.append(m_jit.branchPtr(MacroAssembler::Equal, leftGPR, rightGPR));
+
+ DFG_TYPE_CHECK(
+ JSValueSource::unboxedCell(rightGPR), node->child2(), SpecString, m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(rightGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
- integerResult(gprResult, m_compileIndex);
- return;
- }
-
- if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2()))) {
- SpeculateDoubleOperand op1(this, node.child1());
- SpeculateDoubleOperand op2(this, node.child2());
- FPRTemporary result(this, op1, op2);
+ m_jit.load32(MacroAssembler::Address(leftGPR, JSString::offsetOfLength()), lengthGPR);
+
+ falseCase.append(m_jit.branch32(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(rightGPR, JSString::offsetOfLength()),
+ lengthGPR));
+
+ trueCase.append(m_jit.branchTest32(MacroAssembler::Zero, lengthGPR));
+
+ m_jit.loadPtr(MacroAssembler::Address(leftGPR, JSString::offsetOfValue()), leftTempGPR);
+ m_jit.loadPtr(MacroAssembler::Address(rightGPR, JSString::offsetOfValue()), rightTempGPR);
+
+ slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, leftTempGPR));
+ slowCase.append(m_jit.branchTestPtr(MacroAssembler::Zero, rightTempGPR));
+
+ slowCase.append(m_jit.branchTest32(
+ MacroAssembler::Zero,
+ MacroAssembler::Address(leftTempGPR, StringImpl::flagsOffset()),
+ TrustedImm32(StringImpl::flagIs8Bit())));
+ slowCase.append(m_jit.branchTest32(
+ MacroAssembler::Zero,
+ MacroAssembler::Address(rightTempGPR, StringImpl::flagsOffset()),
+ TrustedImm32(StringImpl::flagIs8Bit())));
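+ // The byte-at-a-time loop below only handles 8-bit (Latin-1) strings; 16-bit
+ // strings fall back to the slow call.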
+
+ m_jit.loadPtr(MacroAssembler::Address(leftTempGPR, StringImpl::dataOffset()), leftTempGPR);
+ m_jit.loadPtr(MacroAssembler::Address(rightTempGPR, StringImpl::dataOffset()), rightTempGPR);
+
+ MacroAssembler::Label loop = m_jit.label();
+
+ m_jit.sub32(TrustedImm32(1), lengthGPR);
- FPRReg reg1 = op1.fpr();
- FPRReg reg2 = op2.fpr();
- m_jit.addDouble(reg1, reg2, result.fpr());
+ // This isn't going to generate the best code on x86. But that's OK, it's still better
+ // than not inlining.
+ m_jit.load8(MacroAssembler::BaseIndex(leftTempGPR, lengthGPR, MacroAssembler::TimesOne), leftTemp2GPR);
+ m_jit.load8(MacroAssembler::BaseIndex(rightTempGPR, lengthGPR, MacroAssembler::TimesOne), rightTemp2GPR);
+ falseCase.append(m_jit.branch32(MacroAssembler::NotEqual, leftTemp2GPR, rightTemp2GPR));
+
+ m_jit.branchTest32(MacroAssembler::NonZero, lengthGPR).linkTo(loop, &m_jit);
+
+ trueCase.link(&m_jit);
+#if USE(JSVALUE64)
+ m_jit.move(TrustedImm64(ValueTrue), leftTempGPR);
+#else
+ m_jit.move(TrustedImm32(true), leftTempGPR);
+#endif
+
+ JITCompiler::Jump done = m_jit.jump();
- doubleResult(result.fpr(), m_compileIndex);
- return;
- }
+ falseCase.link(&m_jit);
+#if USE(JSVALUE64)
+ m_jit.move(TrustedImm64(ValueFalse), leftTempGPR);
+#else
+ m_jit.move(TrustedImm32(false), leftTempGPR);
+#endif
+
+ done.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathCall(
+ slowCase, this, operationCompareStringEq, leftTempGPR, leftGPR, rightGPR));
+
+#if USE(JSVALUE64)
+ jsValueResult(leftTempGPR, node, DataFormatJSBoolean);
+#else
+ booleanResult(leftTempGPR, node);
+#endif
+}
- if (node.op() == ValueAdd) {
- compileValueAdd(node);
- return;
+void SpeculativeJIT::compileGetIndexedPropertyStorage(Node* node)
+{
+ SpeculateCellOperand base(this, node->child1());
+ GPRReg baseReg = base.gpr();
+
+ GPRTemporary storage(this);
+ GPRReg storageReg = storage.gpr();
+
+ const TypedArrayDescriptor* descriptor = typedArrayDescriptor(node->arrayMode());
+
+ switch (node->arrayMode().type()) {
+ case Array::String:
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), storageReg);
+
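+ // A null value pointer marks an unresolved rope; resolve it on the slow path
+ // before grabbing the character data.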
+ addSlowPathGenerator(
+ slowPathCall(
+ m_jit.branchTest32(MacroAssembler::Zero, storageReg),
+ this, operationResolveRope, storageReg, baseReg));
+
+ m_jit.loadPtr(MacroAssembler::Address(storageReg, StringImpl::dataOffset()), storageReg);
+ break;
+
+ default:
+ ASSERT(descriptor);
+ m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor->m_storageOffset), storageReg);
+ break;
}
- // We don't handle this yet. :-(
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ storageResult(storageReg, node);
}
-void SpeculativeJIT::compileArithSub(Node& node)
+void SpeculativeJIT::compileGetByValOnArguments(Node* node)
{
- if (m_jit.graph().addShouldSpeculateInteger(node)) {
- if (isNumberConstant(node.child2().index())) {
- SpeculateIntegerOperand op1(this, node.child1());
- int32_t imm2 = valueOfNumberConstantAsInt32(node.child2().index());
- GPRTemporary result(this);
-
- if (nodeCanTruncateInteger(node.arithNodeFlags())) {
- m_jit.move(op1.gpr(), result.gpr());
- m_jit.sub32(Imm32(imm2), result.gpr());
- } else {
-#if ENABLE(JIT_CONSTANT_BLINDING)
- GPRTemporary scratch(this);
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr(), scratch.gpr()));
+ SpeculateCellOperand base(this, node->child1());
+ SpeculateStrictInt32Operand property(this, node->child2());
+ GPRTemporary result(this);
+#if USE(JSVALUE32_64)
+ GPRTemporary resultTag(this);
+#endif
+ GPRTemporary scratch(this);
+
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+ GPRReg resultReg = result.gpr();
+#if USE(JSVALUE32_64)
+ GPRReg resultTagReg = resultTag.gpr();
+#endif
+ GPRReg scratchReg = scratch.gpr();
+
+ if (!m_compileOkay)
+ return;
+
+ ASSERT(ArrayMode(Array::Arguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
+
+ // Two really lame checks.
+ speculationCheck(
+ Uncountable, JSValueSource(), 0,
+ m_jit.branch32(
+ MacroAssembler::AboveOrEqual, propertyReg,
+ MacroAssembler::Address(baseReg, OBJECT_OFFSETOF(Arguments, m_numArguments))));
+ speculationCheck(
+ Uncountable, JSValueSource(), 0,
+ m_jit.branchTestPtr(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(
+ baseReg, OBJECT_OFFSETOF(Arguments, m_slowArguments))));
+
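+ // Negate and sign-extend the index so the BaseIndex below addresses
+ // registers[thisArgumentOffset - 1 - i].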
+ m_jit.move(propertyReg, resultReg);
+ m_jit.neg32(resultReg);
+ m_jit.signExtend32ToPtr(resultReg, resultReg);
+ m_jit.loadPtr(
+ MacroAssembler::Address(baseReg, OBJECT_OFFSETOF(Arguments, m_registers)),
+ scratchReg);
+
+#if USE(JSVALUE32_64)
+ m_jit.load32(
+ MacroAssembler::BaseIndex(
+ scratchReg, resultReg, MacroAssembler::TimesEight,
+ CallFrame::thisArgumentOffset() * sizeof(Register) - sizeof(Register) +
+ OBJECT_OFFSETOF(JSValue, u.asBits.tag)),
+ resultTagReg);
+ m_jit.load32(
+ MacroAssembler::BaseIndex(
+ scratchReg, resultReg, MacroAssembler::TimesEight,
+ CallFrame::thisArgumentOffset() * sizeof(Register) - sizeof(Register) +
+ OBJECT_OFFSETOF(JSValue, u.asBits.payload)),
+ resultReg);
+ jsValueResult(resultTagReg, resultReg, node);
#else
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), Imm32(imm2), result.gpr()));
+ m_jit.load64(
+ MacroAssembler::BaseIndex(
+ scratchReg, resultReg, MacroAssembler::TimesEight,
+ CallFrame::thisArgumentOffset() * sizeof(Register) - sizeof(Register)),
+ resultReg);
+ jsValueResult(resultReg, node);
#endif
- }
+}
- integerResult(result.gpr(), m_compileIndex);
- return;
- }
+void SpeculativeJIT::compileGetArgumentsLength(Node* node)
+{
+ SpeculateCellOperand base(this, node->child1());
+ GPRTemporary result(this, base);
+
+ GPRReg baseReg = base.gpr();
+ GPRReg resultReg = result.gpr();
+
+ if (!m_compileOkay)
+ return;
+
+ ASSERT(ArrayMode(Array::Arguments).alreadyChecked(m_jit.graph(), node, m_state.forNode(node->child1())));
+
+ speculationCheck(
+ Uncountable, JSValueSource(), 0,
+ m_jit.branchTest8(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(baseReg, OBJECT_OFFSETOF(Arguments, m_overrodeLength))));
+
+ m_jit.load32(
+ MacroAssembler::Address(baseReg, OBJECT_OFFSETOF(Arguments, m_numArguments)),
+ resultReg);
+ integerResult(resultReg, node);
+}
+
+void SpeculativeJIT::compileGetArrayLength(Node* node)
+{
+ const TypedArrayDescriptor* descriptor = typedArrayDescriptor(node->arrayMode());
+
+ switch (node->arrayMode().type()) {
+ case Array::Int32:
+ case Array::Double:
+ case Array::Contiguous: {
+ StorageOperand storage(this, node->child2());
+ GPRTemporary result(this, storage);
+ GPRReg storageReg = storage.gpr();
+ GPRReg resultReg = result.gpr();
+ m_jit.load32(MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()), resultReg);
- if (isNumberConstant(node.child1().index())) {
- int32_t imm1 = valueOfNumberConstantAsInt32(node.child1().index());
- SpeculateIntegerOperand op2(this, node.child2());
- GPRTemporary result(this);
-
- m_jit.move(Imm32(imm1), result.gpr());
- if (nodeCanTruncateInteger(node.arithNodeFlags()))
- m_jit.sub32(op2.gpr(), result.gpr());
- else
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
-
- integerResult(result.gpr(), m_compileIndex);
- return;
- }
+ integerResult(resultReg, node);
+ break;
+ }
+ case Array::ArrayStorage:
+ case Array::SlowPutArrayStorage: {
+ StorageOperand storage(this, node->child2());
+ GPRTemporary result(this, storage);
+ GPRReg storageReg = storage.gpr();
+ GPRReg resultReg = result.gpr();
+ m_jit.load32(MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()), resultReg);
- SpeculateIntegerOperand op1(this, node.child1());
- SpeculateIntegerOperand op2(this, node.child2());
- GPRTemporary result(this);
-
- if (nodeCanTruncateInteger(node.arithNodeFlags())) {
- m_jit.move(op1.gpr(), result.gpr());
- m_jit.sub32(op2.gpr(), result.gpr());
- } else
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchSub32(MacroAssembler::Overflow, op1.gpr(), op2.gpr(), result.gpr()));
-
- integerResult(result.gpr(), m_compileIndex);
- return;
+ speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::LessThan, resultReg, MacroAssembler::TrustedImm32(0)));
+
+ integerResult(resultReg, node);
+ break;
}
-
- SpeculateDoubleOperand op1(this, node.child1());
- SpeculateDoubleOperand op2(this, node.child2());
- FPRTemporary result(this, op1);
+ case Array::String: {
+ SpeculateCellOperand base(this, node->child1());
+ GPRTemporary result(this, base);
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultGPR = result.gpr();
+ m_jit.load32(MacroAssembler::Address(baseGPR, JSString::offsetOfLength()), resultGPR);
+ integerResult(resultGPR, node);
+ break;
+ }
+ case Array::Arguments: {
+ compileGetArgumentsLength(node);
+ break;
+ }
+ default: {
+ SpeculateCellOperand base(this, node->child1());
+ GPRTemporary result(this, base);
+ GPRReg baseGPR = base.gpr();
+ GPRReg resultGPR = result.gpr();
+ ASSERT(descriptor);
+ m_jit.load32(MacroAssembler::Address(baseGPR, descriptor->m_lengthOffset), resultGPR);
+ integerResult(resultGPR, node);
+ break;
+ } }
+}
- FPRReg reg1 = op1.fpr();
- FPRReg reg2 = op2.fpr();
- m_jit.subDouble(reg1, reg2, result.fpr());
+void SpeculativeJIT::compileNewFunctionNoCheck(Node* node)
+{
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+ flushRegisters();
+ callOperation(
+ operationNewFunctionNoCheck, resultGPR, m_jit.codeBlock()->functionDecl(node->functionDeclIndex()));
+ cellResult(resultGPR, node);
+}
- doubleResult(result.fpr(), m_compileIndex);
+void SpeculativeJIT::compileNewFunctionExpression(Node* node)
+{
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+ flushRegisters();
+ callOperation(
+ operationNewFunctionExpression,
+ resultGPR,
+ m_jit.codeBlock()->functionExpr(node->functionExprIndex()));
+ cellResult(resultGPR, node);
}
-void SpeculativeJIT::compileArithNegate(Node& node)
+bool SpeculativeJIT::compileRegExpExec(Node* node)
{
- if (m_jit.graph().negateShouldSpeculateInteger(node)) {
- SpeculateIntegerOperand op1(this, node.child1());
- GPRTemporary result(this);
+ unsigned branchIndexInBlock = detectPeepHoleBranch();
+ if (branchIndexInBlock == UINT_MAX)
+ return false;
+ Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
+ ASSERT(node->adjustedRefCount() == 1);
- m_jit.move(op1.gpr(), result.gpr());
+ BlockIndex taken = branchNode->takenBlockIndex();
+ BlockIndex notTaken = branchNode->notTakenBlockIndex();
+
+ bool invert = false;
+ if (taken == nextBlock()) {
+ invert = true;
+ BlockIndex tmp = taken;
+ taken = notTaken;
+ notTaken = tmp;
+ }
- if (nodeCanTruncateInteger(node.arithNodeFlags()))
- m_jit.neg32(result.gpr());
- else {
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchNeg32(MacroAssembler::Overflow, result.gpr()));
- if (!nodeCanIgnoreNegativeZero(node.arithNodeFlags()))
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(MacroAssembler::Zero, result.gpr()));
- }
+ SpeculateCellOperand base(this, node->child1());
+ SpeculateCellOperand argument(this, node->child2());
+ GPRReg baseGPR = base.gpr();
+ GPRReg argumentGPR = argument.gpr();
+
+ flushRegisters();
+ GPRResult result(this);
+ callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
- integerResult(result.gpr(), m_compileIndex);
- return;
- }
-
- SpeculateDoubleOperand op1(this, node.child1());
- FPRTemporary result(this);
+ branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, result.gpr(), taken);
+ jump(notTaken);
- m_jit.negateDouble(op1.fpr(), result.fpr());
+ use(node->child1());
+ use(node->child2());
+ m_indexInBlock = branchIndexInBlock;
+ m_currentNode = branchNode;
- doubleResult(result.fpr(), m_compileIndex);
+ return true;
}
-void SpeculativeJIT::compileArithMul(Node& node)
+void SpeculativeJIT::compileAllocatePropertyStorage(Node* node)
{
- if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
- SpeculateIntegerOperand op1(this, node.child1());
- SpeculateIntegerOperand op2(this, node.child2());
- GPRTemporary result(this);
-
- GPRReg reg1 = op1.gpr();
- GPRReg reg2 = op2.gpr();
-
- // What is unfortunate is that we cannot take advantage of nodeCanTruncateInteger()
- // here. A multiply on integers performed in the double domain and then truncated to
- // an integer will give a different result than a multiply performed in the integer
- // domain and then truncated, if the integer domain result would have resulted in
- // something bigger than what a 32-bit integer can hold. JavaScript mandates that
- // the semantics are always as if the multiply had been performed in the double
- // domain.
-
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchMul32(MacroAssembler::Overflow, reg1, reg2, result.gpr()));
-
- // Check for negative zero, if the users of this node care about such things.
- if (!nodeCanIgnoreNegativeZero(node.arithNodeFlags())) {
- MacroAssembler::Jump resultNonZero = m_jit.branchTest32(MacroAssembler::NonZero, result.gpr());
- speculationCheck(NegativeZero, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, reg1, TrustedImm32(0)));
- speculationCheck(NegativeZero, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, reg2, TrustedImm32(0)));
- resultNonZero.link(&m_jit);
- }
+ if (hasIndexingHeader(node->structureTransitionData().previousStructure->indexingType())) {
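+ // An indexing header means the butterfly also holds indexed storage, so the
+ // whole butterfly must be reallocated by the C++ operation.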
+ SpeculateCellOperand base(this, node->child1());
+
+ GPRReg baseGPR = base.gpr();
+
+ flushRegisters();
- integerResult(result.gpr(), m_compileIndex);
+ GPRResult result(this);
+ callOperation(operationReallocateButterflyToHavePropertyStorageWithInitialCapacity, result.gpr(), baseGPR);
+
+ storageResult(result.gpr(), node);
return;
}
+
+ SpeculateCellOperand base(this, node->child1());
+ GPRTemporary scratch(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ ASSERT(!node->structureTransitionData().previousStructure->outOfLineCapacity());
+ ASSERT(initialOutOfLineCapacity == node->structureTransitionData().newStructure->outOfLineCapacity());
+
+ JITCompiler::Jump slowPath =
+ emitAllocateBasicStorage(
+ TrustedImm32(initialOutOfLineCapacity * sizeof(JSValue)), scratchGPR);
- SpeculateDoubleOperand op1(this, node.child1());
- SpeculateDoubleOperand op2(this, node.child2());
- FPRTemporary result(this, op1, op2);
-
- FPRReg reg1 = op1.fpr();
- FPRReg reg2 = op2.fpr();
+ m_jit.addPtr(JITCompiler::TrustedImm32(sizeof(JSValue)), scratchGPR);
+
+ addSlowPathGenerator(
+ slowPathCall(slowPath, this, operationAllocatePropertyStorageWithInitialCapacity, scratchGPR));
- m_jit.mulDouble(reg1, reg2, result.fpr());
+ m_jit.storePtr(scratchGPR, JITCompiler::Address(baseGPR, JSObject::butterflyOffset()));
- doubleResult(result.fpr(), m_compileIndex);
+ storageResult(scratchGPR, node);
}
-#if CPU(X86) || CPU(X86_64)
-void SpeculativeJIT::compileIntegerArithDivForX86(Node& node)
+void SpeculativeJIT::compileReallocatePropertyStorage(Node* node)
{
- SpeculateIntegerOperand op1(this, node.child1());
- SpeculateIntegerOperand op2(this, node.child2());
- GPRTemporary eax(this, X86Registers::eax);
- GPRTemporary edx(this, X86Registers::edx);
- GPRReg op1GPR = op1.gpr();
- GPRReg op2GPR = op2.gpr();
-
- GPRReg op2TempGPR;
- GPRReg temp;
- if (op2GPR == X86Registers::eax || op2GPR == X86Registers::edx) {
- op2TempGPR = allocate();
- temp = op2TempGPR;
- } else {
- op2TempGPR = InvalidGPRReg;
- if (op1GPR == X86Registers::eax)
- temp = X86Registers::edx;
- else
- temp = X86Registers::eax;
- }
-
- ASSERT(temp != op1GPR);
- ASSERT(temp != op2GPR);
-
- m_jit.add32(JITCompiler::TrustedImm32(1), op2GPR, temp);
-
- JITCompiler::Jump safeDenominator = m_jit.branch32(JITCompiler::Above, temp, JITCompiler::TrustedImm32(1));
-
- JITCompiler::Jump done;
- if (nodeUsedAsNumber(node.arithNodeFlags())) {
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::Zero, op2GPR));
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1)));
- } else {
- JITCompiler::Jump zero = m_jit.branchTest32(JITCompiler::Zero, op2GPR);
- JITCompiler::Jump notNeg2ToThe31 = m_jit.branch32(JITCompiler::Equal, op1GPR, TrustedImm32(-2147483647-1));
- zero.link(&m_jit);
- m_jit.move(TrustedImm32(0), eax.gpr());
- done = m_jit.jump();
- notNeg2ToThe31.link(&m_jit);
- }
-
- safeDenominator.link(&m_jit);
-
- // If the user cares about negative zero, then speculate that we're not about
- // to produce negative zero.
- if (!nodeCanIgnoreNegativeZero(node.arithNodeFlags())) {
- MacroAssembler::Jump numeratorNonZero = m_jit.branchTest32(MacroAssembler::NonZero, op1GPR);
- speculationCheck(NegativeZero, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, op2GPR, TrustedImm32(0)));
- numeratorNonZero.link(&m_jit);
- }
-
- if (op2TempGPR != InvalidGPRReg) {
- m_jit.move(op2GPR, op2TempGPR);
- op2GPR = op2TempGPR;
- }
-
- m_jit.move(op1GPR, eax.gpr());
- m_jit.assembler().cdq();
- m_jit.assembler().idivl_r(op2GPR);
-
- if (op2TempGPR != InvalidGPRReg)
- unlock(op2TempGPR);
+ size_t oldSize = node->structureTransitionData().previousStructure->outOfLineCapacity() * sizeof(JSValue);
+ size_t newSize = oldSize * outOfLineGrowthFactor;
+ ASSERT(newSize == node->structureTransitionData().newStructure->outOfLineCapacity() * sizeof(JSValue));
- // Check that there was no remainder. If there had been, then we'd be obligated to
- // produce a double result instead.
- if (nodeUsedAsNumber(node.arithNodeFlags()))
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branchTest32(JITCompiler::NonZero, edx.gpr()));
- else
- done.link(&m_jit);
-
- integerResult(eax.gpr(), m_compileIndex);
-}
-#endif // CPU(X86) || CPU(X86_64)
+ if (hasIndexingHeader(node->structureTransitionData().previousStructure->indexingType())) {
+ SpeculateCellOperand base(this, node->child1());
+
+ GPRReg baseGPR = base.gpr();
+
+ flushRegisters();
-void SpeculativeJIT::compileArithMod(Node& node)
-{
- if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2()))
- && node.canSpeculateInteger()) {
- compileSoftModulo(node);
+ GPRResult result(this);
+ callOperation(operationReallocateButterflyToGrowPropertyStorage, result.gpr(), baseGPR, newSize / sizeof(JSValue));
+
+ storageResult(result.gpr(), node);
return;
}
+
+ SpeculateCellOperand base(this, node->child1());
+ StorageOperand oldStorage(this, node->child2());
+ GPRTemporary scratch1(this);
+ GPRTemporary scratch2(this);
- SpeculateDoubleOperand op1(this, node.child1());
- SpeculateDoubleOperand op2(this, node.child2());
-
- FPRReg op1FPR = op1.fpr();
- FPRReg op2FPR = op2.fpr();
-
- flushRegisters();
+ GPRReg baseGPR = base.gpr();
+ GPRReg oldStorageGPR = oldStorage.gpr();
+ GPRReg scratchGPR1 = scratch1.gpr();
+ GPRReg scratchGPR2 = scratch2.gpr();
- FPRResult result(this);
+ JITCompiler::Jump slowPath =
+ emitAllocateBasicStorage(TrustedImm32(newSize), scratchGPR2);
- callOperation(fmodAsDFGOperation, result.fpr(), op1FPR, op2FPR);
+ m_jit.addPtr(JITCompiler::TrustedImm32(sizeof(JSValue)), scratchGPR2);
- doubleResult(result.fpr(), m_compileIndex);
+ addSlowPathGenerator(
+ slowPathCall(slowPath, this, operationAllocatePropertyStorage, scratchGPR2, newSize / sizeof(JSValue)));
+ // We have scratchGPR2 = new storage, scratchGPR1 = scratch
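+ // Out-of-line property slots live at negative offsets from the butterfly
+ // pointer, which is why both addresses below are negative.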
+ for (ptrdiff_t offset = 0; offset < static_cast<ptrdiff_t>(oldSize); offset += sizeof(void*)) {
+ m_jit.loadPtr(JITCompiler::Address(oldStorageGPR, -(offset + sizeof(JSValue) + sizeof(void*))), scratchGPR1);
+ m_jit.storePtr(scratchGPR1, JITCompiler::Address(scratchGPR2, -(offset + sizeof(JSValue) + sizeof(void*))));
+ }
+ m_jit.storePtr(scratchGPR2, JITCompiler::Address(baseGPR, JSObject::butterflyOffset()));
+
+ storageResult(scratchGPR2, node);
}
-// Returns true if the compare is fused with a subsequent branch.
-bool SpeculativeJIT::compare(Node& node, MacroAssembler::RelationalCondition condition, MacroAssembler::DoubleCondition doubleCondition, S_DFGOperation_EJJ operation)
+GPRReg SpeculativeJIT::temporaryRegisterForPutByVal(GPRTemporary& temporary, ArrayMode arrayMode)
{
- if (compilePeepHoleBranch(node, condition, doubleCondition, operation))
- return true;
+ if (!putByValWillNeedExtraRegister(arrayMode))
+ return InvalidGPRReg;
+
+ GPRTemporary realTemporary(this);
+ temporary.adopt(realTemporary);
+ return temporary.gpr();
+}
- if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2()))) {
- compileIntegerCompare(node, condition);
- return false;
- }
+void SpeculativeJIT::compileToStringOnCell(Node* node)
+{
+ SpeculateCellOperand op1(this, node->child1());
+ GPRReg op1GPR = op1.gpr();
- if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2()))) {
- compileDoubleCompare(node, doubleCondition);
- return false;
+ switch (node->child1().useKind()) {
+ case StringObjectUse: {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
+
+ speculateStringObject(node->child1(), op1GPR);
+ m_state.forNode(node->child1()).filter(SpecStringObject);
+ m_jit.loadPtr(JITCompiler::Address(op1GPR, JSWrapperObject::internalValueCellOffset()), resultGPR);
+ cellResult(resultGPR, node);
+ break;
}
-
- if (node.op() == CompareEq) {
- if (Node::shouldSpeculateFinalObject(at(node.child1()), at(node.child2()))) {
- compileObjectEquality(node, &JSFinalObject::s_info, isFinalObjectPrediction);
- return false;
- }
- if (Node::shouldSpeculateArray(at(node.child1()), at(node.child2()))) {
- compileObjectEquality(node, &JSArray::s_info, isArrayPrediction);
- return false;
- }
+ case StringOrStringObjectUse: {
+ GPRTemporary result(this);
+ GPRReg resultGPR = result.gpr();
- if (at(node.child1()).shouldSpeculateFinalObject()
- && at(node.child2()).shouldSpeculateFinalObjectOrOther()) {
- compileObjectToObjectOrOtherEquality(
- node.child1(), node.child2(), &JSFinalObject::s_info,
- isFinalObjectPrediction);
- return false;
- }
+ m_jit.loadPtr(JITCompiler::Address(op1GPR, JSCell::structureOffset()), resultGPR);
+ JITCompiler::Jump isString = m_jit.branchPtr(
+ JITCompiler::Equal, resultGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()));
- if (at(node.child1()).shouldSpeculateFinalObjectOrOther()
- && at(node.child2()).shouldSpeculateFinalObject()) {
- compileObjectToObjectOrOtherEquality(
- node.child2(), node.child1(), &JSFinalObject::s_info,
- isFinalObjectPrediction);
- return false;
- }
+ speculateStringObjectForStructure(node->child1(), resultGPR);
- if (at(node.child1()).shouldSpeculateArray()
- && at(node.child2()).shouldSpeculateArrayOrOther()) {
- compileObjectToObjectOrOtherEquality(
- node.child1(), node.child2(), &JSArray::s_info,
- isArrayPrediction);
- return false;
- }
+ m_jit.loadPtr(JITCompiler::Address(op1GPR, JSWrapperObject::internalValueCellOffset()), resultGPR);
- if (at(node.child1()).shouldSpeculateArrayOrOther()
- && at(node.child2()).shouldSpeculateArray()) {
- compileObjectToObjectOrOtherEquality(
- node.child2(), node.child1(), &JSArray::s_info,
- isArrayPrediction);
- return false;
+ JITCompiler::Jump done = m_jit.jump();
+ isString.link(&m_jit);
+ m_jit.move(op1GPR, resultGPR);
+ done.link(&m_jit);
+
+ m_state.forNode(node->child1()).filter(SpecString | SpecStringObject);
+
+ cellResult(resultGPR, node);
+ break;
+ }
+
+ case CellUse: {
+ GPRResult result(this);
+ GPRReg resultGPR = result.gpr();
+
+ // We flush registers instead of silent spill/fill because in this mode we
+ // believe that most likely the input is not a string, and we need to take
+ // the slow path.
+ flushRegisters();
+ JITCompiler::Jump done;
+ if (node->child1()->prediction() & SpecString) {
+ JITCompiler::Jump needCall = m_jit.branchPtr(
+ JITCompiler::NotEqual,
+ JITCompiler::Address(op1GPR, JSCell::structureOffset()),
+ TrustedImmPtr(m_jit.vm()->stringStructure.get()));
+ m_jit.move(op1GPR, resultGPR);
+ done = m_jit.jump();
+ needCall.link(&m_jit);
}
+ callOperation(operationToStringOnCell, resultGPR, op1GPR);
+ if (done.isSet())
+ done.link(&m_jit);
+ cellResult(resultGPR, node);
+ break;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
}
-
- nonSpeculativeNonPeepholeCompare(node, condition, operation);
- return false;
}
-bool SpeculativeJIT::compileStrictEqForConstant(Node& node, Edge value, JSValue constant)
+void SpeculativeJIT::compileNewStringObject(Node* node)
{
- JSValueOperand op1(this, value);
+ SpeculateCellOperand operand(this, node->child1());
- unsigned branchIndexInBlock = detectPeepHoleBranch();
- if (branchIndexInBlock != UINT_MAX) {
- NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
- Node& branchNode = at(branchNodeIndex);
- BlockIndex taken = branchNode.takenBlockIndex();
- BlockIndex notTaken = branchNode.notTakenBlockIndex();
- MacroAssembler::RelationalCondition condition = MacroAssembler::Equal;
-
- // The branch instruction will branch to the taken block.
- // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
- if (taken == (m_block + 1)) {
- condition = MacroAssembler::NotEqual;
- BlockIndex tmp = taken;
- taken = notTaken;
- notTaken = tmp;
- }
+ GPRTemporary result(this);
+ GPRTemporary scratch1(this);
+ GPRTemporary scratch2(this);
-#if USE(JSVALUE64)
- branchPtr(condition, op1.gpr(), MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(constant))), taken);
-#else
- GPRReg payloadGPR = op1.payloadGPR();
- GPRReg tagGPR = op1.tagGPR();
- if (condition == MacroAssembler::Equal) {
- // Drop down if not equal, go elsewhere if equal.
- MacroAssembler::Jump notEqual = m_jit.branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()));
- branch32(MacroAssembler::Equal, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken);
- notEqual.link(&m_jit);
- } else {
- // Drop down if equal, go elsewhere if not equal.
- branch32(MacroAssembler::NotEqual, tagGPR, MacroAssembler::Imm32(constant.tag()), taken);
- branch32(MacroAssembler::NotEqual, payloadGPR, MacroAssembler::Imm32(constant.payload()), taken);
- }
-#endif
-
- jump(notTaken);
-
- use(node.child1());
- use(node.child2());
- m_indexInBlock = branchIndexInBlock;
- m_compileIndex = branchNodeIndex;
- return true;
- }
+ GPRReg operandGPR = operand.gpr();
+ GPRReg resultGPR = result.gpr();
+ GPRReg scratch1GPR = scratch1.gpr();
+ GPRReg scratch2GPR = scratch2.gpr();
- GPRTemporary result(this);
+ JITCompiler::JumpList slowPath;
+
+ emitAllocateJSObject<StringObject>(
+ resultGPR, TrustedImmPtr(node->structure()), TrustedImmPtr(0), scratch1GPR, scratch2GPR,
+ slowPath);
+ m_jit.storePtr(
+ TrustedImmPtr(&StringObject::s_info),
+ JITCompiler::Address(resultGPR, JSDestructibleObject::classInfoOffset()));
#if USE(JSVALUE64)
- GPRReg op1GPR = op1.gpr();
- GPRReg resultGPR = result.gpr();
- m_jit.move(MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(ValueFalse)), resultGPR);
- MacroAssembler::Jump notEqual = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, MacroAssembler::TrustedImmPtr(bitwise_cast<void*>(JSValue::encode(constant))));
- m_jit.or32(MacroAssembler::TrustedImm32(1), resultGPR);
- notEqual.link(&m_jit);
- jsValueResult(resultGPR, m_compileIndex, DataFormatJSBoolean);
+ m_jit.store64(
+ operandGPR, JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset()));
#else
- GPRReg op1PayloadGPR = op1.payloadGPR();
- GPRReg op1TagGPR = op1.tagGPR();
- GPRReg resultGPR = result.gpr();
- m_jit.move(TrustedImm32(0), resultGPR);
- MacroAssembler::JumpList notEqual;
- notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, MacroAssembler::Imm32(constant.tag())));
- notEqual.append(m_jit.branch32(MacroAssembler::NotEqual, op1PayloadGPR, MacroAssembler::Imm32(constant.payload())));
- m_jit.move(TrustedImm32(1), resultGPR);
- notEqual.link(&m_jit);
- booleanResult(resultGPR, m_compileIndex);
+ m_jit.store32(
+ TrustedImm32(JSValue::CellTag),
+ JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(
+ operandGPR,
+ JITCompiler::Address(resultGPR, JSWrapperObject::internalValueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
#endif
- return false;
+ addSlowPathGenerator(slowPathCall(
+ slowPath, this, operationNewStringObject, resultGPR, operandGPR, node->structure()));
+
+ cellResult(resultGPR, node);
}
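+
+ // compileNewStringObject() follows the usual DFG inline-allocation pattern:
+ // bump-allocate the StringObject, store its ClassInfo and boxed internal
+ // value inline, and collect every failure branch in slowPath; the deferred
+ // slowPathCall() then redoes the allocation out of line through
+ // operationNewStringObject().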
-bool SpeculativeJIT::compileStrictEq(Node& node)
+void SpeculativeJIT::speculateInt32(Edge edge)
{
- // 1) If either operand is a constant and that constant is not a double, integer,
- // or string, then do a JSValue comparison.
-
- if (isJSConstant(node.child1().index())) {
- JSValue value = valueOfJSConstant(node.child1().index());
- if (!value.isNumber() && !value.isString())
- return compileStrictEqForConstant(node, node.child2(), value);
- }
-
- if (isJSConstant(node.child2().index())) {
- JSValue value = valueOfJSConstant(node.child2().index());
- if (!value.isNumber() && !value.isString())
- return compileStrictEqForConstant(node, node.child1(), value);
- }
-
- // 2) If the operands are predicted integer, do an integer comparison.
-
- if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2()))) {
- unsigned branchIndexInBlock = detectPeepHoleBranch();
- if (branchIndexInBlock != UINT_MAX) {
- NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
- compilePeepHoleIntegerBranch(node, branchNodeIndex, MacroAssembler::Equal);
- use(node.child1());
- use(node.child2());
- m_indexInBlock = branchIndexInBlock;
- m_compileIndex = branchNodeIndex;
- return true;
- }
- compileIntegerCompare(node, MacroAssembler::Equal);
- return false;
- }
-
- // 3) If the operands are predicted double, do a double comparison.
-
- if (Node::shouldSpeculateNumber(at(node.child1()), at(node.child2()))) {
- unsigned branchIndexInBlock = detectPeepHoleBranch();
- if (branchIndexInBlock != UINT_MAX) {
- NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
- compilePeepHoleDoubleBranch(node, branchNodeIndex, MacroAssembler::DoubleEqual);
- use(node.child1());
- use(node.child2());
- m_indexInBlock = branchIndexInBlock;
- m_compileIndex = branchNodeIndex;
- return true;
- }
- compileDoubleCompare(node, MacroAssembler::DoubleEqual);
- return false;
- }
-
- // 4) If the operands are predicted final object or array, then do a final object
- // or array comparison.
+ if (!needsTypeCheck(edge, SpecInt32))
+ return;
- if (Node::shouldSpeculateFinalObject(at(node.child1()), at(node.child2()))) {
- unsigned branchIndexInBlock = detectPeepHoleBranch();
- if (branchIndexInBlock != UINT_MAX) {
- NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
- compilePeepHoleObjectEquality(node, branchNodeIndex, &JSFinalObject::s_info, isFinalObjectPrediction);
- use(node.child1());
- use(node.child2());
- m_indexInBlock = branchIndexInBlock;
- m_compileIndex = branchNodeIndex;
- return true;
- }
- compileObjectEquality(node, &JSFinalObject::s_info, isFinalObjectPrediction);
- return false;
- }
+ (SpeculateIntegerOperand(this, edge)).gpr();
+}
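+
+ // For speculateInt32 (and likewise speculateNumber, speculateBoolean, and
+ // speculateCell below), merely constructing the typed operand is the point:
+ // the operand's fill logic emits the required type check as a side effect,
+ // and the fetched register is deliberately discarded.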
+
+void SpeculativeJIT::speculateNumber(Edge edge)
+{
+ if (!needsTypeCheck(edge, SpecNumber))
+ return;
- if (Node::shouldSpeculateArray(at(node.child1()), at(node.child2()))) {
- unsigned branchIndexInBlock = detectPeepHoleBranch();
- if (branchIndexInBlock != UINT_MAX) {
- NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
- compilePeepHoleObjectEquality(node, branchNodeIndex, &JSArray::s_info, isArrayPrediction);
- use(node.child1());
- use(node.child2());
- m_indexInBlock = branchIndexInBlock;
- m_compileIndex = branchNodeIndex;
- return true;
- }
- compileObjectEquality(node, &JSArray::s_info, isArrayPrediction);
- return false;
- }
+ (SpeculateDoubleOperand(this, edge)).fpr();
+}
+
+void SpeculativeJIT::speculateRealNumber(Edge edge)
+{
+ if (!needsTypeCheck(edge, SpecRealNumber))
+ return;
- // 5) Fall back to non-speculative strict equality.
+ SpeculateDoubleOperand operand(this, edge);
+ FPRReg fpr = operand.fpr();
+ DFG_TYPE_CHECK(
+ JSValueRegs(), edge, SpecRealNumber,
+ m_jit.branchDouble(
+ MacroAssembler::DoubleNotEqualOrUnordered, fpr, fpr));
+}
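+
+ // DoubleNotEqualOrUnordered with fpr compared against itself is the classic
+ // "x != x" NaN test: the branch, and with it the speculation failure, fires
+ // exactly when the value is NaN, the one number SpecRealNumber excludes.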
+
+void SpeculativeJIT::speculateBoolean(Edge edge)
+{
+ if (!needsTypeCheck(edge, SpecBoolean))
+ return;
- return nonSpeculativeStrictEq(node);
+ (SpeculateBooleanOperand(this, edge)).gpr();
}
-void SpeculativeJIT::compileGetIndexedPropertyStorage(Node& node)
+void SpeculativeJIT::speculateCell(Edge edge)
{
- if (!node.prediction() || !at(node.child1()).prediction() || !at(node.child2()).prediction()) {
- terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
+ if (!needsTypeCheck(edge, SpecCell))
return;
- }
-
- SpeculateCellOperand base(this, node.child1());
- GPRReg baseReg = base.gpr();
- PredictedType basePrediction = at(node.child2()).prediction();
- if (!(basePrediction & PredictInt32) && basePrediction) {
- ASSERT_NOT_REACHED();
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
- noResult(m_compileIndex);
+ (SpeculateCellOperand(this, edge)).gpr();
+}
+
+void SpeculativeJIT::speculateObject(Edge edge)
+{
+ if (!needsTypeCheck(edge, SpecObject))
return;
- }
- GPRTemporary storage(this);
- GPRReg storageReg = storage.gpr();
- if (at(node.child1()).prediction() == PredictString) {
- if (!isStringPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info)));
+ SpeculateCellOperand operand(this, edge);
+ GPRReg gpr = operand.gpr();
+ DFG_TYPE_CHECK(
+ JSValueSource::unboxedCell(gpr), edge, SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(gpr, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+}
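+
+ // Note the inverted test: rather than enumerating object structures, the
+ // check fails the speculation when the cell's structure *is* the VM's string
+ // structure. This relies on the invariant that strings are the only
+ // non-object cells that can reach this edge.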
- m_jit.loadPtr(MacroAssembler::Address(baseReg, JSString::offsetOfValue()), storageReg);
+void SpeculativeJIT::speculateObjectOrOther(Edge edge)
+{
+ if (!needsTypeCheck(edge, SpecObject | SpecOther))
+ return;
+
+ JSValueOperand operand(this, edge, ManualOperandSpeculation);
+ GPRTemporary temp(this);
+ GPRReg tempGPR = temp.gpr();
+#if USE(JSVALUE64)
+ GPRReg gpr = operand.gpr();
+ MacroAssembler::Jump notCell = m_jit.branchTest64(
+ MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister);
+ DFG_TYPE_CHECK(
+ JSValueRegs(gpr), edge, (~SpecCell) | SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(gpr, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ MacroAssembler::Jump done = m_jit.jump();
+ notCell.link(&m_jit);
+ if (needsTypeCheck(edge, SpecCell | SpecOther)) {
+ m_jit.move(gpr, tempGPR);
+ m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), tempGPR);
- // Speculate that we're not accessing a rope
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTest32(MacroAssembler::Zero, storageReg));
-
- m_jit.loadPtr(MacroAssembler::Address(storageReg, StringImpl::dataOffset()), storageReg);
- } else if (at(node.child1()).shouldSpeculateInt8Array()) {
- const TypedArrayDescriptor& descriptor = m_jit.globalData()->int8ArrayDescriptor();
- if (!isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
- m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
- } else if (at(node.child1()).shouldSpeculateInt16Array()) {
- const TypedArrayDescriptor& descriptor = m_jit.globalData()->int16ArrayDescriptor();
- if (!isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
- m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
- } else if (at(node.child1()).shouldSpeculateInt32Array()) {
- const TypedArrayDescriptor& descriptor = m_jit.globalData()->int32ArrayDescriptor();
- if (!isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
- m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
- } else if (at(node.child1()).shouldSpeculateUint8Array()) {
- const TypedArrayDescriptor& descriptor = m_jit.globalData()->uint8ArrayDescriptor();
- if (!isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
- m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
- } else if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
- const TypedArrayDescriptor& descriptor = m_jit.globalData()->uint8ClampedArrayDescriptor();
- if (!isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
- m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
- } else if (at(node.child1()).shouldSpeculateUint16Array()) {
- const TypedArrayDescriptor& descriptor = m_jit.globalData()->uint16ArrayDescriptor();
- if (!isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
- m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
- } else if (at(node.child1()).shouldSpeculateUint32Array()) {
- const TypedArrayDescriptor& descriptor = m_jit.globalData()->uint32ArrayDescriptor();
- if (!isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
- m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
- } else if (at(node.child1()).shouldSpeculateFloat32Array()) {
- const TypedArrayDescriptor& descriptor = m_jit.globalData()->float32ArrayDescriptor();
- if (!isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
- m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
- } else if (at(node.child1()).shouldSpeculateFloat64Array()) {
- const TypedArrayDescriptor& descriptor = m_jit.globalData()->float64ArrayDescriptor();
- if (!isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
- m_jit.loadPtr(MacroAssembler::Address(baseReg, descriptor.m_storageOffset), storageReg);
- } else {
- if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
- m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg);
+ typeCheck(
+ JSValueRegs(gpr), edge, SpecCell | SpecOther,
+ m_jit.branch64(
+ MacroAssembler::NotEqual, tempGPR,
+ MacroAssembler::TrustedImm64(ValueNull)));
+ }
+ done.link(&m_jit);
+#else
+ GPRReg tagGPR = operand.tagGPR();
+ GPRReg payloadGPR = operand.payloadGPR();
+ MacroAssembler::Jump notCell =
+ m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::CellTag));
+ DFG_TYPE_CHECK(
+ JSValueRegs(tagGPR, payloadGPR), edge, (~SpecCell) | SpecObject, m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(payloadGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
+ MacroAssembler::Jump done = m_jit.jump();
+ notCell.link(&m_jit);
+ if (needsTypeCheck(edge, SpecCell | SpecOther)) {
+ m_jit.move(tagGPR, tempGPR);
+ m_jit.or32(TrustedImm32(1), tempGPR);
+
+ typeCheck(
+ JSValueRegs(tagGPR, payloadGPR), edge, SpecCell | SpecOther,
+ m_jit.branch32(
+ MacroAssembler::NotEqual, tempGPR,
+ MacroAssembler::TrustedImm32(JSValue::NullTag)));
}
- storageResult(storageReg, m_compileIndex);
+ done.link(&m_jit);
+#endif
}
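+
+ // The "other" half of the check exploits the value encoding: on JSVALUE64,
+ // null and undefined differ only in TagBitUndefined, so masking that bit off
+ // and comparing against ValueNull accepts exactly those two values. The
+ // 32-bit path plays the same trick on the tag word: NullTag and UndefinedTag
+ // differ only in the low bit, so or'ing in 1 folds both onto NullTag.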
-void SpeculativeJIT::compileNewFunctionNoCheck(Node& node)
+void SpeculativeJIT::speculateString(Edge edge)
{
- GPRResult result(this);
- GPRReg resultGPR = result.gpr();
- flushRegisters();
- callOperation(
- operationNewFunction, resultGPR, m_jit.codeBlock()->functionDecl(node.functionDeclIndex()));
- cellResult(resultGPR, m_compileIndex);
+ if (!needsTypeCheck(edge, SpecString))
+ return;
+
+ SpeculateCellOperand operand(this, edge);
+ GPRReg gpr = operand.gpr();
+ DFG_TYPE_CHECK(
+ JSValueSource::unboxedCell(gpr), edge, SpecString, m_jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(gpr, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())));
}
-void SpeculativeJIT::compileNewFunctionExpression(Node& node)
+void SpeculativeJIT::speculateStringObject(Edge edge, GPRReg gpr)
{
- GPRResult result(this);
- GPRReg resultGPR = result.gpr();
- flushRegisters();
- callOperation(
- operationNewFunctionExpression,
- resultGPR,
- m_jit.codeBlock()->functionExpr(node.functionExprIndex()));
- cellResult(resultGPR, m_compileIndex);
+ speculateStringObjectForStructure(edge, JITCompiler::Address(gpr, JSCell::structureOffset()));
}
-bool SpeculativeJIT::compileRegExpExec(Node& node)
+void SpeculativeJIT::speculateStringObject(Edge edge)
{
- unsigned branchIndexInBlock = detectPeepHoleBranch();
- if (branchIndexInBlock == UINT_MAX)
- return false;
- NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
- ASSERT(node.adjustedRefCount() == 1);
-
- Node& branchNode = at(branchNodeIndex);
- BlockIndex taken = branchNode.takenBlockIndex();
- BlockIndex notTaken = branchNode.notTakenBlockIndex();
+ if (!needsTypeCheck(edge, SpecStringObject))
+ return;
- bool invert = false;
- if (taken == (m_block + 1)) {
- invert = true;
- BlockIndex tmp = taken;
- taken = notTaken;
- notTaken = tmp;
- }
+ SpeculateCellOperand operand(this, edge);
+ GPRReg gpr = operand.gpr();
+ if (!needsTypeCheck(edge, SpecStringObject))
+ return;
+
+ speculateStringObject(edge, gpr);
+ m_state.forNode(edge).filter(SpecStringObject);
+}
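+
+ // needsTypeCheck() is consulted a second time after the operand is
+ // constructed, presumably because filling a SpeculateCellOperand can itself
+ // update the abstract state; if the check has become redundant by then, no
+ // structure test needs to be emitted.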
- SpeculateCellOperand base(this, node.child1());
- SpeculateCellOperand argument(this, node.child2());
- GPRReg baseGPR = base.gpr();
- GPRReg argumentGPR = argument.gpr();
+void SpeculativeJIT::speculateStringOrStringObject(Edge edge)
+{
+ if (!needsTypeCheck(edge, SpecString | SpecStringObject))
+ return;
- flushRegisters();
- GPRResult result(this);
- callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
+ SpeculateCellOperand operand(this, edge);
+ GPRReg gpr = operand.gpr();
+ if (!needsTypeCheck(edge, SpecString | SpecStringObject))
+ return;
+
+ GPRTemporary structure(this);
+ GPRReg structureGPR = structure.gpr();
+
+ m_jit.loadPtr(JITCompiler::Address(gpr, JSCell::structureOffset()), structureGPR);
+
+ JITCompiler::Jump isString = m_jit.branchPtr(
+ JITCompiler::Equal, structureGPR, TrustedImmPtr(m_jit.vm()->stringStructure.get()));
+
+ speculateStringObjectForStructure(edge, structureGPR);
+
+ isString.link(&m_jit);
+
+ m_state.forNode(edge).filter(SpecString | SpecStringObject);
+}
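+
+ // Mirrors the StringOrStringObjectUse fast path in the ToString code above:
+ // a single structure load feeds both the string test and the StringObject
+ // speculation, and the explicit filter() records the narrowed type in the
+ // abstract state.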
- branchTest32(invert ? JITCompiler::Zero : JITCompiler::NonZero, result.gpr(), taken);
- jump(notTaken);
+void SpeculativeJIT::speculateNotCell(Edge edge)
+{
+ if (!needsTypeCheck(edge, ~SpecCell))
+ return;
+
+ JSValueOperand operand(this, edge, ManualOperandSpeculation);
+#if USE(JSVALUE64)
+ typeCheck(
+ JSValueRegs(operand.gpr()), edge, ~SpecCell,
+ m_jit.branchTest64(
+ JITCompiler::Zero, operand.gpr(), GPRInfo::tagMaskRegister));
+#else
+ typeCheck(
+ JSValueRegs(operand.tagGPR(), operand.payloadGPR()), edge, ~SpecCell,
+ m_jit.branch32(
+ JITCompiler::Equal, operand.tagGPR(), TrustedImm32(JSValue::CellTag)));
+#endif
+}
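+
+ // On JSVALUE64, a cell is encoded with all tag-mask bits clear, so a Zero
+ // branchTest64 against tagMaskRegister identifies cells; the 32-bit path
+ // simply compares the tag word against CellTag. Either way the branch fires
+ // for cells, which is the failure case when speculating ~SpecCell.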
- use(node.child1());
- use(node.child2());
- m_indexInBlock = branchIndexInBlock;
- m_compileIndex = branchNodeIndex;
+void SpeculativeJIT::speculateOther(Edge edge)
+{
+ if (!needsTypeCheck(edge, SpecOther))
+ return;
+
+ JSValueOperand operand(this, edge, ManualOperandSpeculation);
+ GPRTemporary temp(this);
+ GPRReg tempGPR = temp.gpr();
+#if USE(JSVALUE64)
+ m_jit.move(operand.gpr(), tempGPR);
+ m_jit.and64(MacroAssembler::TrustedImm32(~TagBitUndefined), tempGPR);
+ typeCheck(
+ JSValueRegs(operand.gpr()), edge, SpecOther,
+ m_jit.branch64(
+ MacroAssembler::NotEqual, tempGPR,
+ MacroAssembler::TrustedImm64(ValueNull)));
+#else
+ m_jit.move(operand.tagGPR(), tempGPR);
+ m_jit.or32(TrustedImm32(1), tempGPR);
+ typeCheck(
+ JSValueRegs(operand.tagGPR(), operand.payloadGPR()), edge, SpecOther,
+ m_jit.branch32(MacroAssembler::NotEqual, tempGPR, TrustedImm32(JSValue::NullTag)));
+#endif
+}
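+
+ // speculateOther() accepts only null and undefined, using the same
+ // fold-undefined-onto-null masking trick as speculateObjectOrOther() above,
+ // just without a preceding cell branch.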
- return true;
+void SpeculativeJIT::speculate(Node*, Edge edge)
+{
+ switch (edge.useKind()) {
+ case UntypedUse:
+ break;
+ case KnownInt32Use:
+ ASSERT(!needsTypeCheck(edge, SpecInt32));
+ break;
+ case KnownNumberUse:
+ ASSERT(!needsTypeCheck(edge, SpecNumber));
+ break;
+ case KnownCellUse:
+ ASSERT(!needsTypeCheck(edge, SpecCell));
+ break;
+ case KnownStringUse:
+ ASSERT(!needsTypeCheck(edge, SpecString));
+ break;
+ case Int32Use:
+ speculateInt32(edge);
+ break;
+ case RealNumberUse:
+ speculateRealNumber(edge);
+ break;
+ case NumberUse:
+ speculateNumber(edge);
+ break;
+ case BooleanUse:
+ speculateBoolean(edge);
+ break;
+ case CellUse:
+ speculateCell(edge);
+ break;
+ case ObjectUse:
+ speculateObject(edge);
+ break;
+ case ObjectOrOtherUse:
+ speculateObjectOrOther(edge);
+ break;
+ case StringUse:
+ speculateString(edge);
+ break;
+ case StringObjectUse:
+ speculateStringObject(edge);
+ break;
+ case StringOrStringObjectUse:
+ speculateStringOrStringObject(edge);
+ break;
+ case NotCellUse:
+ speculateNotCell(edge);
+ break;
+ case OtherUse:
+ speculateOther(edge);
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
}
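+
+ // speculate() is the per-edge dispatch used when generating a node's input
+ // checks. The Known*Use cases deliberately emit nothing: earlier phases are
+ // expected to have proven those types already, so they are only asserted.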
} } // namespace JSC::DFG