/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#if ENABLE(DFG_JIT)
-#include "DFGAbstractState.h"
+#include "DFGAbstractInterpreter.h"
#include "DFGGenerationInfo.h"
+#include "DFGInPlaceAbstractState.h"
#include "DFGJITCompiler.h"
#include "DFGOSRExit.h"
-#include "DFGOperations.h"
+#include "DFGOSRExitJumpPlaceholder.h"
+#include "DFGSilentRegisterSavePlan.h"
+#include "DFGValueSource.h"
+#include "JITOperations.h"
#include "MarkedAllocator.h"
+#include "PutKind.h"
#include "ValueRecovery.h"
+#include "VirtualRegister.h"
namespace JSC { namespace DFG {
+class GPRTemporary;
class JSValueOperand;
+class SlowPathGenerator;
class SpeculativeJIT;
-class SpeculateIntegerOperand;
+class SpeculateInt32Operand;
class SpeculateStrictInt32Operand;
class SpeculateDoubleOperand;
class SpeculateCellOperand;
class SpeculateBooleanOperand;
+enum GeneratedOperandType { GeneratedOperandTypeUnknown, GeneratedOperandInteger, GeneratedOperandJSValue};
-enum ValueSourceKind {
- SourceNotSet,
- ValueInRegisterFile,
- Int32InRegisterFile,
- CellInRegisterFile,
- BooleanInRegisterFile,
- DoubleInRegisterFile,
- SourceIsDead,
- HaveNode
-};
-
-class ValueSource {
-public:
- ValueSource()
- : m_nodeIndex(nodeIndexFromKind(SourceNotSet))
- {
- }
-
- explicit ValueSource(ValueSourceKind valueSourceKind)
- : m_nodeIndex(nodeIndexFromKind(valueSourceKind))
- {
- ASSERT(kind() != SourceNotSet);
- ASSERT(kind() != HaveNode);
- }
-
- explicit ValueSource(NodeIndex nodeIndex)
- : m_nodeIndex(nodeIndex)
- {
- ASSERT(kind() == HaveNode);
- }
-
- static ValueSource forPrediction(PredictedType prediction)
- {
- if (isInt32Prediction(prediction))
- return ValueSource(Int32InRegisterFile);
- if (isArrayPrediction(prediction))
- return ValueSource(CellInRegisterFile);
- if (isBooleanPrediction(prediction))
- return ValueSource(BooleanInRegisterFile);
- return ValueSource(ValueInRegisterFile);
- }
-
- bool isSet() const
- {
- return kindFromNodeIndex(m_nodeIndex) != SourceNotSet;
- }
-
- ValueSourceKind kind() const
- {
- return kindFromNodeIndex(m_nodeIndex);
- }
-
- NodeIndex nodeIndex() const
- {
- ASSERT(kind() == HaveNode);
- return m_nodeIndex;
- }
-
- void dump(FILE* out) const;
-
-private:
- static NodeIndex nodeIndexFromKind(ValueSourceKind kind)
- {
- ASSERT(kind >= SourceNotSet && kind < HaveNode);
- return NoNode - kind;
- }
-
- static ValueSourceKind kindFromNodeIndex(NodeIndex nodeIndex)
- {
- unsigned kind = static_cast<unsigned>(NoNode - nodeIndex);
- if (kind >= static_cast<unsigned>(HaveNode))
- return HaveNode;
- return static_cast<ValueSourceKind>(kind);
- }
-
- NodeIndex m_nodeIndex;
-};
-
-
-enum GeneratedOperandType { GeneratedOperandTypeUnknown, GeneratedOperandInteger, GeneratedOperandDouble, GeneratedOperandJSValue};
+inline GPRReg extractResult(GPRReg result) { return result; }
+#if USE(JSVALUE64)
+inline GPRReg extractResult(JSValueRegs result) { return result.gpr(); }
+#else
+inline JSValueRegs extractResult(JSValueRegs result) { return result; }
+#endif
+inline NoResultTag extractResult(NoResultTag) { return NoResult; }
// === SpeculativeJIT ===
//
// to propagate type information (including information that has
// only speculatively been asserted) through the dataflow.
class SpeculativeJIT {
+ WTF_MAKE_FAST_ALLOCATED;
+
friend struct OSRExit;
private:
typedef JITCompiler::TrustedImm32 TrustedImm32;
typedef JITCompiler::Imm32 Imm32;
typedef JITCompiler::TrustedImmPtr TrustedImmPtr;
typedef JITCompiler::ImmPtr ImmPtr;
+ typedef JITCompiler::TrustedImm64 TrustedImm64;
+ typedef JITCompiler::Imm64 Imm64;
// These constants are used to set priorities for spill order for
// the register allocator.
public:
SpeculativeJIT(JITCompiler&);
+ ~SpeculativeJIT();
bool compile();
+
void createOSREntries();
void linkOSREntries(LinkBuffer&);
- Node& at(NodeIndex nodeIndex)
+ BasicBlock* nextBlock()
{
- return m_jit.graph()[nodeIndex];
- }
- Node& at(Edge nodeUse)
- {
- return at(nodeUse.index());
+ for (BlockIndex resultIndex = m_block->index + 1; ; resultIndex++) {
+ if (resultIndex >= m_jit.graph().numBlocks())
+ return 0;
+ if (BasicBlock* result = m_jit.graph().block(resultIndex))
+ return result;
+ }
}
- GPRReg fillInteger(NodeIndex, DataFormat& returnFormat);
- FPRReg fillDouble(NodeIndex);
#if USE(JSVALUE64)
- GPRReg fillJSValue(NodeIndex);
+ GPRReg fillJSValue(Edge);
#elif USE(JSVALUE32_64)
- bool fillJSValue(NodeIndex, GPRReg&, GPRReg&, FPRReg&);
+ bool fillJSValue(Edge, GPRReg&, GPRReg&, FPRReg&);
#endif
- GPRReg fillStorage(NodeIndex);
+ GPRReg fillStorage(Edge);
// lock and unlock GPR & FPR registers.
void lock(GPRReg reg)
// Used to check whether a child node is at its last use,
// so that its machine registers may be reused.
- bool canReuse(NodeIndex nodeIndex)
+ bool canReuse(Node* node)
{
- VirtualRegister virtualRegister = at(nodeIndex).virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
- return info.canReuse();
+ return generationInfo(node).canReuse();
}
bool canReuse(Edge nodeUse)
{
- return canReuse(nodeUse.index());
+ return canReuse(nodeUse.node());
}
GPRReg reuse(GPRReg reg)
{
// Allocate a gpr/fpr.
GPRReg allocate()
{
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
+#endif
VirtualRegister spillMe;
GPRReg gpr = m_gprs.allocate(spillMe);
- if (spillMe != InvalidVirtualRegister) {
+ if (spillMe.isValid()) {
#if USE(JSVALUE32_64)
- GenerationInfo& info = m_generationInfo[spillMe];
- ASSERT(info.registerFormat() != DataFormatJSDouble);
+ GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
if ((info.registerFormat() & DataFormatJS))
m_gprs.release(info.tagGPR() == gpr ? info.payloadGPR() : info.tagGPR());
#endif
}
GPRReg allocate(GPRReg specific)
{
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
+#endif
VirtualRegister spillMe = m_gprs.allocateSpecific(specific);
- if (spillMe != InvalidVirtualRegister) {
+ if (spillMe.isValid()) {
#if USE(JSVALUE32_64)
- GenerationInfo& info = m_generationInfo[spillMe];
- ASSERT(info.registerFormat() != DataFormatJSDouble);
+ GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
+ RELEASE_ASSERT(info.registerFormat() != DataFormatJSDouble);
if ((info.registerFormat() & DataFormatJS))
m_gprs.release(info.tagGPR() == specific ? info.payloadGPR() : info.tagGPR());
#endif
}
FPRReg fprAllocate()
{
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ m_jit.addRegisterAllocationAtOffset(m_jit.debugOffset());
+#endif
VirtualRegister spillMe;
FPRReg fpr = m_fprs.allocate(spillMe);
- if (spillMe != InvalidVirtualRegister)
+ if (spillMe.isValid())
spill(spillMe);
return fpr;
}
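// Illustrative sketch (not part of this patch): allocate()/fprAllocate() are normally
// reached through RAII-style temporaries such as GPRTemporary (forward-declared above)
// rather than being called directly; the assumed pattern looks roughly like:
//
//     GPRTemporary scratch(this);          // picks a free GPR, spilling a victim if needed
//     GPRReg scratchGPR = scratch.gpr();
//     // ... emit code that clobbers scratchGPR ...
//
// The exact GPRTemporary interface is an assumption based on its forward declaration.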
// machine registers first (by locking VirtualRegisters that are already
// in machine registers before filling those that are not, we attempt to
// avoid spilling values we will need immediately).
- bool isFilled(NodeIndex nodeIndex)
+ bool isFilled(Node* node)
{
- VirtualRegister virtualRegister = at(nodeIndex).virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
- return info.registerFormat() != DataFormatNone;
+ return generationInfo(node).registerFormat() != DataFormatNone;
}
- bool isFilledDouble(NodeIndex nodeIndex)
+ bool isFilledDouble(Node* node)
{
- VirtualRegister virtualRegister = at(nodeIndex).virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
- return info.registerFormat() == DataFormatDouble;
+ return generationInfo(node).registerFormat() == DataFormatDouble;
}
// Called on an operand once it has been consumed by a parent node.
- void use(NodeIndex nodeIndex)
+ void use(Node* node)
{
- VirtualRegister virtualRegister = at(nodeIndex).virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
+ if (!node->hasResult())
+ return;
+ GenerationInfo& info = generationInfo(node);
// use() returns true when the value becomes dead, and any
// associated resources may be freed.
- if (!info.use())
+ if (!info.use(*m_stream))
return;
// Release the associated machine registers.
else if (registerFormat != DataFormatNone)
m_gprs.release(info.gpr());
#elif USE(JSVALUE32_64)
- if (registerFormat == DataFormatDouble || registerFormat == DataFormatJSDouble)
+ if (registerFormat == DataFormatDouble)
m_fprs.release(info.fpr());
else if (registerFormat & DataFormatJS) {
m_gprs.release(info.tagGPR());
}
void use(Edge nodeUse)
{
- use(nodeUse.index());
+ use(nodeUse.node());
+ }
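// Illustrative sketch (not part of this patch): the assumed use()/canReuse() lifecycle.
// After a node's code has been generated, each child edge is consumed exactly once via
// useChildren() (declared below), which calls use() on every child edge:
//
//     if (canReuse(node->child1()))
//         { /* the child's register may be recycled for this node's result */ }
//     useChildren(node);   // when a child's use count reaches zero, its machine
//                          // registers are released, as described above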
+
+ RegisterSet usedRegisters();
+
+ bool masqueradesAsUndefinedWatchpointIsStillValid(const CodeOrigin& codeOrigin)
+ {
+ return m_jit.graph().masqueradesAsUndefinedWatchpointIsStillValid(codeOrigin);
+ }
+ bool masqueradesAsUndefinedWatchpointIsStillValid()
+ {
+ return masqueradesAsUndefinedWatchpointIsStillValid(m_currentNode->origin.semantic);
}
- static void markCellCard(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2);
- static void writeBarrier(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, WriteBarrierUseKind);
+#if ENABLE(GGC)
+ void storeToWriteBarrierBuffer(GPRReg cell, GPRReg scratch1, GPRReg scratch2);
- void writeBarrier(GPRReg ownerGPR, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
- void writeBarrier(GPRReg ownerGPR, JSCell* value, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg, GPRReg scratchGPR2 = InvalidGPRReg);
- void writeBarrier(JSCell* owner, GPRReg valueGPR, Edge valueUse, WriteBarrierUseKind, GPRReg scratchGPR1 = InvalidGPRReg);
+ void writeBarrier(GPRReg owner, GPRReg scratch1, GPRReg scratch2);
+
+ void writeBarrier(GPRReg owner, GPRReg value, Edge valueUse, GPRReg scratch1, GPRReg scratch2);
+#endif
+ void compileStoreBarrier(Node*);
static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg)
{
// Called by the speculative operand types, below, to fill operands into
// machine registers, implicitly generating speculation checks as needed.
- GPRReg fillSpeculateInt(NodeIndex, DataFormat& returnFormat);
- GPRReg fillSpeculateIntStrict(NodeIndex);
- FPRReg fillSpeculateDouble(NodeIndex);
- GPRReg fillSpeculateCell(NodeIndex);
- GPRReg fillSpeculateBoolean(NodeIndex);
- GeneratedOperandType checkGeneratedTypeForToInt32(NodeIndex);
-
-private:
- void compile(Node&);
- void compileMovHint(Node&);
- void compile(BasicBlock&);
+ GPRReg fillSpeculateInt32(Edge, DataFormat& returnFormat);
+ GPRReg fillSpeculateInt32Strict(Edge);
+ GPRReg fillSpeculateInt52(Edge, DataFormat desiredFormat);
+ FPRReg fillSpeculateDouble(Edge);
+ GPRReg fillSpeculateCell(Edge);
+ GPRReg fillSpeculateBoolean(Edge);
+ GeneratedOperandType checkGeneratedTypeForToInt32(Node*);
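// Illustrative sketch (not part of this patch): these fill methods back the
// Speculate*Operand classes forward-declared above. A node implementation is assumed
// to obtain a checked operand roughly like this:
//
//     SpeculateInt32Operand op1(this, node->child1());  // fillSpeculateInt32() on first use
//     GPRReg op1GPR = op1.gpr();                        // value now in a GPR; a speculation
//                                                       // check (OSR exit) was emitted if the
//                                                       // input was not already an Int32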
+
+ void addSlowPathGenerator(std::unique_ptr<SlowPathGenerator>);
+ void runSlowPathGenerators();
+
+ void compile(Node*);
+ void noticeOSRBirth(Node*);
+ void bail(AbortReason);
+ void compileCurrentBlock();
void checkArgumentTypes();
// These methods are used when generating 'unexpected'
// calls out from JIT code to C++ helper routines -
// they spill all live values to the appropriate
- // slots in the RegisterFile without changing any state
+ // slots in the JSStack without changing any state
// in the GenerationInfo.
- void silentSpillGPR(VirtualRegister spillMe, GPRReg source)
+ SilentRegisterSavePlan silentSavePlanForGPR(VirtualRegister spillMe, GPRReg source);
+ SilentRegisterSavePlan silentSavePlanForFPR(VirtualRegister spillMe, FPRReg source);
+ void silentSpill(const SilentRegisterSavePlan&);
+ void silentFill(const SilentRegisterSavePlan&, GPRReg canTrample);
+
+ template<typename CollectionType>
+ void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, GPRReg exclude, GPRReg exclude2 = InvalidGPRReg, FPRReg fprExclude = InvalidFPRReg)
{
- GenerationInfo& info = m_generationInfo[spillMe];
- ASSERT(info.registerFormat() != DataFormatNone);
- ASSERT(info.registerFormat() != DataFormatDouble);
-
- if (!info.needsSpill())
- return;
-
- DataFormat registerFormat = info.registerFormat();
-
-#if USE(JSVALUE64)
- ASSERT(info.gpr() == source);
- if (registerFormat == DataFormatInteger)
- m_jit.store32(source, JITCompiler::addressFor(spillMe));
- else {
- ASSERT(registerFormat & DataFormatJS || registerFormat == DataFormatCell || registerFormat == DataFormatStorage);
- m_jit.storePtr(source, JITCompiler::addressFor(spillMe));
+ ASSERT(plans.isEmpty());
+ for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
+ GPRReg gpr = iter.regID();
+ if (iter.name().isValid() && gpr != exclude && gpr != exclude2) {
+ SilentRegisterSavePlan plan = silentSavePlanForGPR(iter.name(), gpr);
+ if (doSpill)
+ silentSpill(plan);
+ plans.append(plan);
+ }
}
-#elif USE(JSVALUE32_64)
- if (registerFormat & DataFormatJS) {
- ASSERT(info.tagGPR() == source || info.payloadGPR() == source);
- m_jit.store32(source, source == info.tagGPR() ? JITCompiler::tagFor(spillMe) : JITCompiler::payloadFor(spillMe));
- } else {
- ASSERT(info.gpr() == source);
- m_jit.store32(source, JITCompiler::payloadFor(spillMe));
+ for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
+ if (iter.name().isValid() && iter.regID() != fprExclude) {
+ SilentRegisterSavePlan plan = silentSavePlanForFPR(iter.name(), iter.regID());
+ if (doSpill)
+ silentSpill(plan);
+ plans.append(plan);
+ }
}
-#endif
}
- void silentSpillFPR(VirtualRegister spillMe, FPRReg source)
+ template<typename CollectionType>
+ void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, NoResultTag)
{
- GenerationInfo& info = m_generationInfo[spillMe];
- ASSERT(info.registerFormat() == DataFormatDouble);
-
- if (!info.needsSpill()) {
- // it's either a constant or it's already been spilled
- ASSERT(at(info.nodeIndex()).hasConstant() || info.spillFormat() != DataFormatNone);
- return;
- }
-
- // it's neither a constant nor has it been spilled.
- ASSERT(!at(info.nodeIndex()).hasConstant());
- ASSERT(info.spillFormat() == DataFormatNone);
- ASSERT(info.fpr() == source);
-
- m_jit.storeDouble(source, JITCompiler::addressFor(spillMe));
+ silentSpillAllRegistersImpl(doSpill, plans, InvalidGPRReg, InvalidGPRReg, InvalidFPRReg);
}
-
- void silentFillGPR(VirtualRegister spillMe, GPRReg target)
+ template<typename CollectionType>
+ void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, FPRReg exclude)
{
- GenerationInfo& info = m_generationInfo[spillMe];
-
- NodeIndex nodeIndex = info.nodeIndex();
- Node& node = at(nodeIndex);
- ASSERT(info.registerFormat() != DataFormatNone);
- ASSERT(info.registerFormat() != DataFormatDouble);
- DataFormat registerFormat = info.registerFormat();
-
- if (registerFormat == DataFormatInteger) {
- ASSERT(info.gpr() == target);
- ASSERT(isJSInteger(info.registerFormat()));
- if (node.hasConstant()) {
- ASSERT(isInt32Constant(nodeIndex));
- m_jit.move(Imm32(valueOfInt32Constant(nodeIndex)), target);
- } else
- m_jit.load32(JITCompiler::payloadFor(spillMe), target);
- return;
- }
-
- if (registerFormat == DataFormatBoolean) {
-#if USE(JSVALUE64)
- ASSERT_NOT_REACHED();
-#elif USE(JSVALUE32_64)
- ASSERT(info.gpr() == target);
- if (node.hasConstant()) {
- ASSERT(isBooleanConstant(nodeIndex));
- m_jit.move(TrustedImm32(valueOfBooleanConstant(nodeIndex)), target);
- } else
- m_jit.load32(JITCompiler::payloadFor(spillMe), target);
-#endif
- return;
- }
-
- if (registerFormat == DataFormatCell) {
- ASSERT(info.gpr() == target);
- if (node.hasConstant()) {
- JSValue value = valueOfJSConstant(nodeIndex);
- ASSERT(value.isCell());
- m_jit.move(TrustedImmPtr(value.asCell()), target);
- } else
- m_jit.loadPtr(JITCompiler::payloadFor(spillMe), target);
- return;
- }
-
- if (registerFormat == DataFormatStorage) {
- ASSERT(info.gpr() == target);
- m_jit.loadPtr(JITCompiler::addressFor(spillMe), target);
- return;
- }
-
- ASSERT(registerFormat & DataFormatJS);
-#if USE(JSVALUE64)
- ASSERT(info.gpr() == target);
- if (node.hasConstant()) {
- if (valueOfJSConstant(nodeIndex).isCell())
- m_jit.move(valueOfJSConstantAsImmPtr(nodeIndex).asTrustedImmPtr(), target);
- else
- m_jit.move(valueOfJSConstantAsImmPtr(nodeIndex), target);
- } else if (info.spillFormat() == DataFormatInteger) {
- ASSERT(registerFormat == DataFormatJSInteger);
- m_jit.load32(JITCompiler::payloadFor(spillMe), target);
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, target);
- } else if (info.spillFormat() == DataFormatDouble) {
- ASSERT(registerFormat == DataFormatJSDouble);
- m_jit.loadPtr(JITCompiler::addressFor(spillMe), target);
- m_jit.subPtr(GPRInfo::tagTypeNumberRegister, target);
- } else
- m_jit.loadPtr(JITCompiler::addressFor(spillMe), target);
-#else
- ASSERT(info.tagGPR() == target || info.payloadGPR() == target);
- if (node.hasConstant()) {
- JSValue v = valueOfJSConstant(nodeIndex);
- m_jit.move(info.tagGPR() == target ? Imm32(v.tag()) : Imm32(v.payload()), target);
- } else if (info.payloadGPR() == target)
- m_jit.load32(JITCompiler::payloadFor(spillMe), target);
- else { // Fill the Tag
- switch (info.spillFormat()) {
- case DataFormatInteger:
- ASSERT(registerFormat == DataFormatJSInteger);
- m_jit.move(TrustedImm32(JSValue::Int32Tag), target);
- break;
- case DataFormatCell:
- ASSERT(registerFormat == DataFormatJSCell);
- m_jit.move(TrustedImm32(JSValue::CellTag), target);
- break;
- case DataFormatBoolean:
- ASSERT(registerFormat == DataFormatJSBoolean);
- m_jit.move(TrustedImm32(JSValue::BooleanTag), target);
- break;
- default:
- m_jit.load32(JITCompiler::tagFor(spillMe), target);
- break;
- }
- }
-#endif
+ silentSpillAllRegistersImpl(doSpill, plans, InvalidGPRReg, InvalidGPRReg, exclude);
}
-
- void silentFillFPR(VirtualRegister spillMe, GPRReg canTrample, FPRReg target)
+#if USE(JSVALUE32_64)
+ template<typename CollectionType>
+ void silentSpillAllRegistersImpl(bool doSpill, CollectionType& plans, JSValueRegs exclude)
{
- GenerationInfo& info = m_generationInfo[spillMe];
- ASSERT(info.fpr() == target);
-
- NodeIndex nodeIndex = info.nodeIndex();
- Node& node = at(nodeIndex);
-#if USE(JSVALUE64)
- ASSERT(info.registerFormat() == DataFormatDouble);
-
- if (node.hasConstant()) {
- ASSERT(isNumberConstant(nodeIndex));
- m_jit.move(ImmPtr(bitwise_cast<void*>(valueOfNumberConstant(nodeIndex))), canTrample);
- m_jit.movePtrToDouble(canTrample, target);
- return;
- }
-
- if (info.spillFormat() != DataFormatNone && info.spillFormat() != DataFormatDouble) {
- // it was already spilled previously and not as a double, which means we need unboxing.
- ASSERT(info.spillFormat() & DataFormatJS);
- m_jit.loadPtr(JITCompiler::addressFor(spillMe), canTrample);
- unboxDouble(canTrample, target);
- return;
- }
-
- m_jit.loadDouble(JITCompiler::addressFor(spillMe), target);
-#elif USE(JSVALUE32_64)
- UNUSED_PARAM(canTrample);
- ASSERT(info.registerFormat() == DataFormatDouble || info.registerFormat() == DataFormatJSDouble);
- if (node.hasConstant()) {
- ASSERT(isNumberConstant(nodeIndex));
- m_jit.loadDouble(addressOfDoubleConstant(nodeIndex), target);
- } else
- m_jit.loadDouble(JITCompiler::addressFor(spillMe), target);
-#endif
+ silentSpillAllRegistersImpl(doSpill, plans, exclude.tagGPR(), exclude.payloadGPR());
}
-
- void silentSpillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg)
+#endif
+
+ void silentSpillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg, FPRReg fprExclude = InvalidFPRReg)
{
- for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
- GPRReg gpr = iter.regID();
- if (iter.name() != InvalidVirtualRegister && gpr != exclude && gpr != exclude2)
- silentSpillGPR(iter.name(), gpr);
- }
- for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
- if (iter.name() != InvalidVirtualRegister)
- silentSpillFPR(iter.name(), iter.regID());
- }
+ silentSpillAllRegistersImpl(true, m_plans, exclude, exclude2, fprExclude);
}
void silentSpillAllRegisters(FPRReg exclude)
{
- for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
- if (iter.name() != InvalidVirtualRegister)
- silentSpillGPR(iter.name(), iter.regID());
- }
- for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
- FPRReg fpr = iter.regID();
- if (iter.name() != InvalidVirtualRegister && fpr != exclude)
- silentSpillFPR(iter.name(), fpr);
- }
+ silentSpillAllRegisters(InvalidGPRReg, InvalidGPRReg, exclude);
}
-
- void silentFillAllRegisters(GPRReg exclude, GPRReg exclude2 = InvalidGPRReg)
+
+ static GPRReg pickCanTrample(GPRReg exclude)
{
- GPRReg canTrample = GPRInfo::regT0;
- if (exclude == GPRInfo::regT0)
- canTrample = GPRInfo::regT1;
-
- for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
- if (iter.name() != InvalidVirtualRegister)
- silentFillFPR(iter.name(), canTrample, iter.regID());
- }
- for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
- GPRReg gpr = iter.regID();
- if (iter.name() != InvalidVirtualRegister && gpr != exclude && gpr != exclude2)
- silentFillGPR(iter.name(), gpr);
+ GPRReg result = GPRInfo::regT0;
+ if (result == exclude)
+ result = GPRInfo::regT1;
+ return result;
+ }
+ static GPRReg pickCanTrample(FPRReg)
+ {
+ return GPRInfo::regT0;
+ }
+ static GPRReg pickCanTrample(NoResultTag)
+ {
+ return GPRInfo::regT0;
+ }
+
+#if USE(JSVALUE32_64)
+ static GPRReg pickCanTrample(JSValueRegs exclude)
+ {
+ GPRReg result = GPRInfo::regT0;
+ if (result == exclude.tagGPR()) {
+ result = GPRInfo::regT1;
+ if (result == exclude.payloadGPR())
+ result = GPRInfo::regT2;
+ } else if (result == exclude.payloadGPR()) {
+ result = GPRInfo::regT1;
+ if (result == exclude.tagGPR())
+ result = GPRInfo::regT2;
}
+ return result;
}
- void silentFillAllRegisters(FPRReg exclude)
+#endif
+
+ template<typename RegisterType>
+ void silentFillAllRegisters(RegisterType exclude)
{
- GPRReg canTrample = GPRInfo::regT0;
+ GPRReg canTrample = pickCanTrample(exclude);
- for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
- FPRReg fpr = iter.regID();
- if (iter.name() != InvalidVirtualRegister && fpr != exclude)
- silentFillFPR(iter.name(), canTrample, fpr);
- }
- for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
- if (iter.name() != InvalidVirtualRegister)
- silentFillGPR(iter.name(), iter.regID());
+ while (!m_plans.isEmpty()) {
+ SilentRegisterSavePlan& plan = m_plans.last();
+ silentFill(plan, canTrample);
+ m_plans.removeLast();
}
}
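// Illustrative sketch (not part of this patch): silentSpillAllRegisters() and
// silentFillAllRegisters() are assumed to bracket an unexpected call out to C++ made in
// the middle of a node, preserving live registers without touching the GenerationInfo:
//
//     silentSpillAllRegisters(resultGPR);              // records plans, spills all but result
//     callOperation(operationFoo, resultGPR, argGPR);  // operationFoo is a hypothetical helper
//     silentFillAllRegisters(resultGPR);               // replays the recorded plans, last first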
{
return boxDouble(fpr, allocate());
}
+
+ void boxInt52(GPRReg sourceGPR, GPRReg targetGPR, DataFormat);
#elif USE(JSVALUE32_64)
void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
{
m_jit.unboxDouble(tagGPR, payloadGPR, fpr, scratchFPR);
}
#endif
+ void boxDouble(FPRReg fpr, JSValueRegs regs)
+ {
+ m_jit.boxDouble(fpr, regs);
+ }
- // Spill a VirtualRegister to the RegisterFile.
+ // Spill a VirtualRegister to the JSStack.
void spill(VirtualRegister spillMe)
{
- GenerationInfo& info = m_generationInfo[spillMe];
+ GenerationInfo& info = generationInfoFromVirtualRegister(spillMe);
#if USE(JSVALUE32_64)
if (info.registerFormat() == DataFormatNone) // it has been spilled. JS values which have two GPRs can reach here
return;
#endif
// Check the GenerationInfo to see if this value needs writing
- // to the RegisterFile - if not, mark it as spilled & return.
+ // to the JSStack - if not, mark it as spilled & return.
if (!info.needsSpill()) {
- info.setSpilled();
+ info.setSpilled(*m_stream, spillMe);
return;
}
// This is special, since it's not a JS value - as in it's not visible to JS
// code.
m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe));
- info.spill(DataFormatStorage);
+ info.spill(*m_stream, spillMe, DataFormatStorage);
return;
}
- case DataFormatInteger: {
+ case DataFormatInt32: {
m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
- info.spill(DataFormatInteger);
+ info.spill(*m_stream, spillMe, DataFormatInt32);
return;
}
#if USE(JSVALUE64)
case DataFormatDouble: {
m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
- info.spill(DataFormatDouble);
+ info.spill(*m_stream, spillMe, DataFormatDouble);
+ return;
+ }
+
+ case DataFormatInt52:
+ case DataFormatStrictInt52: {
+ m_jit.store64(info.gpr(), JITCompiler::addressFor(spillMe));
+ info.spill(*m_stream, spillMe, spillFormat);
return;
}
default:
// The following code handles JSValues, int32s, and cells.
- ASSERT(spillFormat == DataFormatCell || spillFormat & DataFormatJS);
+ RELEASE_ASSERT(spillFormat == DataFormatCell || spillFormat & DataFormatJS);
GPRReg reg = info.gpr();
// We need to box int32 and cell values ...
// but on JSVALUE64 boxing a cell is a no-op!
- if (spillFormat == DataFormatInteger)
- m_jit.orPtr(GPRInfo::tagTypeNumberRegister, reg);
+ if (spillFormat == DataFormatInt32)
+ m_jit.or64(GPRInfo::tagTypeNumberRegister, reg);
// Spill the value, and record it as spilled in its boxed form.
- m_jit.storePtr(reg, JITCompiler::addressFor(spillMe));
- info.spill((DataFormat)(spillFormat | DataFormatJS));
+ m_jit.store64(reg, JITCompiler::addressFor(spillMe));
+ info.spill(*m_stream, spillMe, (DataFormat)(spillFormat | DataFormatJS));
return;
#elif USE(JSVALUE32_64)
case DataFormatCell:
case DataFormatBoolean: {
m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe));
- info.spill(spillFormat);
+ info.spill(*m_stream, spillMe, spillFormat);
return;
}
- case DataFormatDouble:
- case DataFormatJSDouble: {
+ case DataFormatDouble: {
// On JSVALUE32_64 boxing a double is a no-op.
m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe));
- info.spill(DataFormatJSDouble);
+ info.spill(*m_stream, spillMe, DataFormatDouble);
return;
}
default:
// The following code handles JSValues.
- ASSERT(spillFormat & DataFormatJS);
+ RELEASE_ASSERT(spillFormat & DataFormatJS);
m_jit.store32(info.tagGPR(), JITCompiler::tagFor(spillMe));
m_jit.store32(info.payloadGPR(), JITCompiler::payloadFor(spillMe));
- info.spill(spillFormat);
+ info.spill(*m_stream, spillMe, spillFormat);
return;
#endif
}
}
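// Note (not part of this patch): on JSVALUE64 the boxing above relies on the usual JSValue
// encoding, where tagTypeNumberRegister is assumed to hold TagTypeNumber
// (0xffff000000000000). For an int32 value v:
//
//     boxed = TagTypeNumber | zeroExtend32(v)    // DataFormatInt32 -> DataFormatJSInt32
//
// which is what or64(GPRInfo::tagTypeNumberRegister, reg) computes, and why a cell pointer
// can be stored as-is (its high tag bits are already zero).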
- bool isStrictInt32(NodeIndex);
+ bool isKnownInteger(Node* node) { return m_state.forNode(node).isType(SpecInt32); }
+ bool isKnownCell(Node* node) { return m_state.forNode(node).isType(SpecCell); }
- bool isKnownInteger(NodeIndex);
- bool isKnownNumeric(NodeIndex);
- bool isKnownCell(NodeIndex);
-
- bool isKnownNotInteger(NodeIndex);
- bool isKnownNotNumber(NodeIndex);
-
- bool isKnownNotCell(NodeIndex);
+ bool isKnownNotInteger(Node* node) { return !(m_state.forNode(node).m_type & SpecInt32); }
+ bool isKnownNotNumber(Node* node) { return !(m_state.forNode(node).m_type & SpecFullNumber); }
+ bool isKnownNotCell(Node* node) { return !(m_state.forNode(node).m_type & SpecCell); }
- // Checks/accessors for constant values.
- bool isConstant(NodeIndex nodeIndex) { return m_jit.graph().isConstant(nodeIndex); }
- bool isJSConstant(NodeIndex nodeIndex) { return m_jit.graph().isJSConstant(nodeIndex); }
- bool isInt32Constant(NodeIndex nodeIndex) { return m_jit.graph().isInt32Constant(nodeIndex); }
- bool isDoubleConstant(NodeIndex nodeIndex) { return m_jit.graph().isDoubleConstant(nodeIndex); }
- bool isNumberConstant(NodeIndex nodeIndex) { return m_jit.graph().isNumberConstant(nodeIndex); }
- bool isBooleanConstant(NodeIndex nodeIndex) { return m_jit.graph().isBooleanConstant(nodeIndex); }
- bool isFunctionConstant(NodeIndex nodeIndex) { return m_jit.graph().isFunctionConstant(nodeIndex); }
- int32_t valueOfInt32Constant(NodeIndex nodeIndex) { return m_jit.graph().valueOfInt32Constant(nodeIndex); }
- double valueOfNumberConstant(NodeIndex nodeIndex) { return m_jit.graph().valueOfNumberConstant(nodeIndex); }
- int32_t valueOfNumberConstantAsInt32(NodeIndex nodeIndex)
- {
- if (isInt32Constant(nodeIndex))
- return valueOfInt32Constant(nodeIndex);
- return JSC::toInt32(valueOfNumberConstant(nodeIndex));
- }
-#if USE(JSVALUE32_64)
- void* addressOfDoubleConstant(NodeIndex nodeIndex) { return m_jit.addressOfDoubleConstant(nodeIndex); }
-#endif
- JSValue valueOfJSConstant(NodeIndex nodeIndex) { return m_jit.graph().valueOfJSConstant(nodeIndex); }
- bool valueOfBooleanConstant(NodeIndex nodeIndex) { return m_jit.graph().valueOfBooleanConstant(nodeIndex); }
- JSFunction* valueOfFunctionConstant(NodeIndex nodeIndex) { return m_jit.graph().valueOfFunctionConstant(nodeIndex); }
- bool isNullConstant(NodeIndex nodeIndex)
+ UniquedStringImpl* identifierUID(unsigned index)
{
- if (!isConstant(nodeIndex))
- return false;
- return valueOfJSConstant(nodeIndex).isNull();
+ return m_jit.graph().identifiers()[index];
}
- Identifier* identifier(unsigned index)
- {
- return &m_jit.codeBlock()->identifier(index);
- }
-
- // Spill all VirtualRegisters back to the RegisterFile.
+ // Spill all VirtualRegisters back to the JSStack.
void flushRegisters()
{
for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
- if (iter.name() != InvalidVirtualRegister) {
+ if (iter.name().isValid()) {
spill(iter.name());
iter.release();
}
}
for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
- if (iter.name() != InvalidVirtualRegister) {
+ if (iter.name().isValid()) {
spill(iter.name());
iter.release();
}
}
}
-#ifndef NDEBUG
// Used to ASSERT flushRegisters() has been called prior to
// calling out from JIT code to a C helper function.
bool isFlushed()
{
for (gpr_iterator iter = m_gprs.begin(); iter != m_gprs.end(); ++iter) {
- if (iter.name() != InvalidVirtualRegister)
+ if (iter.name().isValid())
return false;
}
for (fpr_iterator iter = m_fprs.begin(); iter != m_fprs.end(); ++iter) {
- if (iter.name() != InvalidVirtualRegister)
+ if (iter.name().isValid())
return false;
}
return true;
}
-#endif
#if USE(JSVALUE64)
- MacroAssembler::ImmPtr valueOfJSConstantAsImmPtr(NodeIndex nodeIndex)
+ static MacroAssembler::Imm64 valueOfJSConstantAsImm64(Node* node)
{
- return MacroAssembler::ImmPtr(JSValue::encode(valueOfJSConstant(nodeIndex)));
+ return MacroAssembler::Imm64(JSValue::encode(node->asJSValue()));
}
#endif
m_jit.xor32(Imm32(imm), op1, result);
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
}
void bitOp(NodeType op, GPRReg op1, GPRReg op2, GPRReg result)
m_jit.xor32(op1, op2, result);
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
}
void shiftOp(NodeType op, GPRReg op1, int32_t shiftAmount, GPRReg result)
m_jit.urshift32(op1, Imm32(shiftAmount), result);
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
}
void shiftOp(NodeType op, GPRReg op1, GPRReg shiftAmount, GPRReg result)
m_jit.urshift32(op1, shiftAmount, result);
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
}
}
// Returns the index of the branch node if peephole is okay, UINT_MAX otherwise.
unsigned detectPeepHoleBranch()
{
- BasicBlock* block = m_jit.graph().m_blocks[m_block].get();
-
// Check that no intervening nodes will be generated.
- for (unsigned index = m_indexInBlock + 1; index < block->size() - 1; ++index) {
- NodeIndex nodeIndex = block->at(index);
- if (at(nodeIndex).shouldGenerate())
- return UINT_MAX;
+ for (unsigned index = m_indexInBlock + 1; index < m_block->size() - 1; ++index) {
+ Node* node = m_block->at(index);
+ if (!node->shouldGenerate())
+ continue;
+ // Check if it's a Phantom that can be safely ignored.
+ if (node->op() == Phantom && !node->child1())
+ continue;
+ return UINT_MAX;
}
// Check if the lastNode is a branch on this node.
- Node& lastNode = at(block->last());
- return lastNode.op() == Branch && lastNode.child1().index() == m_compileIndex ? block->size() - 1 : UINT_MAX;
+ Node* lastNode = m_block->terminal();
+ return lastNode->op() == Branch && lastNode->child1() == m_currentNode ? m_block->size() - 1 : UINT_MAX;
}
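// Illustrative sketch (not part of this patch): the assumed caller pattern for the peephole
// above, as used by the compare/strict-equality helpers declared below:
//
//     unsigned branchIndexInBlock = detectPeepHoleBranch();
//     if (branchIndexInBlock != UINT_MAX) {
//         Node* branchNode = m_block->at(branchIndexInBlock);
//         // ... fuse this compare with the branch, then skip ahead past it ...
//         m_indexInBlock = branchIndexInBlock;
//         m_currentNode = branchNode;
//     }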
- void nonSpeculativeValueToNumber(Node&);
- void nonSpeculativeValueToInt32(Node&);
- void nonSpeculativeUInt32ToNumber(Node&);
+ void compileMovHint(Node*);
+ void compileMovHintAndCheck(Node*);
- enum SpillRegistersMode { NeedToSpill, DontSpill };
#if USE(JSVALUE64)
- JITCompiler::Call cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
- void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
+ void cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
+ void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
#elif USE(JSVALUE32_64)
- JITCompiler::Call cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
- void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
+ void cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
+ void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
#endif
+
+ void compileIn(Node*);
+
+ void compileBaseValueStoreBarrier(Edge& baseEdge, Edge& valueEdge);
void nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert = false);
- void nonSpeculativePeepholeBranchNull(Edge operand, NodeIndex branchNodeIndex, bool invert = false);
- bool nonSpeculativeCompareNull(Node&, Edge operand, bool invert = false);
+ void nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert = false);
+ bool nonSpeculativeCompareNull(Node*, Edge operand, bool invert = false);
- void nonSpeculativePeepholeBranch(Node&, NodeIndex branchNodeIndex, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
- void nonSpeculativeNonPeepholeCompare(Node&, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
- bool nonSpeculativeCompare(Node&, MacroAssembler::RelationalCondition, S_DFGOperation_EJJ helperFunction);
+ void nonSpeculativePeepholeBranch(Node*, Node* branchNode, MacroAssembler::RelationalCondition, S_JITOperation_EJJ helperFunction);
+ void nonSpeculativeNonPeepholeCompare(Node*, MacroAssembler::RelationalCondition, S_JITOperation_EJJ helperFunction);
+ bool nonSpeculativeCompare(Node*, MacroAssembler::RelationalCondition, S_JITOperation_EJJ helperFunction);
- void nonSpeculativePeepholeStrictEq(Node&, NodeIndex branchNodeIndex, bool invert = false);
- void nonSpeculativeNonPeepholeStrictEq(Node&, bool invert = false);
- bool nonSpeculativeStrictEq(Node&, bool invert = false);
+ void nonSpeculativePeepholeStrictEq(Node*, Node* branchNode, bool invert = false);
+ void nonSpeculativeNonPeepholeStrictEq(Node*, bool invert = false);
+ bool nonSpeculativeStrictEq(Node*, bool invert = false);
- void compileInstanceOfForObject(Node&, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchAndResultReg);
- void compileInstanceOf(Node&);
+ void compileInstanceOfForObject(Node*, GPRReg valueReg, GPRReg prototypeReg, GPRReg scratchAndResultReg, GPRReg scratch2Reg);
+ void compileInstanceOf(Node*);
- // Access to our fixed callee CallFrame.
- MacroAssembler::Address callFrameSlot(int slot)
- {
- return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)));
- }
-
- // Access to our fixed callee CallFrame.
- MacroAssembler::Address argumentSlot(int argument)
- {
- return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)));
- }
-
- MacroAssembler::Address callFrameTagSlot(int slot)
- {
- return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
- }
-
- MacroAssembler::Address callFramePayloadSlot(int slot)
- {
- return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + slot) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
- }
-
- MacroAssembler::Address argumentTagSlot(int argument)
- {
- return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
- }
-
- MacroAssembler::Address argumentPayloadSlot(int argument)
- {
- return MacroAssembler::Address(GPRInfo::callFrameRegister, (m_jit.codeBlock()->m_numCalleeRegisters + argumentToOperand(argument)) * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
- }
-
- void emitCall(Node&);
+ void emitCall(Node*);
// Called once a node has completed code generation but prior to setting
// its result, to free up its children. (This must happen prior to setting
// the node's result, since the node may have the same VirtualRegister as
// a child, and as such will use the same GenerationInfo).
- void useChildren(Node&);
+ void useChildren(Node*);
// These methods are called to initialize the GenerationInfo
// to describe the result of an operation.
- void integerResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatInteger, UseChildrenMode mode = CallUseChildren)
+ void int32Result(GPRReg reg, Node* node, DataFormat format = DataFormatInt32, UseChildrenMode mode = CallUseChildren)
{
- Node& node = at(nodeIndex);
if (mode == CallUseChildren)
useChildren(node);
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
+ VirtualRegister virtualRegister = node->virtualRegister();
+ GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
- if (format == DataFormatInteger) {
+ if (format == DataFormatInt32) {
m_jit.jitAssertIsInt32(reg);
m_gprs.retain(reg, virtualRegister, SpillOrderInteger);
- info.initInteger(nodeIndex, node.refCount(), reg);
+ info.initInt32(node, node->refCount(), reg);
} else {
#if USE(JSVALUE64)
- ASSERT(format == DataFormatJSInteger);
+ RELEASE_ASSERT(format == DataFormatJSInt32);
m_jit.jitAssertIsJSInt32(reg);
m_gprs.retain(reg, virtualRegister, SpillOrderJS);
- info.initJSValue(nodeIndex, node.refCount(), reg, format);
+ info.initJSValue(node, node->refCount(), reg, format);
#elif USE(JSVALUE32_64)
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
#endif
}
}
- void integerResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode)
+ void int32Result(GPRReg reg, Node* node, UseChildrenMode mode)
+ {
+ int32Result(reg, node, DataFormatInt32, mode);
+ }
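// Illustrative sketch (not part of this patch): how a node implementation is assumed to
// finish, handing its value to one of the *Result() methods so the GenerationInfo and
// register banks record where the result lives:
//
//     GPRTemporary result(this);
//     bitOp(BitAnd, op1GPR, op2GPR, result.gpr());  // e.g. via bitOp(), defined above
//     int32Result(result.gpr(), node);              // retains the GPR and calls useChildren()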
+ void int52Result(GPRReg reg, Node* node, DataFormat format, UseChildrenMode mode = CallUseChildren)
+ {
+ if (mode == CallUseChildren)
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node->virtualRegister();
+ GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
+
+ m_gprs.retain(reg, virtualRegister, SpillOrderJS);
+ info.initInt52(node, node->refCount(), reg, format);
+ }
+ void int52Result(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
+ {
+ int52Result(reg, node, DataFormatInt52, mode);
+ }
+ void strictInt52Result(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
{
- integerResult(reg, nodeIndex, DataFormatInteger, mode);
+ int52Result(reg, node, DataFormatStrictInt52, mode);
}
- void noResult(NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
+ void noResult(Node* node, UseChildrenMode mode = CallUseChildren)
{
if (mode == UseChildrenCalledExplicitly)
return;
- Node& node = at(nodeIndex);
useChildren(node);
}
- void cellResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
+ void cellResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
{
- Node& node = at(nodeIndex);
if (mode == CallUseChildren)
useChildren(node);
- VirtualRegister virtualRegister = node.virtualRegister();
+ VirtualRegister virtualRegister = node->virtualRegister();
m_gprs.retain(reg, virtualRegister, SpillOrderCell);
- GenerationInfo& info = m_generationInfo[virtualRegister];
- info.initCell(nodeIndex, node.refCount(), reg);
+ GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
+ info.initCell(node, node->refCount(), reg);
}
- void booleanResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
+ void blessedBooleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
{
- Node& node = at(nodeIndex);
- if (mode == CallUseChildren)
- useChildren(node);
-
- VirtualRegister virtualRegister = node.virtualRegister();
- m_gprs.retain(reg, virtualRegister, SpillOrderBoolean);
- GenerationInfo& info = m_generationInfo[virtualRegister];
- info.initBoolean(nodeIndex, node.refCount(), reg);
+#if USE(JSVALUE64)
+ jsValueResult(reg, node, DataFormatJSBoolean, mode);
+#else
+ booleanResult(reg, node, mode);
+#endif
+ }
+ void unblessedBooleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
+ {
+#if USE(JSVALUE64)
+ blessBoolean(reg);
+#endif
+ blessedBooleanResult(reg, node, mode);
}
#if USE(JSVALUE64)
- void jsValueResult(GPRReg reg, NodeIndex nodeIndex, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
+ void jsValueResult(GPRReg reg, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
{
- if (format == DataFormatJSInteger)
+ if (format == DataFormatJSInt32)
m_jit.jitAssertIsJSInt32(reg);
- Node& node = at(nodeIndex);
if (mode == CallUseChildren)
useChildren(node);
- VirtualRegister virtualRegister = node.virtualRegister();
+ VirtualRegister virtualRegister = node->virtualRegister();
m_gprs.retain(reg, virtualRegister, SpillOrderJS);
- GenerationInfo& info = m_generationInfo[virtualRegister];
- info.initJSValue(nodeIndex, node.refCount(), reg, format);
+ GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
+ info.initJSValue(node, node->refCount(), reg, format);
}
- void jsValueResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode)
+ void jsValueResult(GPRReg reg, Node* node, UseChildrenMode mode)
{
- jsValueResult(reg, nodeIndex, DataFormatJS, mode);
+ jsValueResult(reg, node, DataFormatJS, mode);
}
#elif USE(JSVALUE32_64)
- void jsValueResult(GPRReg tag, GPRReg payload, NodeIndex nodeIndex, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
+ void booleanResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
{
- Node& node = at(nodeIndex);
if (mode == CallUseChildren)
useChildren(node);
- VirtualRegister virtualRegister = node.virtualRegister();
+ VirtualRegister virtualRegister = node->virtualRegister();
+ m_gprs.retain(reg, virtualRegister, SpillOrderBoolean);
+ GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
+ info.initBoolean(node, node->refCount(), reg);
+ }
+ void jsValueResult(GPRReg tag, GPRReg payload, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
+ {
+ if (mode == CallUseChildren)
+ useChildren(node);
+
+ VirtualRegister virtualRegister = node->virtualRegister();
m_gprs.retain(tag, virtualRegister, SpillOrderJS);
m_gprs.retain(payload, virtualRegister, SpillOrderJS);
- GenerationInfo& info = m_generationInfo[virtualRegister];
- info.initJSValue(nodeIndex, node.refCount(), tag, payload, format);
+ GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
+ info.initJSValue(node, node->refCount(), tag, payload, format);
}
- void jsValueResult(GPRReg tag, GPRReg payload, NodeIndex nodeIndex, UseChildrenMode mode)
+ void jsValueResult(GPRReg tag, GPRReg payload, Node* node, UseChildrenMode mode)
{
- jsValueResult(tag, payload, nodeIndex, DataFormatJS, mode);
+ jsValueResult(tag, payload, node, DataFormatJS, mode);
}
#endif
- void storageResult(GPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
+ void jsValueResult(JSValueRegs regs, Node* node, DataFormat format = DataFormatJS, UseChildrenMode mode = CallUseChildren)
+ {
+#if USE(JSVALUE64)
+ jsValueResult(regs.gpr(), node, format, mode);
+#else
+ jsValueResult(regs.tagGPR(), regs.payloadGPR(), node, format, mode);
+#endif
+ }
+ void storageResult(GPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
{
- Node& node = at(nodeIndex);
if (mode == CallUseChildren)
useChildren(node);
- VirtualRegister virtualRegister = node.virtualRegister();
+ VirtualRegister virtualRegister = node->virtualRegister();
m_gprs.retain(reg, virtualRegister, SpillOrderStorage);
- GenerationInfo& info = m_generationInfo[virtualRegister];
- info.initStorage(nodeIndex, node.refCount(), reg);
+ GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
+ info.initStorage(node, node->refCount(), reg);
}
- void doubleResult(FPRReg reg, NodeIndex nodeIndex, UseChildrenMode mode = CallUseChildren)
+ void doubleResult(FPRReg reg, Node* node, UseChildrenMode mode = CallUseChildren)
{
- Node& node = at(nodeIndex);
if (mode == CallUseChildren)
useChildren(node);
- VirtualRegister virtualRegister = node.virtualRegister();
+ VirtualRegister virtualRegister = node->virtualRegister();
m_fprs.retain(reg, virtualRegister, SpillOrderDouble);
- GenerationInfo& info = m_generationInfo[virtualRegister];
- info.initDouble(nodeIndex, node.refCount(), reg);
+ GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
+ info.initDouble(node, node->refCount(), reg);
}
- void initConstantInfo(NodeIndex nodeIndex)
+ void initConstantInfo(Node* node)
{
- ASSERT(isInt32Constant(nodeIndex) || isNumberConstant(nodeIndex) || isJSConstant(nodeIndex));
- Node& node = at(nodeIndex);
- m_generationInfo[node.virtualRegister()].initConstant(nodeIndex, node.refCount());
+ ASSERT(node->hasConstant());
+ generationInfo(node).initConstant(node, node->refCount());
}
// These methods add calls to C++ helper functions.
// deal with the fact that a JSValue may be passed in one or two
// machine registers, and delegate the calling-convention-specific
// decision as to how to fill the registers to setupArguments* methods.
-#if USE(JSVALUE64)
- JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg result, void* pointer)
+
+ JITCompiler::Call callOperation(V_JITOperation_E operation)
{
- m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
- return appendCallWithExceptionCheckSetResult(operation, result);
+ m_jit.setupArgumentsExecState();
+ return appendCallWithExceptionCheck(operation);
}
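// Illustrative sketch (not part of this patch): the assumed calling pattern for these
// wrappers. Registers are flushed first; the wrapper then marshals arguments via
// setupArguments*, emits the call with an exception check, and moves the return value
// into the requested register:
//
//     flushRegisters();
//     GPRReg resultGPR = ...;                           // e.g. from a result temporary
//     callOperation(operationFoo, resultGPR, baseGPR);  // operationFoo is hypothetical
//     cellResult(resultGPR, node);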
- JITCompiler::Call callOperation(Z_DFGOperation_D operation, GPRReg result, FPRReg arg1)
+ JITCompiler::Call callOperation(P_JITOperation_E operation, GPRReg result)
{
- m_jit.setupArguments(arg1);
- JITCompiler::Call call = m_jit.appendCall(operation);
- m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
- return call;
+ m_jit.setupArgumentsExecState();
+ return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_DFGOperation_EGI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
+ JITCompiler::Call callOperation(P_JITOperation_EC operation, GPRReg result, GPRReg cell)
{
- m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(cell);
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_DFGOperation_EI operation, GPRReg result, Identifier* identifier)
+ JITCompiler::Call callOperation(P_JITOperation_EO operation, GPRReg result, GPRReg object)
{
- m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(object);
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_DFGOperation_EA operation, GPRReg result, GPRReg arg1)
+ JITCompiler::Call callOperation(P_JITOperation_EOS operation, GPRReg result, GPRReg object, size_t size)
{
- m_jit.setupArgumentsWithExecState(arg1);
+ m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(size));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_DFGOperation_EPS operation, GPRReg result, void* pointer, size_t size)
+ JITCompiler::Call callOperation(P_JITOperation_EOZ operation, GPRReg result, GPRReg object, int32_t size)
{
- m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
+ m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(size));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_DFGOperation_ESS operation, GPRReg result, int startConstant, int numConstants)
+ JITCompiler::Call callOperation(C_JITOperation_EOZ operation, GPRReg result, GPRReg object, int32_t size)
{
- m_jit.setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants));
+ m_jit.setupArgumentsWithExecState(object, TrustedImmPtr(static_cast<size_t>(size)));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg result, GPRReg arg1, void* pointer)
+ JITCompiler::Call callOperation(P_JITOperation_EPS operation, GPRReg result, GPRReg old, size_t size)
{
- m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
+ m_jit.setupArgumentsWithExecState(old, TrustedImmPtr(size));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_DFGOperation_ECI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
+ JITCompiler::Call callOperation(P_JITOperation_ES operation, GPRReg result, size_t size)
{
- m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(size));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg result, GPRReg arg1, Identifier* identifier)
+ JITCompiler::Call callOperation(P_JITOperation_ESJss operation, GPRReg result, size_t index, GPRReg arg1)
{
- m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(index), arg1);
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_DFGOperation_EJA operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ JITCompiler::Call callOperation(P_JITOperation_ESt operation, GPRReg result, Structure* structure)
{
- m_jit.setupArgumentsWithExecState(arg1, arg2);
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg result, GPRReg arg1)
+ JITCompiler::Call callOperation(P_JITOperation_EStZ operation, GPRReg result, Structure* structure, GPRReg arg2)
{
- m_jit.setupArgumentsWithExecState(arg1);
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), arg2);
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(C_DFGOperation_E operation, GPRReg result)
+ JITCompiler::Call callOperation(P_JITOperation_EStZ operation, GPRReg result, Structure* structure, size_t arg2)
{
- m_jit.setupArgumentsExecState();
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImm32(arg2));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, GPRReg arg1)
+ JITCompiler::Call callOperation(P_JITOperation_EStZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
{
- m_jit.setupArgumentsWithExecState(arg1);
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, JSCell* cell)
+ JITCompiler::Call callOperation(P_JITOperation_EStPS operation, GPRReg result, Structure* structure, void* pointer, size_t size)
{
- m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImmPtr(pointer), TrustedImmPtr(size));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(C_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, JSCell* cell)
+ JITCompiler::Call callOperation(P_JITOperation_EStSS operation, GPRReg result, Structure* structure, size_t index, size_t size)
{
- m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImmPtr(index), TrustedImmPtr(size));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(S_DFGOperation_J operation, GPRReg result, GPRReg arg1)
+ JITCompiler::Call callOperation(C_JITOperation_E operation, GPRReg result)
{
- m_jit.setupArguments(arg1);
- return appendCallSetResult(operation, result);
+ m_jit.setupArgumentsExecState();
+ return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(S_DFGOperation_EJ operation, GPRReg result, GPRReg arg1)
+ JITCompiler::Call callOperation(C_JITOperation_EC operation, GPRReg result, GPRReg arg1)
{
m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ JITCompiler::Call callOperation(C_JITOperation_EC operation, GPRReg result, JSCell* cell)
{
- m_jit.setupArgumentsWithExecState(arg1, arg2);
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(S_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ JITCompiler::Call callOperation(C_JITOperation_ECZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
{
m_jit.setupArgumentsWithExecState(arg1, arg2);
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ JITCompiler::Call callOperation(C_JITOperation_ECZC operation, GPRReg result, GPRReg arg1, GPRReg arg2, GPRReg arg3)
{
- m_jit.setupArgumentsWithExecState(arg1, arg2);
+ m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1, MacroAssembler::TrustedImm32 imm)
+ JITCompiler::Call callOperation(C_JITOperation_EJscC operation, GPRReg result, GPRReg arg1, JSCell* cell)
{
- m_jit.setupArgumentsWithExecState(arg1, MacroAssembler::TrustedImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))));
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg result, MacroAssembler::TrustedImm32 imm, GPRReg arg2)
+ JITCompiler::Call callOperation(C_JITOperation_EIcf operation, GPRReg result, InlineCallFrame* inlineCallFrame)
{
- m_jit.setupArgumentsWithExecState(MacroAssembler::TrustedImmPtr(static_cast<const void*>(JSValue::encode(jsNumber(imm.m_value)))), arg2);
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(inlineCallFrame));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ JITCompiler::Call callOperation(C_JITOperation_ESt operation, GPRReg result, Structure* structure)
{
- m_jit.setupArgumentsWithExecState(arg1, arg2);
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_DFGOperation_ECJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ JITCompiler::Call callOperation(C_JITOperation_EStJscSymtab operation, GPRReg result, Structure* structure, GPRReg scope, SymbolTable* table)
{
- m_jit.setupArgumentsWithExecState(arg1, arg2);
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), scope, TrustedImmPtr(table));
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(V_DFGOperation_EC operation, GPRReg arg1)
+ JITCompiler::Call callOperation(C_JITOperation_EStZ operation, GPRReg result, Structure* structure, unsigned knownLength)
{
- m_jit.setupArgumentsWithExecState(arg1);
- return appendCallWithExceptionCheck(operation);
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImm32(knownLength));
+ return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1, GPRReg arg2, void* pointer)
+ JITCompiler::Call callOperation(C_JITOperation_EStZZ operation, GPRReg result, Structure* structure, unsigned knownLength, unsigned minCapacity)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), TrustedImm32(knownLength), TrustedImm32(minCapacity));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(C_JITOperation_EStZ operation, GPRReg result, Structure* structure, GPRReg length)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), length);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(C_JITOperation_EStZZ operation, GPRReg result, Structure* structure, GPRReg length, unsigned minCapacity)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), length, TrustedImm32(minCapacity));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(C_JITOperation_EJssSt operation, GPRReg result, GPRReg arg1, Structure* structure)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(structure));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(C_JITOperation_EJssJss operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(C_JITOperation_EJssJssJss operation, GPRReg result, GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+
+ JITCompiler::Call callOperation(S_JITOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+
+ JITCompiler::Call callOperation(S_JITOperation_EGC operation, GPRReg result, JSGlobalObject* globalObject, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(globalObject), arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+
+ JITCompiler::Call callOperation(C_JITOperation_EGC operation, GPRReg result, JSGlobalObject* globalObject, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(globalObject), arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+
+ JITCompiler::Call callOperation(Jss_JITOperation_EZ operation, GPRReg result, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+
+ JITCompiler::Call callOperation(V_JITOperation_EC operation, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheck(operation);
+ }
+
+ JITCompiler::Call callOperation(V_JITOperation_EC operation, JSCell* arg1)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(arg1));
+ return appendCallWithExceptionCheck(operation);
+ }
+
+ JITCompiler::Call callOperation(V_JITOperation_ECIcf operation, GPRReg arg1, InlineCallFrame* inlineCallFrame)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(inlineCallFrame));
+ return appendCallWithExceptionCheck(operation);
+ }
+ JITCompiler::Call callOperation(V_JITOperation_ECCIcf operation, GPRReg arg1, GPRReg arg2, InlineCallFrame* inlineCallFrame)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(inlineCallFrame));
+ return appendCallWithExceptionCheck(operation);
+ }
+
+ JITCompiler::Call callOperation(V_JITOperation_ECZ operation, GPRReg arg1, int arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2));
+ return appendCallWithExceptionCheck(operation);
+ }
+ JITCompiler::Call callOperation(V_JITOperation_ECC operation, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheck(operation);
+ }
+ JITCompiler::Call callOperation(V_JITOperation_ECC operation, GPRReg arg1, JSCell* arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(arg2));
+ return appendCallWithExceptionCheck(operation);
+ }
+ JITCompiler::Call callOperation(V_JITOperation_ECC operation, JSCell* arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(arg1), arg2);
+ return appendCallWithExceptionCheck(operation);
+ }
+
+ JITCompiler::Call callOperationWithCallFrameRollbackOnException(V_JITOperation_ECb operation, void* pointer)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
+ return appendCallWithCallFrameRollbackOnException(operation);
+ }
+
+ JITCompiler::Call callOperationWithCallFrameRollbackOnException(Z_JITOperation_E operation, GPRReg result)
+ {
+ m_jit.setupArgumentsExecState();
+ return appendCallWithCallFrameRollbackOnExceptionSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(Z_JITOperation_EC operation, GPRReg result, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+
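+ // NoResultTag adapters: discard the call's result by forwarding to the matching overload that takes no result register.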
+ template<typename FunctionType>
+ JITCompiler::Call callOperation(FunctionType operation, NoResultTag)
+ {
+ return callOperation(operation);
+ }
+ template<typename FunctionType, typename ArgumentType1>
+ JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1)
+ {
+ return callOperation(operation, arg1);
+ }
+ template<typename FunctionType, typename ArgumentType1, typename ArgumentType2>
+ JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2)
+ {
+ return callOperation(operation, arg1, arg2);
+ }
+ template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3>
+ JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3)
+ {
+ return callOperation(operation, arg1, arg2, arg3);
+ }
+ template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3, typename ArgumentType4>
+ JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3, ArgumentType4 arg4)
+ {
+ return callOperation(operation, arg1, arg2, arg3, arg4);
+ }
+ template<typename FunctionType, typename ArgumentType1, typename ArgumentType2, typename ArgumentType3, typename ArgumentType4, typename ArgumentType5>
+ JITCompiler::Call callOperation(FunctionType operation, NoResultTag, ArgumentType1 arg1, ArgumentType2 arg2, ArgumentType3 arg3, ArgumentType4 arg4, ArgumentType5 arg5)
+ {
+ return callOperation(operation, arg1, arg2, arg3, arg4, arg5);
+ }
+
+ JITCompiler::Call callOperation(D_JITOperation_ZZ operation, FPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArguments(arg1, arg2);
+ return appendCallSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(D_JITOperation_D operation, FPRReg result, FPRReg arg1)
+ {
+ m_jit.setupArguments(arg1);
+ return appendCallSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(D_JITOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
+ {
+ m_jit.setupArguments(arg1, arg2);
+ return appendCallSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(T_JITOperation_EJss operation, GPRReg result, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(C_JITOperation_EJscZ operation, GPRReg result, GPRReg arg1, int32_t arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(C_JITOperation_EZ operation, GPRReg result, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(C_JITOperation_EZ operation, GPRReg result, int32_t arg1)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImm32(arg1));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+
+ JITCompiler::Call callOperation(J_JITOperation_EJscC operation, GPRReg result, GPRReg arg1, JSCell* cell)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+
+ JITCompiler::Call callOperation(V_JITOperation_EWs operation, WatchpointSet* watchpointSet)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(watchpointSet));
+ return appendCall(operation);
+ }
+
+#if USE(JSVALUE64)
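+ // Under JSVALUE64 an EncodedJSValue fits in a single GPR, so these overloads pass and return JSValues in one register.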
+ JITCompiler::Call callOperation(J_JITOperation_E operation, GPRReg result)
+ {
+ m_jit.setupArgumentsExecState();
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EP operation, GPRReg result, void* pointer)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(Z_JITOperation_D operation, GPRReg result, FPRReg arg1)
+ {
+ m_jit.setupArguments(arg1);
+ JITCompiler::Call call = m_jit.appendCall(operation);
+ m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
+ return call;
+ }
+ JITCompiler::Call callOperation(Q_JITOperation_J operation, GPRReg result, GPRReg value)
+ {
+ m_jit.setupArguments(value);
+ return appendCallSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(Q_JITOperation_D operation, GPRReg result, FPRReg value)
+ {
+ m_jit.setupArguments(value);
+ return appendCallSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EI operation, GPRReg result, UniquedStringImpl* uid)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(uid));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EA operation, GPRReg result, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EAZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EJssZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EPS operation, GPRReg result, void* pointer, size_t size)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_ESS operation, GPRReg result, int startConstant, int numConstants)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EPP operation, GPRReg result, GPRReg arg1, void* pointer)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EC operation, GPRReg result, JSCell* cell)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_ECZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_ESsiCI operation, GPRReg result, StructureStubInfo* stubInfo, GPRReg arg1, const UniquedStringImpl* uid)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg result, StructureStubInfo* stubInfo, GPRReg arg1, UniquedStringImpl* uid)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EDA operation, GPRReg result, FPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EJC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EJZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EJA operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EP operation, GPRReg result, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EZ operation, GPRReg result, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EZ operation, GPRReg result, int32_t arg1)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImm32(arg1));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EZZ operation, GPRReg result, int32_t arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EZIcfZ operation, GPRReg result, int32_t arg1, InlineCallFrame* inlineCallFrame, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), TrustedImmPtr(inlineCallFrame), arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+
+ JITCompiler::Call callOperation(P_JITOperation_EJS operation, GPRReg result, GPRReg value, size_t index)
+ {
+ m_jit.setupArgumentsWithExecState(value, TrustedImmPtr(index));
+ return appendCallSetResult(operation, result);
+ }
+
+ JITCompiler::Call callOperation(P_JITOperation_EStJ operation, GPRReg result, Structure* structure, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+
+ JITCompiler::Call callOperation(C_JITOperation_EJ operation, GPRReg result, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(C_JITOperation_EJJC operation, GPRReg result, GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(C_JITOperation_EJZ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(C_JITOperation_EJZC operation, GPRReg result, GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(S_JITOperation_J operation, GPRReg result, GPRReg arg1)
+ {
+ m_jit.setupArguments(arg1);
+ return appendCallSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(S_JITOperation_EJ operation, GPRReg result, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EJ operation, GPRReg result, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(S_JITOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+
+ JITCompiler::Call callOperation(J_JITOperation_EPP operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg result, GPRReg arg1, MacroAssembler::TrustedImm32 imm)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(imm.m_value))));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg result, MacroAssembler::TrustedImm32 imm, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(MacroAssembler::TrustedImm64(JSValue::encode(jsNumber(imm.m_value))), arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_ECJ operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_ECJ operation, GPRReg result, GPRReg arg1, JSValueRegs arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2.gpr());
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+
+ JITCompiler::Call callOperation(V_JITOperation_EOZD operation, GPRReg arg1, GPRReg arg2, FPRReg arg3)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
+ return appendCallWithExceptionCheck(operation);
+ }
+ JITCompiler::Call callOperation(V_JITOperation_EJ operation, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheck(operation);
+ }
+ JITCompiler::Call callOperation(V_JITOperation_EJPP operation, GPRReg arg1, GPRReg arg2, void* pointer)
{
m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(pointer));
return appendCallWithExceptionCheck(operation);
}
- JITCompiler::Call callOperation(V_DFGOperation_EJCI operation, GPRReg arg1, GPRReg arg2, Identifier* identifier)
+ JITCompiler::Call callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, GPRReg arg1, GPRReg arg2, UniquedStringImpl* uid)
{
- m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, arg2, TrustedImmPtr(uid));
return appendCallWithExceptionCheck(operation);
}
- JITCompiler::Call callOperation(V_DFGOperation_EJJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ JITCompiler::Call callOperation(V_JITOperation_EJJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
{
m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
return appendCallWithExceptionCheck(operation);
}
- JITCompiler::Call callOperation(V_DFGOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ JITCompiler::Call callOperation(V_JITOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
{
m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
return appendCallWithExceptionCheck(operation);
}
- JITCompiler::Call callOperation(V_DFGOperation_EAZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
+
+ JITCompiler::Call callOperation(V_JITOperation_EOZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
{
m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
return appendCallWithExceptionCheck(operation);
}
- JITCompiler::Call callOperation(V_DFGOperation_ECJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ JITCompiler::Call callOperation(V_JITOperation_ECJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
{
m_jit.setupArgumentsWithExecState(arg1, arg2, arg3);
return appendCallWithExceptionCheck(operation);
}
- JITCompiler::Call callOperation(D_DFGOperation_EJ operation, FPRReg result, GPRReg arg1)
+
+ JITCompiler::Call callOperation(D_JITOperation_EJ operation, FPRReg result, GPRReg arg1)
{
m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(D_DFGOperation_ZZ operation, FPRReg result, GPRReg arg1, GPRReg arg2)
+
+ JITCompiler::Call callOperation(Z_JITOperation_EJZZ operation, GPRReg result, GPRReg arg1, unsigned arg2, unsigned arg3)
{
- m_jit.setupArguments(arg1, arg2);
- return appendCallSetResult(operation, result);
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2), TrustedImm32(arg3));
+ return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
+ JITCompiler::Call callOperation(F_JITOperation_EFJZZ operation, GPRReg result, GPRReg arg1, GPRReg arg2, unsigned arg3, GPRReg arg4)
{
- m_jit.setupArguments(arg1, arg2);
- return appendCallSetResult(operation, result);
+ m_jit.setupArgumentsWithExecState(arg1, arg2, TrustedImm32(arg3), arg4);
+ return appendCallWithExceptionCheckSetResult(operation, result);
}
+ JITCompiler::Call callOperation(Z_JITOperation_EJZ operation, GPRReg result, GPRReg arg1, unsigned arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImm32(arg2));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(V_JITOperation_EZJZZZ operation, unsigned arg1, GPRReg arg2, unsigned arg3, GPRReg arg4, unsigned arg5)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2, TrustedImm32(arg3), arg4, TrustedImm32(arg5));
+ return appendCallWithExceptionCheck(operation);
+ }
+#else // USE(JSVALUE32_64)
+
+// Under JSVALUE32_64, an EncodedJSValue is a 64-bit integer. Under the ARM EABI it must be passed in an even-numbered
+// register pair (r0, r2) or on the stack ([sp]). To keep calls from using the wrong registers, occupy r1 or r3 with a dummy argument when necessary.
+#if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
+#define EABI_32BIT_DUMMY_ARG TrustedImm32(0),
+#else
+#define EABI_32BIT_DUMMY_ARG
+#endif
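+// For example, setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2) pads the argument list so the 64-bit value starts in an even-numbered register.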
+
+// Under JSVALUE32_64, an EncodedJSValue is a 64-bit integer that cannot be split between an argument register and the stack
+// on the SH4 architecture. To avoid this, occupy the 4th argument register (r7) with a dummy argument when necessary. This must
+// only be done when no other 32-bit argument follows the 64-bit JSValue.
+#if CPU(SH4)
+#define SH4_32BIT_DUMMY_ARG TrustedImm32(0),
#else
- JITCompiler::Call callOperation(Z_DFGOperation_D operation, GPRReg result, FPRReg arg1)
+#define SH4_32BIT_DUMMY_ARG
+#endif
+
+ JITCompiler::Call callOperation(Z_JITOperation_D operation, GPRReg result, FPRReg arg1)
{
prepareForExternalCall();
m_jit.setupArguments(arg1);
JITCompiler::Call call = m_jit.appendCall(operation);
m_jit.zeroExtend32ToPtr(GPRInfo::returnValueGPR, result);
return call;
}
- JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, void* pointer)
+ JITCompiler::Call callOperation(J_JITOperation_E operation, GPRReg resultTag, GPRReg resultPayload)
+ {
+ m_jit.setupArgumentsExecState();
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, void* pointer)
{
m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_EPP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, void* pointer)
+ JITCompiler::Call callOperation(J_JITOperation_EPP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, void* pointer)
{
m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(pointer));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_EGI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, Identifier* identifier)
+ JITCompiler::Call callOperation(J_JITOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
+ {
+ m_jit.setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EI operation, GPRReg resultTag, GPRReg resultPayload, UniquedStringImpl* uid)
{
- m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(uid));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_EP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
+ JITCompiler::Call callOperation(J_JITOperation_EA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
{
m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_EI operation, GPRReg resultTag, GPRReg resultPayload, Identifier* identifier)
+ JITCompiler::Call callOperation(J_JITOperation_EAZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2)
{
- m_jit.setupArgumentsWithExecState(TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_EA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
+ JITCompiler::Call callOperation(J_JITOperation_EJ operation, GPRReg resultPayload, GPRReg resultTag, GPRReg arg1)
{
m_jit.setupArgumentsWithExecState(arg1);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_EPS operation, GPRReg resultTag, GPRReg resultPayload, void* pointer, size_t size)
+ JITCompiler::Call callOperation(J_JITOperation_EJC operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EJssZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EPS operation, GPRReg resultTag, GPRReg resultPayload, void* pointer, size_t size)
{
m_jit.setupArgumentsWithExecState(TrustedImmPtr(pointer), TrustedImmPtr(size));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_ESS operation, GPRReg resultTag, GPRReg resultPayload, int startConstant, int numConstants)
+ JITCompiler::Call callOperation(J_JITOperation_ESS operation, GPRReg resultTag, GPRReg resultPayload, int startConstant, int numConstants)
{
m_jit.setupArgumentsWithExecState(TrustedImm32(startConstant), TrustedImm32(numConstants));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, void* pointer)
+ JITCompiler::Call callOperation(J_JITOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, void* pointer)
{
- m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, TrustedImmPtr(pointer));
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImmPtr(pointer));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
+ JITCompiler::Call callOperation(J_JITOperation_EJP operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
{
- m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2);
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_ECI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, Identifier* identifier)
+
+ JITCompiler::Call callOperation(J_JITOperation_EC operation, GPRReg resultTag, GPRReg resultPayload, JSCell* cell)
{
- m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, Identifier* identifier)
+ JITCompiler::Call callOperation(J_JITOperation_ECZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2)
{
- m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(arg1, arg2);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_EJI operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1Tag, GPRReg arg1Payload, Identifier* identifier)
+ JITCompiler::Call callOperation(J_JITOperation_EJscC operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, JSCell* cell)
{
- m_jit.setupArgumentsWithExecState(arg1Payload, TrustedImm32(arg1Tag), TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_EJA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
+ JITCompiler::Call callOperation(J_JITOperation_ESsiCI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, GPRReg arg1, const UniquedStringImpl* uid)
{
- m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2);
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_EJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload)
+ JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, UniquedStringImpl* uid)
{
- m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag);
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, TrustedImmPtr(uid));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(C_DFGOperation_E operation, GPRReg result)
+ JITCompiler::Call callOperation(J_JITOperation_ESsiJI operation, GPRReg resultTag, GPRReg resultPayload, StructureStubInfo* stubInfo, int32_t arg1Tag, GPRReg arg1Payload, UniquedStringImpl* uid)
{
- m_jit.setupArgumentsExecState();
- return appendCallWithExceptionCheckSetResult(operation, result);
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, TrustedImm32(arg1Tag), TrustedImmPtr(uid));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EDA operation, GPRReg resultTag, GPRReg resultPayload, FPRReg arg1, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EJA operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EJA operation, GPRReg resultTag, GPRReg resultPayload, TrustedImm32 arg1Tag, GPRReg arg1Payload, GPRReg arg2)
+ {
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_EJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload)
+ {
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, GPRReg arg1)
+ JITCompiler::Call callOperation(J_JITOperation_EZ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1)
{
m_jit.setupArgumentsWithExecState(arg1);
- return appendCallWithExceptionCheckSetResult(operation, result);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(C_DFGOperation_EC operation, GPRReg result, JSCell* cell)
+ JITCompiler::Call callOperation(J_JITOperation_EZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1)
{
- m_jit.setupArgumentsWithExecState(TrustedImmPtr(cell));
- return appendCallWithExceptionCheckSetResult(operation, result);
+ m_jit.setupArgumentsWithExecState(TrustedImm32(arg1));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(C_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, JSCell* cell)
+ JITCompiler::Call callOperation(J_JITOperation_EZIcfZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1, InlineCallFrame* inlineCallFrame, GPRReg arg2)
{
- m_jit.setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
- return appendCallWithExceptionCheckSetResult(operation, result);
+ m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), TrustedImmPtr(inlineCallFrame), arg2);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(S_DFGOperation_J operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
+ JITCompiler::Call callOperation(J_JITOperation_EZZ operation, GPRReg resultTag, GPRReg resultPayload, int32_t arg1, GPRReg arg2)
{
- m_jit.setupArguments(arg1Payload, arg1Tag);
+ m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2);
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+
+ JITCompiler::Call callOperation(P_JITOperation_EJS operation, GPRReg result, JSValueRegs value, size_t index)
+ {
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG value.payloadGPR(), value.tagGPR(), TrustedImmPtr(index));
return appendCallSetResult(operation, result);
}
- JITCompiler::Call callOperation(S_DFGOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
+
+ JITCompiler::Call callOperation(P_JITOperation_EStJ operation, GPRReg result, Structure* structure, GPRReg arg2Tag, GPRReg arg2Payload)
{
- m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag);
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(structure), arg2Payload, arg2Tag);
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(S_DFGOperation_ECC operation, GPRReg result, GPRReg arg1, GPRReg arg2)
+
+ JITCompiler::Call callOperation(C_JITOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
{
- m_jit.setupArgumentsWithExecState(arg1, arg2);
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(S_DFGOperation_EJJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
+ JITCompiler::Call callOperation(S_JITOperation_J operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
+ {
+ m_jit.setupArguments(arg1Payload, arg1Tag);
+ return appendCallSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(S_JITOperation_EJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
+ {
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+
+ JITCompiler::Call callOperation(S_JITOperation_EJJ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
{
- m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2Payload, arg2Tag);
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
+ JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
{
- m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2Payload, arg2Tag);
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, MacroAssembler::TrustedImm32 imm)
+ JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1Tag, GPRReg arg1Payload, MacroAssembler::TrustedImm32 imm)
{
- m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, imm, TrustedImm32(JSValue::Int32Tag));
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG imm, TrustedImm32(JSValue::Int32Tag));
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, MacroAssembler::TrustedImm32 imm, GPRReg arg2Tag, GPRReg arg2Payload)
+ JITCompiler::Call callOperation(J_JITOperation_EJJ operation, GPRReg resultTag, GPRReg resultPayload, MacroAssembler::TrustedImm32 imm, GPRReg arg2Tag, GPRReg arg2Payload)
{
- m_jit.setupArgumentsWithExecState(imm, TrustedImm32(JSValue::Int32Tag), arg2Payload, arg2Tag);
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG imm, TrustedImm32(JSValue::Int32Tag), SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_ECJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload)
+
+ JITCompiler::Call callOperation(J_JITOperation_ECJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload)
{
m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(J_DFGOperation_ECC operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2)
+ JITCompiler::Call callOperation(J_JITOperation_ECJ operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2Payload)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2Payload, MacroAssembler::TrustedImm32(JSValue::CellTag));
+ return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
+ }
+ JITCompiler::Call callOperation(J_JITOperation_ECJ operation, JSValueRegs result, GPRReg arg1, JSValueRegs arg2)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2.payloadGPR(), arg2.tagGPR());
+ return appendCallWithExceptionCheckSetResult(operation, result.payloadGPR(), result.tagGPR());
+ }
+ JITCompiler::Call callOperation(J_JITOperation_ECC operation, GPRReg resultTag, GPRReg resultPayload, GPRReg arg1, GPRReg arg2)
{
m_jit.setupArgumentsWithExecState(arg1, arg2);
return appendCallWithExceptionCheckSetResult(operation, resultPayload, resultTag);
}
- JITCompiler::Call callOperation(V_DFGOperation_EC operation, GPRReg arg1)
+
+ JITCompiler::Call callOperation(V_JITOperation_EOZD operation, GPRReg arg1, GPRReg arg2, FPRReg arg3)
{
- m_jit.setupArgumentsWithExecState(arg1);
+ m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG arg3);
return appendCallWithExceptionCheck(operation);
}
- JITCompiler::Call callOperation(V_DFGOperation_EJPP operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, void* pointer)
+
+ JITCompiler::Call callOperation(V_JITOperation_EJ operation, GPRReg arg1Tag, GPRReg arg1Payload)
+ {
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
+ return appendCallWithExceptionCheck(operation);
+ }
+
+ JITCompiler::Call callOperation(V_JITOperation_EJPP operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, void* pointer)
{
- m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2, TrustedImmPtr(pointer));
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2, TrustedImmPtr(pointer));
return appendCallWithExceptionCheck(operation);
}
- JITCompiler::Call callOperation(V_DFGOperation_EJCI operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, Identifier* identifier)
+ JITCompiler::Call callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Payload, UniquedStringImpl* uid)
{
- m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag, arg2, TrustedImmPtr(identifier));
+ m_jit.setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, arg2Payload, TrustedImm32(JSValue::CellTag), TrustedImmPtr(uid));
return appendCallWithExceptionCheck(operation);
}
- JITCompiler::Call callOperation(V_DFGOperation_ECJJ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, GPRReg arg3Tag, GPRReg arg3Payload)
+ JITCompiler::Call callOperation(V_JITOperation_ECJJ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, GPRReg arg3Tag, GPRReg arg3Payload)
{
m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, arg3Payload, arg3Tag);
return appendCallWithExceptionCheck(operation);
}
- JITCompiler::Call callOperation(V_DFGOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
+
+ JITCompiler::Call callOperation(V_JITOperation_EPZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
+ {
+ m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG SH4_32BIT_DUMMY_ARG arg3Payload, arg3Tag);
+ return appendCallWithExceptionCheck(operation);
+ }
+
+ JITCompiler::Call callOperation(V_JITOperation_EOZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
{
- m_jit.setupArgumentsWithExecState(arg1, arg2, arg3Payload, arg3Tag);
+ m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG SH4_32BIT_DUMMY_ARG arg3Payload, arg3Tag);
return appendCallWithExceptionCheck(operation);
}
- JITCompiler::Call callOperation(V_DFGOperation_EAZJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
+ JITCompiler::Call callOperation(V_JITOperation_EOZJ operation, GPRReg arg1, GPRReg arg2, TrustedImm32 arg3Tag, GPRReg arg3Payload)
{
- m_jit.setupArgumentsWithExecState(arg1, arg2, arg3Payload, arg3Tag);
+ m_jit.setupArgumentsWithExecState(arg1, arg2, EABI_32BIT_DUMMY_ARG SH4_32BIT_DUMMY_ARG arg3Payload, arg3Tag);
return appendCallWithExceptionCheck(operation);
}
- JITCompiler::Call callOperation(D_DFGOperation_EJ operation, FPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
+ JITCompiler::Call callOperation(D_JITOperation_EJ operation, FPRReg result, GPRReg arg1Tag, GPRReg arg1Payload)
{
- m_jit.setupArgumentsWithExecState(arg1Payload, arg1Tag);
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(D_DFGOperation_ZZ operation, FPRReg result, GPRReg arg1, GPRReg arg2)
+ JITCompiler::Call callOperation(Z_JITOperation_EJZZ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, unsigned arg2, unsigned arg3)
{
- m_jit.setupArguments(arg1, arg2);
- return appendCallSetResult(operation, result);
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImm32(arg2), TrustedImm32(arg3));
+ return appendCallWithExceptionCheckSetResult(operation, result);
}
- JITCompiler::Call callOperation(D_DFGOperation_DD operation, FPRReg result, FPRReg arg1, FPRReg arg2)
+ JITCompiler::Call callOperation(F_JITOperation_EFJZZ operation, GPRReg result, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, unsigned arg3, GPRReg arg4)
{
- m_jit.setupArguments(arg1, arg2);
- return appendCallSetResult(operation, result);
+ m_jit.setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, TrustedImm32(arg3), arg4);
+ return appendCallWithExceptionCheckSetResult(operation, result);
}
-#endif
+ JITCompiler::Call callOperation(Z_JITOperation_EJZ operation, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, unsigned arg2)
+ {
+ m_jit.setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImm32(arg2));
+ return appendCallWithExceptionCheckSetResult(operation, result);
+ }
+ JITCompiler::Call callOperation(V_JITOperation_EZJZZZ operation, unsigned arg1, GPRReg arg2Tag, GPRReg arg2Payload, unsigned arg3, GPRReg arg4, unsigned arg5)
+ {
+ m_jit.setupArgumentsWithExecState(TrustedImm32(arg1), arg2Payload, arg2Tag, TrustedImm32(arg3), arg4, TrustedImm32(arg5));
+ return appendCallWithExceptionCheck(operation);
+ }
+#undef EABI_32BIT_DUMMY_ARG
+#undef SH4_32BIT_DUMMY_ARG
+
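+ // These templates adapt a JSValueRegs result to the (tag, payload) overloads above, so callers can be written identically for both value representations.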
+ template<typename FunctionType>
+ JITCompiler::Call callOperation(
+ FunctionType operation, JSValueRegs result)
+ {
+ return callOperation(operation, result.tagGPR(), result.payloadGPR());
+ }
+ template<typename FunctionType, typename ArgumentType1>
+ JITCompiler::Call callOperation(
+ FunctionType operation, JSValueRegs result, ArgumentType1 arg1)
+ {
+ return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1);
+ }
+ template<typename FunctionType, typename ArgumentType1, typename ArgumentType2>
+ JITCompiler::Call callOperation(
+ FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2)
+ {
+ return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1, arg2);
+ }
+ template<
+ typename FunctionType, typename ArgumentType1, typename ArgumentType2,
+ typename ArgumentType3>
+ JITCompiler::Call callOperation(
+ FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2,
+ ArgumentType3 arg3)
+ {
+ return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1, arg2, arg3);
+ }
+ template<
+ typename FunctionType, typename ArgumentType1, typename ArgumentType2,
+ typename ArgumentType3, typename ArgumentType4>
+ JITCompiler::Call callOperation(
+ FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2,
+ ArgumentType3 arg3, ArgumentType4 arg4)
+ {
+ return callOperation(operation, result.tagGPR(), result.payloadGPR(), arg1, arg2, arg3, arg4);
+ }
+ template<
+ typename FunctionType, typename ArgumentType1, typename ArgumentType2,
+ typename ArgumentType3, typename ArgumentType4, typename ArgumentType5>
+ JITCompiler::Call callOperation(
+ FunctionType operation, JSValueRegs result, ArgumentType1 arg1, ArgumentType2 arg2,
+ ArgumentType3 arg3, ArgumentType4 arg4, ArgumentType5 arg5)
+ {
+ return callOperation(
+ operation, result.tagGPR(), result.payloadGPR(), arg1, arg2, arg3, arg4, arg5);
+ }
+#endif // USE(JSVALUE32_64)
-#if !defined(NDEBUG) && !CPU(ARM_THUMB2)
+#if !defined(NDEBUG) && !CPU(ARM) && !CPU(MIPS) && !CPU(SH4)
void prepareForExternalCall()
{
+ // We're about to call out to a "native" helper function. The helper
+ // function is expected to set topCallFrame itself with the ExecState
+ // that is passed to it.
+ //
+ // We explicitly trash topCallFrame here so that we'll know if some of
+ // the helper functions are not setting topCallFrame when they should
+ // be doing so. Note: the previous value in topCallFrame was not valid
+ // anyway, since it is not updated by JITed code by design.
+
for (unsigned i = 0; i < sizeof(void*) / 4; i++)
- m_jit.store32(TrustedImm32(0xbadbeef), reinterpret_cast<char*>(&m_jit.globalData()->topCallFrame) + i * 4);
+ m_jit.store32(TrustedImm32(0xbadbeef), reinterpret_cast<char*>(&m_jit.vm()->topCallFrame) + i * 4);
}
#else
void prepareForExternalCall() { }
#endif
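+ // The helpers below wrap m_jit.appendCall() with the common bookkeeping: storing the code origin, checking for exceptions, and moving results out of the return registers.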
JITCompiler::Call appendCallWithExceptionCheck(const FunctionPtr& function)
{
prepareForExternalCall();
- CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
- CallBeginToken token = m_jit.beginCall();
+ m_jit.emitStoreCodeOrigin(m_currentNode->origin.semantic);
+ JITCompiler::Call call = m_jit.appendCall(function);
+ m_jit.exceptionCheck();
+ return call;
+ }
+ JITCompiler::Call appendCallWithCallFrameRollbackOnException(const FunctionPtr& function)
+ {
+ prepareForExternalCall();
+ m_jit.emitStoreCodeOrigin(m_currentNode->origin.semantic);
JITCompiler::Call call = m_jit.appendCall(function);
- m_jit.addExceptionCheck(call, codeOrigin, token);
+ m_jit.exceptionCheckWithCallFrameRollback();
return call;
}
JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result)
{
JITCompiler::Call call = appendCallWithExceptionCheck(function);
- m_jit.move(GPRInfo::returnValueGPR, result);
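+ // Only move when a result register is wanted and it is not already the return register.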
+ if ((result != InvalidGPRReg) && (result != GPRInfo::returnValueGPR))
+ m_jit.move(GPRInfo::returnValueGPR, result);
+ return call;
+ }
+ JITCompiler::Call appendCallWithCallFrameRollbackOnExceptionSetResult(const FunctionPtr& function, GPRReg result)
+ {
+ JITCompiler::Call call = appendCallWithCallFrameRollbackOnException(function);
+ if ((result != InvalidGPRReg) && (result != GPRInfo::returnValueGPR))
+ m_jit.move(GPRInfo::returnValueGPR, result);
return call;
}
JITCompiler::Call appendCallSetResult(const FunctionPtr& function, GPRReg result)
{
prepareForExternalCall();
+ m_jit.emitStoreCodeOrigin(m_currentNode->origin.semantic);
JITCompiler::Call call = m_jit.appendCall(function);
- m_jit.move(GPRInfo::returnValueGPR, result);
+ if (result != InvalidGPRReg)
+ m_jit.move(GPRInfo::returnValueGPR, result);
return call;
}
+ JITCompiler::Call appendCall(const FunctionPtr& function)
+ {
+ prepareForExternalCall();
+ m_jit.emitStoreCodeOrigin(m_currentNode->origin.semantic);
+ return m_jit.appendCall(function);
+ }
JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, GPRReg result1, GPRReg result2)
{
JITCompiler::Call call = appendCallWithExceptionCheck(function);
m_jit.setupResults(result1, result2);
return call;
}
#if CPU(X86)
JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result)
{
JITCompiler::Call call = appendCallWithExceptionCheck(function);
- m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister);
- m_jit.loadDouble(JITCompiler::stackPointerRegister, result);
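+ // The double result comes back on the x87 floating-point stack on this path; spill it to the slot at the stack pointer and reload it into the requested FPR.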
+ if (result != InvalidFPRReg) {
+ m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister);
+ m_jit.loadDouble(JITCompiler::stackPointerRegister, result);
+ }
return call;
}
JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result)
{
- JITCompiler::Call call = m_jit.appendCall(function);
- m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister);
- m_jit.loadDouble(JITCompiler::stackPointerRegister, result);
+ JITCompiler::Call call = appendCall(function);
+ if (result != InvalidFPRReg) {
+ m_jit.assembler().fstpl(0, JITCompiler::stackPointerRegister);
+ m_jit.loadDouble(JITCompiler::stackPointerRegister, result);
+ }
return call;
}
-#elif CPU(ARM)
+#elif CPU(ARM) && !CPU(ARM_HARDFP)
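+ // With the soft-float ABI the double is returned in the returnValueGPR/returnValueGPR2 pair; vmov moves the pair into the requested VFP register.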
JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result)
{
JITCompiler::Call call = appendCallWithExceptionCheck(function);
- m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
+ if (result != InvalidFPRReg)
+ m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
return call;
}
JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result)
{
- JITCompiler::Call call = m_jit.appendCall(function);
- m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
+ JITCompiler::Call call = appendCall(function);
+ if (result != InvalidFPRReg)
+ m_jit.assembler().vmov(result, GPRInfo::returnValueGPR, GPRInfo::returnValueGPR2);
return call;
}
-#else
+#else // CPU(X86_64) || (CPU(ARM) && CPU(ARM_HARDFP)) || CPU(ARM64) || CPU(MIPS) || CPU(SH4)
JITCompiler::Call appendCallWithExceptionCheckSetResult(const FunctionPtr& function, FPRReg result)
{
JITCompiler::Call call = appendCallWithExceptionCheck(function);
- m_jit.moveDouble(FPRInfo::returnValueFPR, result);
+ if (result != InvalidFPRReg)
+ m_jit.moveDouble(FPRInfo::returnValueFPR, result);
return call;
}
JITCompiler::Call appendCallSetResult(const FunctionPtr& function, FPRReg result)
{
- JITCompiler::Call call = m_jit.appendCall(function);
- m_jit.moveDouble(FPRInfo::returnValueFPR, result);
+ JITCompiler::Call call = appendCall(function);
+ if (result != InvalidFPRReg)
+ m_jit.moveDouble(FPRInfo::returnValueFPR, result);
return call;
}
#endif
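+ // Branch helpers: emit the compare and hand the resulting jump to addBranch() for the destination basic block.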
- void branchDouble(JITCompiler::DoubleCondition cond, FPRReg left, FPRReg right, BlockIndex destination)
+ void branchDouble(JITCompiler::DoubleCondition cond, FPRReg left, FPRReg right, BasicBlock* destination)
{
- if (!haveEdgeCodeToEmit(destination))
- return addBranch(m_jit.branchDouble(cond, left, right), destination);
-
- JITCompiler::Jump notTaken = m_jit.branchDouble(JITCompiler::invert(cond), left, right);
- emitEdgeCode(destination);
- addBranch(m_jit.jump(), destination);
- notTaken.link(&m_jit);
+ return addBranch(m_jit.branchDouble(cond, left, right), destination);
}
- void branchDoubleNonZero(FPRReg value, FPRReg scratch, BlockIndex destination)
+ void branchDoubleNonZero(FPRReg value, FPRReg scratch, BasicBlock* destination)
{
- if (!haveEdgeCodeToEmit(destination))
- return addBranch(m_jit.branchDoubleNonZero(value, scratch), destination);
-
- JITCompiler::Jump notTaken = m_jit.branchDoubleZeroOrNaN(value, scratch);
- emitEdgeCode(destination);
- addBranch(m_jit.jump(), destination);
- notTaken.link(&m_jit);
+ return addBranch(m_jit.branchDoubleNonZero(value, scratch), destination);
}
template<typename T, typename U>
- void branch32(JITCompiler::RelationalCondition cond, T left, U right, BlockIndex destination)
+ void branch32(JITCompiler::RelationalCondition cond, T left, U right, BasicBlock* destination)
{
- if (!haveEdgeCodeToEmit(destination))
- return addBranch(m_jit.branch32(cond, left, right), destination);
-
- JITCompiler::Jump notTaken = m_jit.branch32(JITCompiler::invert(cond), left, right);
- emitEdgeCode(destination);
- addBranch(m_jit.jump(), destination);
- notTaken.link(&m_jit);
+ return addBranch(m_jit.branch32(cond, left, right), destination);
}
template<typename T, typename U>
- void branchTest32(JITCompiler::ResultCondition cond, T value, U mask, BlockIndex destination)
+ void branchTest32(JITCompiler::ResultCondition cond, T value, U mask, BasicBlock* destination)
{
- ASSERT(JITCompiler::isInvertible(cond));
-
- if (!haveEdgeCodeToEmit(destination))
- return addBranch(m_jit.branchTest32(cond, value, mask), destination);
-
- JITCompiler::Jump notTaken = m_jit.branchTest32(JITCompiler::invert(cond), value, mask);
- emitEdgeCode(destination);
- addBranch(m_jit.jump(), destination);
- notTaken.link(&m_jit);
+ return addBranch(m_jit.branchTest32(cond, value, mask), destination);
}
template<typename T>
- void branchTest32(JITCompiler::ResultCondition cond, T value, BlockIndex destination)
+ void branchTest32(JITCompiler::ResultCondition cond, T value, BasicBlock* destination)
{
- ASSERT(JITCompiler::isInvertible(cond));
-
- if (!haveEdgeCodeToEmit(destination))
- return addBranch(m_jit.branchTest32(cond, value), destination);
-
- JITCompiler::Jump notTaken = m_jit.branchTest32(JITCompiler::invert(cond), value);
- emitEdgeCode(destination);
- addBranch(m_jit.jump(), destination);
- notTaken.link(&m_jit);
+ return addBranch(m_jit.branchTest32(cond, value), destination);
}
+#if USE(JSVALUE64)
template<typename T, typename U>
- void branchPtr(JITCompiler::RelationalCondition cond, T left, U right, BlockIndex destination)
+ void branch64(JITCompiler::RelationalCondition cond, T left, U right, BasicBlock* destination)
{
- if (!haveEdgeCodeToEmit(destination))
- return addBranch(m_jit.branchPtr(cond, left, right), destination);
-
- JITCompiler::Jump notTaken = m_jit.branchPtr(JITCompiler::invert(cond), left, right);
- emitEdgeCode(destination);
- addBranch(m_jit.jump(), destination);
- notTaken.link(&m_jit);
+ return addBranch(m_jit.branch64(cond, left, right), destination);
}
+#endif
template<typename T, typename U>
- void branchTestPtr(JITCompiler::ResultCondition cond, T value, U mask, BlockIndex destination)
+ void branch8(JITCompiler::RelationalCondition cond, T left, U right, BasicBlock* destination)
{
- ASSERT(JITCompiler::isInvertible(cond));
-
- if (!haveEdgeCodeToEmit(destination))
- return addBranch(m_jit.branchTestPtr(cond, value, mask), destination);
-
- JITCompiler::Jump notTaken = m_jit.branchTestPtr(JITCompiler::invert(cond), value, mask);
- emitEdgeCode(destination);
- addBranch(m_jit.jump(), destination);
- notTaken.link(&m_jit);
+ return addBranch(m_jit.branch8(cond, left, right), destination);
+ }
+
+ template<typename T, typename U>
+ void branchPtr(JITCompiler::RelationalCondition cond, T left, U right, BasicBlock* destination)
+ {
+ return addBranch(m_jit.branchPtr(cond, left, right), destination);
+ }
+
+ template<typename T, typename U>
+ void branchTestPtr(JITCompiler::ResultCondition cond, T value, U mask, BasicBlock* destination)
+ {
+ return addBranch(m_jit.branchTestPtr(cond, value, mask), destination);
}
template<typename T>
- void branchTestPtr(JITCompiler::ResultCondition cond, T value, BlockIndex destination)
+ void branchTestPtr(JITCompiler::ResultCondition cond, T value, BasicBlock* destination)
{
- ASSERT(JITCompiler::isInvertible(cond));
-
- if (!haveEdgeCodeToEmit(destination))
- return addBranch(m_jit.branchTestPtr(cond, value), destination);
-
- JITCompiler::Jump notTaken = m_jit.branchTestPtr(JITCompiler::invert(cond), value);
- emitEdgeCode(destination);
- addBranch(m_jit.jump(), destination);
- notTaken.link(&m_jit);
+ return addBranch(m_jit.branchTestPtr(cond, value), destination);
}
template<typename T, typename U>
- void branchTest8(JITCompiler::ResultCondition cond, T value, U mask, BlockIndex destination)
+ void branchTest8(JITCompiler::ResultCondition cond, T value, U mask, BasicBlock* destination)
{
- ASSERT(JITCompiler::isInvertible(cond));
-
- if (!haveEdgeCodeToEmit(destination))
- return addBranch(m_jit.branchTest8(cond, value, mask), destination);
-
- JITCompiler::Jump notTaken = m_jit.branchTest8(JITCompiler::invert(cond), value, mask);
- emitEdgeCode(destination);
- addBranch(m_jit.jump(), destination);
- notTaken.link(&m_jit);
+ return addBranch(m_jit.branchTest8(cond, value, mask), destination);
}
template<typename T>
- void branchTest8(JITCompiler::ResultCondition cond, T value, BlockIndex destination)
+ void branchTest8(JITCompiler::ResultCondition cond, T value, BasicBlock* destination)
{
- ASSERT(JITCompiler::isInvertible(cond));
-
- if (!haveEdgeCodeToEmit(destination))
- return addBranch(m_jit.branchTest8(cond, value), destination);
-
- JITCompiler::Jump notTaken = m_jit.branchTest8(JITCompiler::invert(cond), value);
- emitEdgeCode(destination);
- addBranch(m_jit.jump(), destination);
- notTaken.link(&m_jit);
+ return addBranch(m_jit.branchTest8(cond, value), destination);
}
enum FallThroughMode {
AtFallThroughPoint,
ForceJump
};
- void jump(BlockIndex destination, FallThroughMode fallThroughMode = AtFallThroughPoint)
+ void jump(BasicBlock* destination, FallThroughMode fallThroughMode = AtFallThroughPoint)
{
- if (haveEdgeCodeToEmit(destination))
- emitEdgeCode(destination);
- if (destination == m_block + 1
+ if (destination == nextBlock()
&& fallThroughMode == AtFallThroughPoint)
return;
addBranch(m_jit.jump(), destination);
}
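+    // A rough sketch of how these helpers combine (valueGPR, taken and notTaken are placeholder
+    // names): a Branch-like node emits the conditional branch to the taken block and then a jump
+    // to the not-taken block, which jump() elides when notTaken is the next block emitted:
+    //
+    //     branchTest32(MacroAssembler::NonZero, valueGPR, taken);
+    //     jump(notTaken);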
- inline bool haveEdgeCodeToEmit(BlockIndex)
- {
- return DFG_ENABLE_EDGE_CODE_VERIFICATION;
- }
- void emitEdgeCode(BlockIndex destination)
- {
- if (!DFG_ENABLE_EDGE_CODE_VERIFICATION)
- return;
- m_jit.move(TrustedImm32(destination), GPRInfo::regT0);
- }
-
- void addBranch(const MacroAssembler::Jump& jump, BlockIndex destination)
+ void addBranch(const MacroAssembler::Jump& jump, BasicBlock* destination)
{
m_branches.append(BranchRecord(jump, destination));
}
+ void addBranch(const MacroAssembler::JumpList& jump, BasicBlock* destination);
- void linkBranches()
- {
- for (size_t i = 0; i < m_branches.size(); ++i) {
- BranchRecord& branch = m_branches[i];
- branch.jump.linkTo(m_blockHeads[branch.destination], &m_jit);
- }
- }
-
- BasicBlock* block()
- {
- return m_jit.graph().m_blocks[m_block].get();
- }
+ void linkBranches();
-#ifndef NDEBUG
void dump(const char* label = 0);
-#endif
-#if DFG_ENABLE(CONSISTENCY_CHECK)
- void checkConsistency();
-#else
- void checkConsistency() { }
+ bool betterUseStrictInt52(Node* node)
+ {
+ return !generationInfo(node).isInt52();
+ }
+ bool betterUseStrictInt52(Edge edge)
+ {
+ return betterUseStrictInt52(edge.node());
+ }
+
+ bool compare(Node*, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_JITOperation_EJJ);
+ bool compilePeepHoleBranch(Node*, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_JITOperation_EJJ);
+ void compilePeepHoleInt32Branch(Node*, Node* branchNode, JITCompiler::RelationalCondition);
+ void compilePeepHoleInt52Branch(Node*, Node* branchNode, JITCompiler::RelationalCondition);
+ void compilePeepHoleBooleanBranch(Node*, Node* branchNode, JITCompiler::RelationalCondition);
+ void compilePeepHoleDoubleBranch(Node*, Node* branchNode, JITCompiler::DoubleCondition);
+ void compilePeepHoleObjectEquality(Node*, Node* branchNode);
+ void compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode);
+ void compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode);
+ void compileObjectEquality(Node*);
+ void compileObjectStrictEquality(Edge objectChild, Edge otherChild);
+ void compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild);
+ void compileObjectOrOtherLogicalNot(Edge value);
+ void compileLogicalNot(Node*);
+ void compileStringEquality(
+ Node*, GPRReg leftGPR, GPRReg rightGPR, GPRReg lengthGPR,
+ GPRReg leftTempGPR, GPRReg rightTempGPR, GPRReg leftTemp2GPR,
+ GPRReg rightTemp2GPR, JITCompiler::JumpList fastTrue,
+        JITCompiler::JumpList fastFalse);
+ void compileStringEquality(Node*);
+ void compileStringIdentEquality(Node*);
+ void compileStringToUntypedEquality(Node*, Edge stringEdge, Edge untypedEdge);
+ void compileStringIdentToNotStringVarEquality(Node*, Edge stringEdge, Edge notStringVarEdge);
+ void compileStringZeroLength(Node*);
+ void compileMiscStrictEq(Node*);
+
+ void emitObjectOrOtherBranch(Edge value, BasicBlock* taken, BasicBlock* notTaken);
+ void emitStringBranch(Edge value, BasicBlock* taken, BasicBlock* notTaken);
+ void emitBranch(Node*);
+
+ struct StringSwitchCase {
+ StringSwitchCase() { }
+
+ StringSwitchCase(StringImpl* string, BasicBlock* target)
+ : string(string)
+ , target(target)
+ {
+ }
+
+ bool operator<(const StringSwitchCase& other) const
+ {
+ return stringLessThan(*string, *other.string);
+ }
+
+ StringImpl* string;
+ BasicBlock* target;
+ };
+
+ void emitSwitchIntJump(SwitchData*, GPRReg value, GPRReg scratch);
+ void emitSwitchImm(Node*, SwitchData*);
+ void emitSwitchCharStringJump(SwitchData*, GPRReg value, GPRReg scratch);
+ void emitSwitchChar(Node*, SwitchData*);
+ void emitBinarySwitchStringRecurse(
+ SwitchData*, const Vector<StringSwitchCase>&, unsigned numChecked,
+ unsigned begin, unsigned end, GPRReg buffer, GPRReg length, GPRReg temp,
+ unsigned alreadyCheckedLength, bool checkedExactLength);
+ void emitSwitchStringOnString(SwitchData*, GPRReg string);
+ void emitSwitchString(Node*, SwitchData*);
+ void emitSwitch(Node*);
+
+ void compileToStringOrCallStringConstructorOnCell(Node*);
+ void compileNewStringObject(Node*);
+
+ void compileNewTypedArray(Node*);
+
+ void compileInt32Compare(Node*, MacroAssembler::RelationalCondition);
+ void compileInt52Compare(Node*, MacroAssembler::RelationalCondition);
+ void compileBooleanCompare(Node*, MacroAssembler::RelationalCondition);
+ void compileDoubleCompare(Node*, MacroAssembler::DoubleCondition);
+
+ bool compileStrictEq(Node*);
+
+ void compileAllocatePropertyStorage(Node*);
+ void compileReallocatePropertyStorage(Node*);
+
+#if USE(JSVALUE32_64)
+ template<typename BaseOperandType, typename PropertyOperandType, typename ValueOperandType, typename TagType>
+ void compileContiguousPutByVal(Node*, BaseOperandType&, PropertyOperandType&, ValueOperandType&, GPRReg valuePayloadReg, TagType valueTag);
+#endif
+ void compileDoublePutByVal(Node*, SpeculateCellOperand& base, SpeculateStrictInt32Operand& property);
+ bool putByValWillNeedExtraRegister(ArrayMode arrayMode)
+ {
+ return arrayMode.mayStoreToHole();
+ }
+ GPRReg temporaryRegisterForPutByVal(GPRTemporary&, ArrayMode);
+ GPRReg temporaryRegisterForPutByVal(GPRTemporary& temporary, Node* node)
+ {
+ return temporaryRegisterForPutByVal(temporary, node->arrayMode());
+ }
+
+ void compileGetCharCodeAt(Node*);
+ void compileGetByValOnString(Node*);
+ void compileFromCharCode(Node*);
+
+ void compileGetByValOnDirectArguments(Node*);
+ void compileGetByValOnScopedArguments(Node*);
+
+ void compileGetScope(Node*);
+ void compileSkipScope(Node*);
+
+ void compileGetArrayLength(Node*);
+
+ void compileValueRep(Node*);
+ void compileDoubleRep(Node*);
+
+ void compileValueToInt32(Node*);
+ void compileUInt32ToNumber(Node*);
+ void compileDoubleAsInt32(Node*);
+ void compileAdd(Node*);
+ void compileMakeRope(Node*);
+ void compileArithClz32(Node*);
+ void compileArithSub(Node*);
+ void compileArithNegate(Node*);
+ void compileArithMul(Node*);
+ void compileArithDiv(Node*);
+ void compileArithMod(Node*);
+ void compileArithPow(Node*);
+ void compileArithRound(Node*);
+ void compileArithSqrt(Node*);
+ void compileArithLog(Node*);
+ void compileConstantStoragePointer(Node*);
+ void compileGetIndexedPropertyStorage(Node*);
+ JITCompiler::Jump jumpForTypedArrayOutOfBounds(Node*, GPRReg baseGPR, GPRReg indexGPR);
+ void emitTypedArrayBoundsCheck(Node*, GPRReg baseGPR, GPRReg indexGPR);
+ void compileGetTypedArrayByteOffset(Node*);
+ void compileGetByValOnIntTypedArray(Node*, TypedArrayType);
+ void compilePutByValForIntTypedArray(GPRReg base, GPRReg property, Node*, TypedArrayType);
+ void compileGetByValOnFloatTypedArray(Node*, TypedArrayType);
+ void compilePutByValForFloatTypedArray(GPRReg base, GPRReg property, Node*, TypedArrayType);
+ void compileNewFunction(Node*);
+ void compileForwardVarargs(Node*);
+ void compileCreateActivation(Node*);
+ void compileCreateDirectArguments(Node*);
+ void compileGetFromArguments(Node*);
+ void compilePutToArguments(Node*);
+ void compileCreateScopedArguments(Node*);
+ void compileCreateClonedArguments(Node*);
+ void compileNotifyWrite(Node*);
+ bool compileRegExpExec(Node*);
+ void compileIsObjectOrNull(Node*);
+ void compileIsFunction(Node*);
+ void compileTypeOf(Node*);
+
+ void moveTrueTo(GPRReg);
+ void moveFalseTo(GPRReg);
+ void blessBoolean(GPRReg);
+
+ // size can be an immediate or a register, and must be in bytes. If size is a register,
+    // it must be a different register than resultGPR. Emits code that places a pointer to
+    // the end of the allocation in resultGPR. The returned jump is the jump to the slow path.
+ template<typename SizeType>
+ MacroAssembler::Jump emitAllocateBasicStorage(SizeType size, GPRReg resultGPR)
+ {
+ CopiedAllocator* copiedAllocator = &m_jit.vm()->heap.storageAllocator();
+
+ // It's invalid to allocate zero bytes in CopiedSpace.
+#ifndef NDEBUG
+ m_jit.move(size, resultGPR);
+ MacroAssembler::Jump nonZeroSize = m_jit.branchTest32(MacroAssembler::NonZero, resultGPR);
+ m_jit.abortWithReason(DFGBasicStorageAllocatorZeroSize);
+ nonZeroSize.link(&m_jit);
#endif
- bool isInteger(NodeIndex nodeIndex)
- {
- Node& node = at(nodeIndex);
- if (node.hasInt32Result())
- return true;
-
- if (isInt32Constant(nodeIndex))
- return true;
-
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
+ m_jit.loadPtr(&copiedAllocator->m_currentRemaining, resultGPR);
+ MacroAssembler::Jump slowPath = m_jit.branchSubPtr(JITCompiler::Signed, size, resultGPR);
+ m_jit.storePtr(resultGPR, &copiedAllocator->m_currentRemaining);
+ m_jit.negPtr(resultGPR);
+ m_jit.addPtr(JITCompiler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), resultGPR);
- return info.isJSInteger();
- }
-
- bool compare(Node&, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_DFGOperation_EJJ);
- bool compilePeepHoleBranch(Node&, MacroAssembler::RelationalCondition, MacroAssembler::DoubleCondition, S_DFGOperation_EJJ);
- void compilePeepHoleIntegerBranch(Node&, NodeIndex branchNodeIndex, JITCompiler::RelationalCondition);
- void compilePeepHoleDoubleBranch(Node&, NodeIndex branchNodeIndex, JITCompiler::DoubleCondition);
- void compilePeepHoleObjectEquality(Node&, NodeIndex branchNodeIndex, const ClassInfo*, PredictionChecker);
- void compilePeepHoleObjectToObjectOrOtherEquality(
- Edge leftChild, Edge rightChild, NodeIndex branchNodeIndex, const ClassInfo*, PredictionChecker);
- void compileObjectEquality(Node&, const ClassInfo*, PredictionChecker);
- void compileObjectToObjectOrOtherEquality(
- Edge leftChild, Edge rightChild, const ClassInfo*, PredictionChecker);
- void compileValueAdd(Node&);
- void compileObjectOrOtherLogicalNot(Edge value, const ClassInfo*, bool needSpeculationCheck);
- void compileLogicalNot(Node&);
- void emitObjectOrOtherBranch(Edge value, BlockIndex taken, BlockIndex notTaken, const ClassInfo*, bool needSpeculationCheck);
- void emitBranch(Node&);
-
- void compileIntegerCompare(Node&, MacroAssembler::RelationalCondition);
- void compileDoubleCompare(Node&, MacroAssembler::DoubleCondition);
-
- bool compileStrictEqForConstant(Node&, Edge value, JSValue constant);
-
- bool compileStrictEq(Node&);
-
- void compileGetCharCodeAt(Node&);
- void compileGetByValOnString(Node&);
- void compileValueToInt32(Node&);
- void compileUInt32ToNumber(Node&);
- void compileDoubleAsInt32(Node&);
- void compileInt32ToDouble(Node&);
- void compileAdd(Node&);
- void compileArithSub(Node&);
- void compileArithNegate(Node&);
- void compileArithMul(Node&);
-#if CPU(X86) || CPU(X86_64)
- void compileIntegerArithDivForX86(Node&);
-#endif
- void compileArithMod(Node&);
- void compileSoftModulo(Node&);
- void compileGetTypedArrayLength(const TypedArrayDescriptor&, Node&, bool needsSpeculationCheck);
- enum TypedArraySpeculationRequirements {
- NoTypedArraySpecCheck,
- NoTypedArrayTypeSpecCheck,
- AllTypedArraySpecChecks
- };
- enum TypedArraySignedness {
- SignedTypedArray,
- UnsignedTypedArray
- };
- enum TypedArrayRounding {
- TruncateRounding,
- ClampRounding
- };
- void compileGetIndexedPropertyStorage(Node&);
- void compileGetByValOnIntTypedArray(const TypedArrayDescriptor&, Node&, size_t elementSize, TypedArraySpeculationRequirements, TypedArraySignedness);
- void compilePutByValForIntTypedArray(const TypedArrayDescriptor&, GPRReg base, GPRReg property, Node&, size_t elementSize, TypedArraySpeculationRequirements, TypedArraySignedness, TypedArrayRounding = TruncateRounding);
- void compileGetByValOnFloatTypedArray(const TypedArrayDescriptor&, Node&, size_t elementSize, TypedArraySpeculationRequirements);
- void compilePutByValForFloatTypedArray(const TypedArrayDescriptor&, GPRReg base, GPRReg property, Node&, size_t elementSize, TypedArraySpeculationRequirements);
- void compileNewFunctionNoCheck(Node&);
- void compileNewFunctionExpression(Node&);
- bool compileRegExpExec(Node&);
-
- template <typename ClassType, bool destructor, typename StructureType>
- void emitAllocateBasicJSObject(StructureType structure, GPRReg resultGPR, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
- {
- MarkedAllocator* allocator = 0;
- if (destructor)
- allocator = &m_jit.globalData()->heap.allocatorForObjectWithDestructor(sizeof(ClassType));
- else
- allocator = &m_jit.globalData()->heap.allocatorForObjectWithoutDestructor(sizeof(ClassType));
+ return slowPath;
+ }
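+    // Rough usage sketch (numberOfSlots and storageGPR are placeholder names): the caller reserves
+    // storage inline and routes the returned jump to a slow-path call into C++:
+    //
+    //     JITCompiler::Jump slowCase = emitAllocateBasicStorage(
+    //         TrustedImm32(numberOfSlots * sizeof(JSValue)), storageGPR);
+    //     // storageGPR now points at the end of the allocation, so slots are addressed at
+    //     // negative offsets; slowCase is handed to a slow-path generator.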
- m_jit.loadPtr(&allocator->m_freeList.head, resultGPR);
+ // Allocator for a cell of a specific size.
+ template <typename StructureType> // StructureType can be GPR or ImmPtr.
+ void emitAllocateJSCell(GPRReg resultGPR, GPRReg allocatorGPR, StructureType structure,
+ GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
+ {
+ m_jit.loadPtr(MacroAssembler::Address(allocatorGPR, MarkedAllocator::offsetOfFreeListHead()), resultGPR);
slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, resultGPR));
// The object is half-allocated: we have what we know is a fresh object, but
// it's still on the GC's free list.
-
- // Ditch the structure by placing it into the structure slot, so that we can reuse
- // scratchGPR.
- m_jit.storePtr(structure, MacroAssembler::Address(resultGPR, JSObject::structureOffset()));
-
- // Now that we have scratchGPR back, remove the object from the free list
m_jit.loadPtr(MacroAssembler::Address(resultGPR), scratchGPR);
- m_jit.storePtr(scratchGPR, &allocator->m_freeList.head);
-
- // Initialize the object's classInfo pointer
- m_jit.storePtr(MacroAssembler::TrustedImmPtr(&ClassType::s_info), MacroAssembler::Address(resultGPR, JSCell::classInfoOffset()));
-
- // Initialize the object's inheritorID.
- m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::Address(resultGPR, JSObject::offsetOfInheritorID()));
-
- // Initialize the object's property storage pointer.
- m_jit.addPtr(MacroAssembler::TrustedImm32(sizeof(JSObject)), resultGPR, scratchGPR);
- m_jit.storePtr(scratchGPR, MacroAssembler::Address(resultGPR, ClassType::offsetOfPropertyStorage()));
- }
+ m_jit.storePtr(scratchGPR, MacroAssembler::Address(allocatorGPR, MarkedAllocator::offsetOfFreeListHead()));
- // It is acceptable to have structure be equal to scratch, so long as you're fine
- // with the structure GPR being clobbered.
- template<typename T>
- void emitAllocateJSFinalObject(T structure, GPRReg resultGPR, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
- {
- return emitAllocateBasicJSObject<JSFinalObject, false>(structure, resultGPR, scratchGPR, slowPath);
+ // Initialize the object's Structure.
+ m_jit.emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR);
}
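+    // The fast path above is, roughly, this free-list pop (pseudocode; slowPath is taken when the
+    // list is empty):
+    //
+    //     result = allocator->freeListHead;
+    //     if (!result) goto slowPath;
+    //     allocator->freeListHead = *(void**)result;  // a free cell's first word is the next link
+    //     write the Structure / type info into result's header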
-#if USE(JSVALUE64)
- JITCompiler::Jump convertToDouble(GPRReg value, FPRReg result, GPRReg tmp);
-#elif USE(JSVALUE32_64)
- JITCompiler::Jump convertToDouble(JSValueOperand&, FPRReg result);
-#endif
-
- // Add a speculation check without additional recovery.
- void speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail)
- {
- if (!m_compileOkay)
- return;
- m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), jumpToFail, this));
- }
- void speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail)
- {
- speculationCheck(kind, jsValueSource, nodeUse.index(), jumpToFail);
- }
- // Add a set of speculation checks without additional recovery.
- void speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::JumpList& jumpsToFail)
- {
- Vector<MacroAssembler::Jump, 16> jumpVector = jumpsToFail.jumps();
- for (unsigned i = 0; i < jumpVector.size(); ++i)
- speculationCheck(kind, jsValueSource, nodeIndex, jumpVector[i]);
- }
- void speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::JumpList& jumpsToFail)
+ // Allocator for an object of a specific size.
+ template <typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
+ void emitAllocateJSObject(GPRReg resultGPR, GPRReg allocatorGPR, StructureType structure,
+ StorageType storage, GPRReg scratchGPR, MacroAssembler::JumpList& slowPath)
{
- speculationCheck(kind, jsValueSource, nodeUse.index(), jumpsToFail);
- }
- // Add a speculation check with additional recovery.
- void speculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
- {
- if (!m_compileOkay)
- return;
- m_jit.codeBlock()->appendSpeculationRecovery(recovery);
- m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), jumpToFail, this, m_jit.codeBlock()->numberOfSpeculationRecoveries()));
- }
- void speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery)
- {
- speculationCheck(kind, jsValueSource, nodeUse.index(), jumpToFail, recovery);
+ emitAllocateJSCell(resultGPR, allocatorGPR, structure, scratchGPR, slowPath);
+
+ // Initialize the object's property storage pointer.
+ m_jit.storePtr(storage, MacroAssembler::Address(resultGPR, JSObject::butterflyOffset()));
}
- void forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::Jump jumpToFail, const ValueRecovery& valueRecovery)
+
+ template <typename ClassType, typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
+ void emitAllocateJSObjectWithKnownSize(
+ GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1,
+ GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath, size_t size)
{
- speculationCheck(kind, jsValueSource, nodeIndex, jumpToFail);
-
- unsigned setLocalIndexInBlock = m_indexInBlock + 1;
-
- Node* setLocal = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock));
-
- if (setLocal->op() == Int32ToDouble) {
- setLocal = &at(m_jit.graph().m_blocks[m_block]->at(++setLocalIndexInBlock));
- ASSERT(at(setLocal->child1()).child1() == m_compileIndex);
- } else
- ASSERT(setLocal->child1() == m_compileIndex);
-
- ASSERT(setLocal->op() == SetLocal);
- ASSERT(setLocal->codeOrigin == at(m_compileIndex).codeOrigin);
-
- Node* nextNode = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock + 1));
- if (nextNode->codeOrigin == at(m_compileIndex).codeOrigin) {
- ASSERT(nextNode->op() == Flush);
- nextNode = &at(m_jit.graph().m_blocks[m_block]->at(setLocalIndexInBlock + 2));
- ASSERT(nextNode->codeOrigin != at(m_compileIndex).codeOrigin); // duplicate the same assertion as below so that if we fail, we'll know we came down this path.
- }
- ASSERT(nextNode->codeOrigin != at(m_compileIndex).codeOrigin);
-
- OSRExit& exit = m_jit.codeBlock()->lastOSRExit();
- exit.m_codeOrigin = nextNode->codeOrigin;
- exit.m_lastSetOperand = setLocal->local();
-
- exit.valueRecoveryForOperand(setLocal->local()) = valueRecovery;
+ MarkedAllocator* allocator = &m_jit.vm()->heap.allocatorForObjectOfType<ClassType>(size);
+ m_jit.move(TrustedImmPtr(allocator), scratchGPR1);
+ emitAllocateJSObject(resultGPR, scratchGPR1, structure, storage, scratchGPR2, slowPath);
}
- void forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery)
+
+ // Convenience allocator for a built-in object.
+ template <typename ClassType, typename StructureType, typename StorageType> // StructureType and StorageType can be GPR or ImmPtr.
+ void emitAllocateJSObject(GPRReg resultGPR, StructureType structure, StorageType storage,
+ GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
{
- Vector<MacroAssembler::Jump, 16> jumpVector = jumpsToFail.jumps();
- for (unsigned i = 0; i < jumpVector.size(); ++i)
- forwardSpeculationCheck(kind, jsValueSource, nodeIndex, jumpVector[i], valueRecovery);
+ emitAllocateJSObjectWithKnownSize<ClassType>(
+ resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath,
+ ClassType::allocationSize(0));
}
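+    // Rough usage sketch (structure, resultGPR and the scratch registers are placeholders): a
+    // NewObject-style node might allocate inline and fall back to a C++ operation on the slow path:
+    //
+    //     MacroAssembler::JumpList slowPath;
+    //     emitAllocateJSObject<JSFinalObject>(
+    //         resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR1, scratchGPR2, slowPath);
+    //     addSlowPathGenerator(slowPathCall(slowPath, this, operationNewObject, resultGPR, structure));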
- // Called when we statically determine that a speculation will fail.
- void terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, NodeIndex nodeIndex)
+    template <typename ClassType, typename StructureType> // StructureType can be GPR or ImmPtr.
+ void emitAllocateVariableSizedJSObject(GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
{
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("SpeculativeJIT was terminated.\n");
-#endif
- if (!m_compileOkay)
- return;
- speculationCheck(kind, jsValueRegs, nodeIndex, m_jit.jump());
- m_compileOkay = false;
+ static_assert(!(MarkedSpace::preciseStep & (MarkedSpace::preciseStep - 1)), "MarkedSpace::preciseStep must be a power of two.");
+ static_assert(!(MarkedSpace::impreciseStep & (MarkedSpace::impreciseStep - 1)), "MarkedSpace::impreciseStep must be a power of two.");
+
+ MarkedSpace::Subspace& subspace = m_jit.vm()->heap.subspaceForObjectOfType<ClassType>();
+ m_jit.add32(TrustedImm32(MarkedSpace::preciseStep - 1), allocationSize);
+ MacroAssembler::Jump notSmall = m_jit.branch32(MacroAssembler::AboveOrEqual, allocationSize, TrustedImm32(MarkedSpace::preciseCutoff));
+ m_jit.rshift32(allocationSize, TrustedImm32(getLSBSet(MarkedSpace::preciseStep)), scratchGPR1);
+ m_jit.mul32(TrustedImm32(sizeof(MarkedAllocator)), scratchGPR1, scratchGPR1);
+ m_jit.addPtr(MacroAssembler::TrustedImmPtr(&subspace.preciseAllocators[0]), scratchGPR1);
+
+ MacroAssembler::Jump selectedSmallSpace = m_jit.jump();
+ notSmall.link(&m_jit);
+ slowPath.append(m_jit.branch32(MacroAssembler::AboveOrEqual, allocationSize, TrustedImm32(MarkedSpace::impreciseCutoff)));
+ m_jit.rshift32(allocationSize, TrustedImm32(getLSBSet(MarkedSpace::impreciseStep)), scratchGPR1);
+ m_jit.mul32(TrustedImm32(sizeof(MarkedAllocator)), scratchGPR1, scratchGPR1);
+ m_jit.addPtr(MacroAssembler::TrustedImmPtr(&subspace.impreciseAllocators[0]), scratchGPR1);
+
+ selectedSmallSpace.link(&m_jit);
+
+ emitAllocateJSObject(resultGPR, scratchGPR1, structure, TrustedImmPtr(0), scratchGPR2, slowPath);
}
- void terminateSpeculativeExecution(ExitKind kind, JSValueRegs jsValueRegs, Edge nodeUse)
+
+ template <typename T>
+ void emitAllocateDestructibleObject(GPRReg resultGPR, Structure* structure,
+ GPRReg scratchGPR1, GPRReg scratchGPR2, MacroAssembler::JumpList& slowPath)
{
- terminateSpeculativeExecution(kind, jsValueRegs, nodeUse.index());
+ emitAllocateJSObject<T>(resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR1, scratchGPR2, slowPath);
+ m_jit.storePtr(TrustedImmPtr(structure->classInfo()), MacroAssembler::Address(resultGPR, JSDestructibleObject::classInfoOffset()));
}
+
+ void emitAllocateJSArray(GPRReg resultGPR, Structure*, GPRReg storageGPR, unsigned numElements);
- template<bool strict>
- GPRReg fillSpeculateIntInternal(NodeIndex, DataFormat& returnFormat);
+ void emitGetLength(InlineCallFrame*, GPRReg lengthGPR, bool includeThis = false);
+ void emitGetLength(CodeOrigin, GPRReg lengthGPR, bool includeThis = false);
+ void emitGetCallee(CodeOrigin, GPRReg calleeGPR);
+ void emitGetArgumentStart(CodeOrigin, GPRReg startGPR);
- // It is possible, during speculative generation, to reach a situation in which we
- // can statically determine a speculation will fail (for example, when two nodes
- // will make conflicting speculations about the same operand). In such cases this
- // flag is cleared, indicating no further code generation should take place.
- bool m_compileOkay;
+ // Generate an OSR exit fuzz check. Returns Jump() if OSR exit fuzz is not enabled, or if
+ // it's in training mode.
+ MacroAssembler::Jump emitOSRExitFuzzCheck();
- // Tracking for which nodes are currently holding the values of arguments and bytecode
- // operand-indexed variables.
+    // Add a speculation check without additional recovery.
+ void speculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail);
+ void speculationCheck(ExitKind, JSValueSource, Node*, const MacroAssembler::JumpList& jumpsToFail);
+
+ // Add a speculation check without additional recovery, and with a promise to supply a jump later.
+ OSRExitJumpPlaceholder speculationCheck(ExitKind, JSValueSource, Node*);
+ OSRExitJumpPlaceholder speculationCheck(ExitKind, JSValueSource, Edge);
+ void speculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::Jump jumpToFail);
+ void speculationCheck(ExitKind, JSValueSource, Edge, const MacroAssembler::JumpList& jumpsToFail);
+ // Add a speculation check with additional recovery.
+ void speculationCheck(ExitKind, JSValueSource, Node*, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
+ void speculationCheck(ExitKind, JSValueSource, Edge, MacroAssembler::Jump jumpToFail, const SpeculationRecovery&);
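+    // For example (sketch; indexGPR and lengthGPR are placeholder names), a bounds check that
+    // OSR-exits when the index is not below the length:
+    //
+    //     speculationCheck(
+    //         OutOfBounds, JSValueRegs(), 0,
+    //         m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, lengthGPR));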
- ValueSource valueSourceForOperand(int operand)
- {
- return valueSourceReferenceForOperand(operand);
- }
+ void emitInvalidationPoint(Node*);
- void setNodeIndexForOperand(NodeIndex nodeIndex, int operand)
- {
- valueSourceReferenceForOperand(operand) = ValueSource(nodeIndex);
- }
+ // Called when we statically determine that a speculation will fail.
+ void terminateSpeculativeExecution(ExitKind, JSValueRegs, Node*);
+ void terminateSpeculativeExecution(ExitKind, JSValueRegs, Edge);
- // Call this with care, since it both returns a reference into an array
- // and potentially resizes the array. So it would not be right to call this
- // twice and then perform operands on both references, since the one from
- // the first call may no longer be valid.
- ValueSource& valueSourceReferenceForOperand(int operand)
- {
- if (operandIsArgument(operand)) {
- int argument = operandToArgument(operand);
- return m_arguments[argument];
- }
-
- if ((unsigned)operand >= m_variables.size())
- m_variables.resize(operand + 1);
-
- return m_variables[operand];
- }
+ // Helpers for performing type checks on an edge stored in the given registers.
+ bool needsTypeCheck(Edge edge, SpeculatedType typesPassedThrough) { return m_interpreter.needsTypeCheck(edge, typesPassedThrough); }
+ void typeCheck(JSValueSource, Edge, SpeculatedType typesPassedThrough, MacroAssembler::Jump jumpToFail);
- // The JIT, while also provides MacroAssembler functionality.
- JITCompiler& m_jit;
- // The current node being generated.
- BlockIndex m_block;
- NodeIndex m_compileIndex;
- unsigned m_indexInBlock;
- // Virtual and physical register maps.
- Vector<GenerationInfo, 32> m_generationInfo;
- RegisterBank<GPRInfo> m_gprs;
- RegisterBank<FPRInfo> m_fprs;
-
- Vector<MacroAssembler::Label> m_blockHeads;
- Vector<MacroAssembler::Label> m_osrEntryHeads;
+ void speculateCellTypeWithoutTypeFiltering(Edge, GPRReg cellGPR, JSType);
+ void speculateCellType(Edge, GPRReg cellGPR, SpeculatedType, JSType);
- struct BranchRecord {
- BranchRecord(MacroAssembler::Jump jump, BlockIndex destination)
- : jump(jump)
- , destination(destination)
- {
- }
-
- MacroAssembler::Jump jump;
- BlockIndex destination;
- };
- Vector<BranchRecord, 8> m_branches;
-
- Vector<ValueSource, 0> m_arguments;
- Vector<ValueSource, 0> m_variables;
- int m_lastSetOperand;
- CodeOrigin m_codeOriginForOSR;
+ void speculateInt32(Edge);
+#if USE(JSVALUE64)
+ void convertMachineInt(Edge, GPRReg resultGPR);
+ void speculateMachineInt(Edge);
+ void speculateDoubleRepMachineInt(Edge);
+#endif // USE(JSVALUE64)
+ void speculateNumber(Edge);
+ void speculateRealNumber(Edge);
+ void speculateDoubleRepReal(Edge);
+ void speculateBoolean(Edge);
+ void speculateCell(Edge);
+ void speculateObject(Edge);
+ void speculateFunction(Edge);
+ void speculateFinalObject(Edge);
+ void speculateObjectOrOther(Edge);
+ void speculateString(Edge edge, GPRReg cell);
+ void speculateStringIdentAndLoadStorage(Edge edge, GPRReg string, GPRReg storage);
+ void speculateStringIdent(Edge edge, GPRReg string);
+ void speculateStringIdent(Edge);
+ void speculateString(Edge);
+ void speculateNotStringVar(Edge);
+ template<typename StructureLocationType>
+ void speculateStringObjectForStructure(Edge, StructureLocationType);
+ void speculateStringObject(Edge, GPRReg);
+ void speculateStringObject(Edge);
+ void speculateStringOrStringObject(Edge);
+ void speculateNotCell(Edge);
+ void speculateOther(Edge);
+ void speculateMisc(Edge, JSValueRegs);
+ void speculateMisc(Edge);
+ void speculate(Node*, Edge);
+
+ JITCompiler::Jump jumpSlowForUnwantedArrayMode(GPRReg tempWithIndexingTypeReg, ArrayMode, IndexingType);
+ JITCompiler::JumpList jumpSlowForUnwantedArrayMode(GPRReg tempWithIndexingTypeReg, ArrayMode);
+ void checkArray(Node*);
+ void arrayify(Node*, GPRReg baseReg, GPRReg propertyReg);
+ void arrayify(Node*);
- AbstractState m_state;
+ template<bool strict>
+ GPRReg fillSpeculateInt32Internal(Edge, DataFormat& returnFormat);
- ValueRecovery computeValueRecoveryFor(const ValueSource&);
-
- ValueRecovery computeValueRecoveryFor(int operand)
- {
- return computeValueRecoveryFor(valueSourceForOperand(operand));
- }
-};
-
-
-// === Operand types ===
-//
-// IntegerOperand, DoubleOperand and JSValueOperand.
-//
-// These classes are used to lock the operands to a node into machine
-// registers. These classes implement of pattern of locking a value
-// into register at the point of construction only if it is already in
-// registers, and otherwise loading it lazily at the point it is first
-// used. We do so in order to attempt to avoid spilling one operand
-// in order to make space available for another.
-
-class IntegerOperand {
-public:
- explicit IntegerOperand(SpeculativeJIT* jit, Edge use)
- : m_jit(jit)
- , m_index(use.index())
- , m_gprOrInvalid(InvalidGPRReg)
-#ifndef NDEBUG
- , m_format(DataFormatNone)
-#endif
- {
- ASSERT(m_jit);
- ASSERT(use.useKind() != DoubleUse);
- if (jit->isFilled(m_index))
- gpr();
- }
-
- ~IntegerOperand()
+ // It is possible, during speculative generation, to reach a situation in which we
+ // can statically determine a speculation will fail (for example, when two nodes
+ // will make conflicting speculations about the same operand). In such cases this
+ // flag is cleared, indicating no further code generation should take place.
+ bool m_compileOkay;
+
+ void recordSetLocal(
+ VirtualRegister bytecodeReg, VirtualRegister machineReg, DataFormat format)
{
- ASSERT(m_gprOrInvalid != InvalidGPRReg);
- m_jit->unlock(m_gprOrInvalid);
+ m_stream->appendAndLog(VariableEvent::setLocal(bytecodeReg, machineReg, format));
}
-
- NodeIndex index() const
+
+ void recordSetLocal(DataFormat format)
{
- return m_index;
+ VariableAccessData* variable = m_currentNode->variableAccessData();
+ recordSetLocal(variable->local(), variable->machineLocal(), format);
}
- DataFormat format()
+ GenerationInfo& generationInfoFromVirtualRegister(VirtualRegister virtualRegister)
{
- gpr(); // m_format is set when m_gpr is locked.
- ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger);
- return m_format;
+ return m_generationInfo[virtualRegister.toLocal()];
}
-
- GPRReg gpr()
+
+ GenerationInfo& generationInfo(Node* node)
{
- if (m_gprOrInvalid == InvalidGPRReg)
- m_gprOrInvalid = m_jit->fillInteger(index(), m_format);
- return m_gprOrInvalid;
+ return generationInfoFromVirtualRegister(node->virtualRegister());
}
- void use()
+ GenerationInfo& generationInfo(Edge edge)
{
- m_jit->use(m_index);
+ return generationInfo(edge.node());
}
-private:
- SpeculativeJIT* m_jit;
- NodeIndex m_index;
- GPRReg m_gprOrInvalid;
- DataFormat m_format;
-};
+    // The JIT, which also provides MacroAssembler functionality.
+ JITCompiler& m_jit;
-class DoubleOperand {
-public:
- explicit DoubleOperand(SpeculativeJIT* jit, Edge use)
- : m_jit(jit)
- , m_index(use.index())
- , m_fprOrInvalid(InvalidFPRReg)
- {
- ASSERT(m_jit);
-
- // This is counter-intuitive but correct. DoubleOperand is intended to
- // be used only when you're a node that is happy to accept an untyped
- // value, but will special-case for doubles (using DoubleOperand) if the
- // value happened to already be represented as a double. The implication
- // is that you will not try to force the value to become a double if it
- // is not one already.
- ASSERT(use.useKind() != DoubleUse);
-
- if (jit->isFilledDouble(m_index))
- fpr();
- }
+    // The current block and node being generated.
+ BasicBlock* m_block;
+ Node* m_currentNode;
+ NodeType m_lastGeneratedNode;
+ bool m_canExit;
+ unsigned m_indexInBlock;
+ // Virtual and physical register maps.
+ Vector<GenerationInfo, 32> m_generationInfo;
+ RegisterBank<GPRInfo> m_gprs;
+ RegisterBank<FPRInfo> m_fprs;
- ~DoubleOperand()
- {
- ASSERT(m_fprOrInvalid != InvalidFPRReg);
- m_jit->unlock(m_fprOrInvalid);
- }
+ Vector<MacroAssembler::Label> m_osrEntryHeads;
+
+ struct BranchRecord {
+ BranchRecord(MacroAssembler::Jump jump, BasicBlock* destination)
+ : jump(jump)
+ , destination(destination)
+ {
+ }
- NodeIndex index() const
- {
- return m_index;
- }
+ MacroAssembler::Jump jump;
+ BasicBlock* destination;
+ };
+ Vector<BranchRecord, 8> m_branches;
- FPRReg fpr()
- {
- if (m_fprOrInvalid == InvalidFPRReg)
- m_fprOrInvalid = m_jit->fillDouble(index());
- return m_fprOrInvalid;
- }
+ CodeOrigin m_codeOriginForExitTarget;
+ CodeOrigin m_codeOriginForExitProfile;
- void use()
- {
- m_jit->use(m_index);
- }
-
-private:
- SpeculativeJIT* m_jit;
- NodeIndex m_index;
- FPRReg m_fprOrInvalid;
+ InPlaceAbstractState m_state;
+ AbstractInterpreter<InPlaceAbstractState> m_interpreter;
+
+ VariableEventStream* m_stream;
+ MinifiedGraph* m_minifiedGraph;
+
+ bool m_isCheckingArgumentTypes;
+
+ Vector<std::unique_ptr<SlowPathGenerator>, 8> m_slowPathGenerators;
+ Vector<SilentRegisterSavePlan> m_plans;
};
+
+// === Operand types ===
+//
+// These classes are used to lock the operands to a node into machine
+// registers. These classes implement a pattern of locking a value
+// into a register at the point of construction only if it is already in
+// registers, and otherwise loading it lazily at the point it is first
+// used. We do so in order to attempt to avoid spilling one operand
+// in order to make space available for another.
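+//
+// For example (sketch; node and valueGPR are illustrative), an operand claims its register the
+// first time gpr() is called and releases it when it goes out of scope:
+//
+//     JSValueOperand value(this, node->child1());
+//     GPRReg valueGPR = value.gpr(); // fills and locks the register on first use
+//     ...                            // ~JSValueOperand() unlocks it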
+
class JSValueOperand {
public:
- explicit JSValueOperand(SpeculativeJIT* jit, Edge use)
+ explicit JSValueOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
: m_jit(jit)
- , m_index(use.index())
+ , m_edge(edge)
#if USE(JSVALUE64)
, m_gprOrInvalid(InvalidGPRReg)
#elif USE(JSVALUE32_64)
#endif
{
ASSERT(m_jit);
- ASSERT(use.useKind() != DoubleUse);
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == UntypedUse);
#if USE(JSVALUE64)
- if (jit->isFilled(m_index))
+ if (jit->isFilled(node()))
gpr();
#elif USE(JSVALUE32_64)
m_register.pair.tagGPR = InvalidGPRReg;
m_register.pair.payloadGPR = InvalidGPRReg;
- if (jit->isFilled(m_index))
+ if (jit->isFilled(node()))
fill();
#endif
}
}
#endif
}
+
+ Edge edge() const
+ {
+ return m_edge;
+ }
- NodeIndex index() const
+ Node* node() const
{
- return m_index;
+ return edge().node();
}
#if USE(JSVALUE64)
GPRReg gpr()
{
if (m_gprOrInvalid == InvalidGPRReg)
- m_gprOrInvalid = m_jit->fillJSValue(index());
+ m_gprOrInvalid = m_jit->fillJSValue(m_edge);
return m_gprOrInvalid;
}
JSValueRegs jsValueRegs()
void fill()
{
if (m_register.pair.tagGPR == InvalidGPRReg && m_register.pair.payloadGPR == InvalidGPRReg)
- m_isDouble = !m_jit->fillJSValue(index(), m_register.pair.tagGPR, m_register.pair.payloadGPR, m_register.fpr);
+ m_isDouble = !m_jit->fillJSValue(m_edge, m_register.pair.tagGPR, m_register.pair.payloadGPR, m_register.fpr);
}
GPRReg tagGPR()
fill();
ASSERT(!m_isDouble);
return m_register.pair.tagGPR;
- }
+ }
GPRReg payloadGPR()
{
ASSERT(!m_isDouble);
return m_register.pair.payloadGPR;
}
-
+
JSValueRegs jsValueRegs()
{
return JSValueRegs(tagGPR(), payloadGPR());
}
+ GPRReg gpr(WhichValueWord which)
+ {
+ return jsValueRegs().gpr(which);
+ }
+
FPRReg fpr()
{
fill();
void use()
{
- m_jit->use(m_index);
+ m_jit->use(node());
}
private:
SpeculativeJIT* m_jit;
- NodeIndex m_index;
+ Edge m_edge;
#if USE(JSVALUE64)
GPRReg m_gprOrInvalid;
#elif USE(JSVALUE32_64)
class StorageOperand {
public:
- explicit StorageOperand(SpeculativeJIT* jit, Edge use)
+ explicit StorageOperand(SpeculativeJIT* jit, Edge edge)
: m_jit(jit)
- , m_index(use.index())
+ , m_edge(edge)
, m_gprOrInvalid(InvalidGPRReg)
{
ASSERT(m_jit);
- ASSERT(use.useKind() != DoubleUse);
- if (jit->isFilled(m_index))
+ ASSERT(edge.useKind() == UntypedUse || edge.useKind() == KnownCellUse);
+ if (jit->isFilled(node()))
gpr();
}
m_jit->unlock(m_gprOrInvalid);
}
- NodeIndex index() const
+ Edge edge() const
{
- return m_index;
+ return m_edge;
+ }
+
+ Node* node() const
+ {
+ return edge().node();
}
GPRReg gpr()
{
if (m_gprOrInvalid == InvalidGPRReg)
- m_gprOrInvalid = m_jit->fillStorage(index());
+ m_gprOrInvalid = m_jit->fillStorage(edge());
return m_gprOrInvalid;
}
void use()
{
- m_jit->use(m_index);
+ m_jit->use(node());
}
private:
SpeculativeJIT* m_jit;
- NodeIndex m_index;
+ Edge m_edge;
GPRReg m_gprOrInvalid;
};
// currently allocated to child nodes whose value is consumed
// by, and not live after, this operation.
+enum ReuseTag { Reuse };
+
class GPRTemporary {
public:
GPRTemporary();
GPRTemporary(SpeculativeJIT*);
GPRTemporary(SpeculativeJIT*, GPRReg specific);
- GPRTemporary(SpeculativeJIT*, SpeculateIntegerOperand&);
- GPRTemporary(SpeculativeJIT*, SpeculateIntegerOperand&, SpeculateIntegerOperand&);
- GPRTemporary(SpeculativeJIT*, SpeculateStrictInt32Operand&);
- GPRTemporary(SpeculativeJIT*, IntegerOperand&);
- GPRTemporary(SpeculativeJIT*, IntegerOperand&, IntegerOperand&);
- GPRTemporary(SpeculativeJIT*, SpeculateCellOperand&);
- GPRTemporary(SpeculativeJIT*, SpeculateBooleanOperand&);
-#if USE(JSVALUE64)
- GPRTemporary(SpeculativeJIT*, JSValueOperand&);
-#elif USE(JSVALUE32_64)
- GPRTemporary(SpeculativeJIT*, JSValueOperand&, bool tag = true);
+ template<typename T>
+ GPRTemporary(SpeculativeJIT* jit, ReuseTag, T& operand)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+ {
+ if (m_jit->canReuse(operand.node()))
+ m_gpr = m_jit->reuse(operand.gpr());
+ else
+ m_gpr = m_jit->allocate();
+ }
+ template<typename T1, typename T2>
+ GPRTemporary(SpeculativeJIT* jit, ReuseTag, T1& op1, T2& op2)
+ : m_jit(jit)
+ , m_gpr(InvalidGPRReg)
+ {
+ if (m_jit->canReuse(op1.node()))
+ m_gpr = m_jit->reuse(op1.gpr());
+ else if (m_jit->canReuse(op2.node()))
+ m_gpr = m_jit->reuse(op2.gpr());
+ else
+ m_gpr = m_jit->allocate();
+ }
+#if USE(JSVALUE32_64)
+ GPRTemporary(SpeculativeJIT*, ReuseTag, JSValueOperand&, WhichValueWord);
#endif
- GPRTemporary(SpeculativeJIT*, StorageOperand&);
void adopt(GPRTemporary&);
GPRReg m_gpr;
};
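+// For example (sketch; op1 and node are placeholders), an in-place unary op can ask to reuse its
+// operand's register for the result:
+//
+//     SpeculateInt32Operand op1(this, node->child1());
+//     GPRTemporary result(this, Reuse, op1); // reuses op1's GPR when op1 dies at this node
+//     m_jit.move(op1.gpr(), result.gpr());   // a no-op move when the register was reused
+//     m_jit.neg32(result.gpr());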
+class JSValueRegsTemporary {
+public:
+ JSValueRegsTemporary();
+ JSValueRegsTemporary(SpeculativeJIT*);
+ ~JSValueRegsTemporary();
+
+ JSValueRegs regs();
+
+private:
+#if USE(JSVALUE64)
+ GPRTemporary m_gpr;
+#else
+ GPRTemporary m_payloadGPR;
+ GPRTemporary m_tagGPR;
+#endif
+};
+
class FPRTemporary {
public:
FPRTemporary(SpeculativeJIT*);
- FPRTemporary(SpeculativeJIT*, DoubleOperand&);
- FPRTemporary(SpeculativeJIT*, DoubleOperand&, DoubleOperand&);
FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&);
FPRTemporary(SpeculativeJIT*, SpeculateDoubleOperand&, SpeculateDoubleOperand&);
#if USE(JSVALUE32_64)
//
// These classes lock the result of a call to a C++ helper function.
-class GPRResult : public GPRTemporary {
+class GPRFlushedCallResult : public GPRTemporary {
public:
- GPRResult(SpeculativeJIT* jit)
+ GPRFlushedCallResult(SpeculativeJIT* jit)
: GPRTemporary(jit, GPRInfo::returnValueGPR)
{
}
};
#if USE(JSVALUE32_64)
-class GPRResult2 : public GPRTemporary {
+class GPRFlushedCallResult2 : public GPRTemporary {
public:
- GPRResult2(SpeculativeJIT* jit)
+ GPRFlushedCallResult2(SpeculativeJIT* jit)
: GPRTemporary(jit, GPRInfo::returnValueGPR2)
{
}
// === Speculative Operand types ===
//
-// SpeculateIntegerOperand, SpeculateStrictInt32Operand and SpeculateCellOperand.
+// SpeculateInt32Operand, SpeculateStrictInt32Operand and SpeculateCellOperand.
//
// These are used to lock the operands to a node into machine registers within the
// SpeculativeJIT. The classes operate like those above, however these will
// perform a speculative check for a more restrictive type than we can statically
// determine the operand to have. If the operand does not have the requested type,
// a bail-out to the non-speculative path will be taken.
-class SpeculateIntegerOperand {
+class SpeculateInt32Operand {
public:
- explicit SpeculateIntegerOperand(SpeculativeJIT* jit, Edge use)
+ explicit SpeculateInt32Operand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
: m_jit(jit)
- , m_index(use.index())
+ , m_edge(edge)
, m_gprOrInvalid(InvalidGPRReg)
#ifndef NDEBUG
, m_format(DataFormatNone)
#endif
{
ASSERT(m_jit);
- ASSERT(use.useKind() != DoubleUse);
- if (jit->isFilled(m_index))
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use));
+ if (jit->isFilled(node()))
gpr();
}
- ~SpeculateIntegerOperand()
+ ~SpeculateInt32Operand()
{
ASSERT(m_gprOrInvalid != InvalidGPRReg);
m_jit->unlock(m_gprOrInvalid);
}
+
+ Edge edge() const
+ {
+ return m_edge;
+ }
- NodeIndex index() const
+ Node* node() const
{
- return m_index;
+ return edge().node();
}
DataFormat format()
{
gpr(); // m_format is set when m_gpr is locked.
- ASSERT(m_format == DataFormatInteger || m_format == DataFormatJSInteger);
+ ASSERT(m_format == DataFormatInt32 || m_format == DataFormatJSInt32);
return m_format;
}
GPRReg gpr()
{
if (m_gprOrInvalid == InvalidGPRReg)
- m_gprOrInvalid = m_jit->fillSpeculateInt(index(), m_format);
+ m_gprOrInvalid = m_jit->fillSpeculateInt32(edge(), m_format);
return m_gprOrInvalid;
}
+
+ void use()
+ {
+ m_jit->use(node());
+ }
private:
SpeculativeJIT* m_jit;
- NodeIndex m_index;
+ Edge m_edge;
GPRReg m_gprOrInvalid;
DataFormat m_format;
};
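+// A rough sketch of how these operands are combined in a compile method (names and the exact
+// node shape are illustrative; Overflow is an ExitKind):
+//
+//     SpeculateInt32Operand op1(this, node->child1());
+//     SpeculateInt32Operand op2(this, node->child2());
+//     GPRTemporary result(this, Reuse, op1);
+//
+//     m_jit.move(op1.gpr(), result.gpr());
+//     speculationCheck(
+//         Overflow, JSValueRegs(), 0,
+//         m_jit.branchAdd32(MacroAssembler::Overflow, op2.gpr(), result.gpr()));
+//     int32Result(result.gpr(), node);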
class SpeculateStrictInt32Operand {
public:
- explicit SpeculateStrictInt32Operand(SpeculativeJIT* jit, Edge use)
+ explicit SpeculateStrictInt32Operand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
: m_jit(jit)
- , m_index(use.index())
+ , m_edge(edge)
, m_gprOrInvalid(InvalidGPRReg)
{
ASSERT(m_jit);
- ASSERT(use.useKind() != DoubleUse);
- if (jit->isFilled(m_index))
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || (edge.useKind() == Int32Use || edge.useKind() == KnownInt32Use));
+ if (jit->isFilled(node()))
gpr();
}
ASSERT(m_gprOrInvalid != InvalidGPRReg);
m_jit->unlock(m_gprOrInvalid);
}
+
+ Edge edge() const
+ {
+ return m_edge;
+ }
+
+ Node* node() const
+ {
+ return edge().node();
+ }
+
+ GPRReg gpr()
+ {
+ if (m_gprOrInvalid == InvalidGPRReg)
+ m_gprOrInvalid = m_jit->fillSpeculateInt32Strict(edge());
+ return m_gprOrInvalid;
+ }
+
+ void use()
+ {
+ m_jit->use(node());
+ }
- NodeIndex index() const
+private:
+ SpeculativeJIT* m_jit;
+ Edge m_edge;
+ GPRReg m_gprOrInvalid;
+};
+
+// Gives you a canonical Int52 (i.e. it's left-shifted by 16, low bits zero).
+class SpeculateInt52Operand {
+public:
+ explicit SpeculateInt52Operand(SpeculativeJIT* jit, Edge edge)
+ : m_jit(jit)
+ , m_edge(edge)
+ , m_gprOrInvalid(InvalidGPRReg)
+ {
+ RELEASE_ASSERT(edge.useKind() == Int52RepUse);
+ if (jit->isFilled(node()))
+ gpr();
+ }
+
+ ~SpeculateInt52Operand()
+ {
+ ASSERT(m_gprOrInvalid != InvalidGPRReg);
+ m_jit->unlock(m_gprOrInvalid);
+ }
+
+ Edge edge() const
+ {
+ return m_edge;
+ }
+
+ Node* node() const
+ {
+ return edge().node();
+ }
+
+ GPRReg gpr()
{
- return m_index;
+ if (m_gprOrInvalid == InvalidGPRReg)
+ m_gprOrInvalid = m_jit->fillSpeculateInt52(edge(), DataFormatInt52);
+ return m_gprOrInvalid;
+ }
+
+ void use()
+ {
+ m_jit->use(node());
}
+
+private:
+ SpeculativeJIT* m_jit;
+ Edge m_edge;
+ GPRReg m_gprOrInvalid;
+};
+// Gives you a strict Int52 (i.e. the payload is in the low 48 bits, high 16 bits are sign-extended).
+class SpeculateStrictInt52Operand {
+public:
+ explicit SpeculateStrictInt52Operand(SpeculativeJIT* jit, Edge edge)
+ : m_jit(jit)
+ , m_edge(edge)
+ , m_gprOrInvalid(InvalidGPRReg)
+ {
+ RELEASE_ASSERT(edge.useKind() == Int52RepUse);
+ if (jit->isFilled(node()))
+ gpr();
+ }
+
+ ~SpeculateStrictInt52Operand()
+ {
+ ASSERT(m_gprOrInvalid != InvalidGPRReg);
+ m_jit->unlock(m_gprOrInvalid);
+ }
+
+ Edge edge() const
+ {
+ return m_edge;
+ }
+
+ Node* node() const
+ {
+ return edge().node();
+ }
+
GPRReg gpr()
{
if (m_gprOrInvalid == InvalidGPRReg)
- m_gprOrInvalid = m_jit->fillSpeculateIntStrict(index());
+ m_gprOrInvalid = m_jit->fillSpeculateInt52(edge(), DataFormatStrictInt52);
+ return m_gprOrInvalid;
+ }
+
+ void use()
+ {
+ m_jit->use(node());
+ }
+
+private:
+ SpeculativeJIT* m_jit;
+ Edge m_edge;
+ GPRReg m_gprOrInvalid;
+};
+
+enum OppositeShiftTag { OppositeShift };
+
+class SpeculateWhicheverInt52Operand {
+public:
+ explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge)
+ : m_jit(jit)
+ , m_edge(edge)
+ , m_gprOrInvalid(InvalidGPRReg)
+ , m_strict(jit->betterUseStrictInt52(edge))
+ {
+ RELEASE_ASSERT(edge.useKind() == Int52RepUse);
+ if (jit->isFilled(node()))
+ gpr();
+ }
+
+ explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, const SpeculateWhicheverInt52Operand& other)
+ : m_jit(jit)
+ , m_edge(edge)
+ , m_gprOrInvalid(InvalidGPRReg)
+ , m_strict(other.m_strict)
+ {
+ RELEASE_ASSERT(edge.useKind() == Int52RepUse);
+ if (jit->isFilled(node()))
+ gpr();
+ }
+
+ explicit SpeculateWhicheverInt52Operand(SpeculativeJIT* jit, Edge edge, OppositeShiftTag, const SpeculateWhicheverInt52Operand& other)
+ : m_jit(jit)
+ , m_edge(edge)
+ , m_gprOrInvalid(InvalidGPRReg)
+ , m_strict(!other.m_strict)
+ {
+ RELEASE_ASSERT(edge.useKind() == Int52RepUse);
+ if (jit->isFilled(node()))
+ gpr();
+ }
+
+ ~SpeculateWhicheverInt52Operand()
+ {
+ ASSERT(m_gprOrInvalid != InvalidGPRReg);
+ m_jit->unlock(m_gprOrInvalid);
+ }
+
+ Edge edge() const
+ {
+ return m_edge;
+ }
+
+ Node* node() const
+ {
+ return edge().node();
+ }
+
+ GPRReg gpr()
+ {
+ if (m_gprOrInvalid == InvalidGPRReg) {
+ m_gprOrInvalid = m_jit->fillSpeculateInt52(
+ edge(), m_strict ? DataFormatStrictInt52 : DataFormatInt52);
+ }
return m_gprOrInvalid;
}
void use()
{
- m_jit->use(m_index);
+ m_jit->use(node());
+ }
+
+ DataFormat format() const
+ {
+ return m_strict ? DataFormatStrictInt52 : DataFormatInt52;
}
private:
SpeculativeJIT* m_jit;
- NodeIndex m_index;
+ Edge m_edge;
GPRReg m_gprOrInvalid;
+ bool m_strict;
};
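+// SpeculateWhicheverInt52Operand fills the value in whichever of the two Int52 shifts it already
+// has (see format()), avoiding a re-shift. A rough sketch of how a multiply might pair operands,
+// loading the second with the opposite shift so the product of one shifted and one strict value
+// carries a single shift:
+//
+//     SpeculateWhicheverInt52Operand op1(this, node->child1());
+//     SpeculateWhicheverInt52Operand op2(this, node->child2(), OppositeShift, op1);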
class SpeculateDoubleOperand {
public:
- explicit SpeculateDoubleOperand(SpeculativeJIT* jit, Edge use)
+ explicit SpeculateDoubleOperand(SpeculativeJIT* jit, Edge edge)
: m_jit(jit)
- , m_index(use.index())
+ , m_edge(edge)
, m_fprOrInvalid(InvalidFPRReg)
{
ASSERT(m_jit);
- ASSERT(use.useKind() == DoubleUse);
- if (jit->isFilled(m_index))
+ RELEASE_ASSERT(isDouble(edge.useKind()));
+ if (jit->isFilled(node()))
fpr();
}
ASSERT(m_fprOrInvalid != InvalidFPRReg);
m_jit->unlock(m_fprOrInvalid);
}
+
+ Edge edge() const
+ {
+ return m_edge;
+ }
- NodeIndex index() const
+ Node* node() const
{
- return m_index;
+ return edge().node();
}
FPRReg fpr()
{
if (m_fprOrInvalid == InvalidFPRReg)
- m_fprOrInvalid = m_jit->fillSpeculateDouble(index());
+ m_fprOrInvalid = m_jit->fillSpeculateDouble(edge());
return m_fprOrInvalid;
}
+
+ void use()
+ {
+ m_jit->use(node());
+ }
private:
SpeculativeJIT* m_jit;
- NodeIndex m_index;
+ Edge m_edge;
FPRReg m_fprOrInvalid;
};
class SpeculateCellOperand {
public:
- explicit SpeculateCellOperand(SpeculativeJIT* jit, Edge use)
+ explicit SpeculateCellOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
: m_jit(jit)
- , m_index(use.index())
+ , m_edge(edge)
, m_gprOrInvalid(InvalidGPRReg)
{
ASSERT(m_jit);
- ASSERT(use.useKind() != DoubleUse);
- if (jit->isFilled(m_index))
+ if (!edge)
+ return;
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || isCell(edge.useKind()));
+ if (jit->isFilled(node()))
gpr();
}
~SpeculateCellOperand()
{
+ if (!m_edge)
+ return;
ASSERT(m_gprOrInvalid != InvalidGPRReg);
m_jit->unlock(m_gprOrInvalid);
}
+
+ Edge edge() const
+ {
+ return m_edge;
+ }
- NodeIndex index() const
+ Node* node() const
{
- return m_index;
+ return edge().node();
}
GPRReg gpr()
{
+ ASSERT(m_edge);
if (m_gprOrInvalid == InvalidGPRReg)
- m_gprOrInvalid = m_jit->fillSpeculateCell(index());
+ m_gprOrInvalid = m_jit->fillSpeculateCell(edge());
return m_gprOrInvalid;
}
void use()
{
- m_jit->use(m_index);
+ ASSERT(m_edge);
+ m_jit->use(node());
}
private:
SpeculativeJIT* m_jit;
- NodeIndex m_index;
+ Edge m_edge;
GPRReg m_gprOrInvalid;
};
class SpeculateBooleanOperand {
public:
- explicit SpeculateBooleanOperand(SpeculativeJIT* jit, Edge use)
+ explicit SpeculateBooleanOperand(SpeculativeJIT* jit, Edge edge, OperandSpeculationMode mode = AutomaticOperandSpeculation)
: m_jit(jit)
- , m_index(use.index())
+ , m_edge(edge)
, m_gprOrInvalid(InvalidGPRReg)
{
ASSERT(m_jit);
- ASSERT(use.useKind() != DoubleUse);
- if (jit->isFilled(m_index))
+ ASSERT_UNUSED(mode, mode == ManualOperandSpeculation || edge.useKind() == BooleanUse);
+ if (jit->isFilled(node()))
gpr();
}
m_jit->unlock(m_gprOrInvalid);
}
- NodeIndex index() const
+ Edge edge() const
+ {
+ return m_edge;
+ }
+
+ Node* node() const
{
- return m_index;
+ return edge().node();
}
GPRReg gpr()
{
if (m_gprOrInvalid == InvalidGPRReg)
- m_gprOrInvalid = m_jit->fillSpeculateBoolean(index());
+ m_gprOrInvalid = m_jit->fillSpeculateBoolean(edge());
return m_gprOrInvalid;
}
void use()
{
- m_jit->use(m_index);
+ m_jit->use(node());
}
private:
SpeculativeJIT* m_jit;
- NodeIndex m_index;
+ Edge m_edge;
GPRReg m_gprOrInvalid;
};
-inline SpeculativeJIT::SpeculativeJIT(JITCompiler& jit)
- : m_compileOkay(true)
- , m_jit(jit)
- , m_compileIndex(0)
- , m_indexInBlock(0)
- , m_generationInfo(m_jit.codeBlock()->m_numCalleeRegisters)
- , m_blockHeads(jit.graph().m_blocks.size())
- , m_arguments(jit.codeBlock()->numParameters())
- , m_variables(jit.graph().m_localVars)
- , m_lastSetOperand(std::numeric_limits<int>::max())
- , m_state(m_jit.graph())
+template<typename StructureLocationType>
+void SpeculativeJIT::speculateStringObjectForStructure(Edge edge, StructureLocationType structureLocation)
{
+ Structure* stringObjectStructure =
+ m_jit.globalObjectFor(m_currentNode->origin.semantic)->stringObjectStructure();
+
+ if (!m_state.forNode(edge).m_structure.isSubsetOf(StructureSet(stringObjectStructure))) {
+ speculationCheck(
+ NotStringObject, JSValueRegs(), 0,
+ m_jit.branchStructurePtr(
+ JITCompiler::NotEqual, structureLocation, stringObjectStructure));
+ }
}
+#define DFG_TYPE_CHECK(source, edge, typesPassedThrough, jumpToFail) do { \
+ JSValueSource _dtc_source = (source); \
+ Edge _dtc_edge = (edge); \
+ SpeculatedType _dtc_typesPassedThrough = typesPassedThrough; \
+ if (!needsTypeCheck(_dtc_edge, _dtc_typesPassedThrough)) \
+ break; \
+ typeCheck(_dtc_source, _dtc_edge, _dtc_typesPassedThrough, (jumpToFail)); \
+ } while (0)
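+// Typical use (sketch; valueGPR is a placeholder, shown for a 64-bit build): the check is emitted
+// only when the abstract interpreter says the edge may still carry a value outside the filtered set:
+//
+//     DFG_TYPE_CHECK(
+//         JSValueRegs(valueGPR), edge, SpecInt32,
+//         m_jit.branch64(MacroAssembler::Below, valueGPR, GPRInfo::tagTypeNumberRegister));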
+
} } // namespace JSC::DFG
#endif