/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
* Copyright (C) 2011 Intel Corporation. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
#if ENABLE(DFG_JIT)
+#include "ArrayPrototype.h"
+#include "DFGAbstractInterpreterInlines.h"
+#include "DFGCallArrayAllocatorSlowPathGenerator.h"
+#include "DFGOperations.h"
+#include "DFGSlowPathGenerator.h"
+#include "Debugger.h"
+#include "DirectArguments.h"
+#include "GetterSetter.h"
+#include "JSEnvironmentRecord.h"
+#include "JSLexicalEnvironment.h"
+#include "JSPropertyNameEnumerator.h"
+#include "ObjectPrototype.h"
+#include "JSCInlines.h"
+#include "SetupVarargsFrame.h"
+#include "TypeProfilerLog.h"
+
namespace JSC { namespace DFG {
#if USE(JSVALUE32_64)
-GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat)
-{
- Node& node = at(nodeIndex);
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
-
- if (info.registerFormat() == DataFormatNone) {
- GPRReg gpr = allocate();
-
- if (node.hasConstant()) {
- m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
- if (isInt32Constant(nodeIndex))
- m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
- else if (isNumberConstant(nodeIndex))
- ASSERT_NOT_REACHED();
- else {
- ASSERT(isJSConstant(nodeIndex));
- JSValue jsValue = valueOfJSConstant(nodeIndex);
- m_jit.move(MacroAssembler::Imm32(jsValue.payload()), gpr);
- }
- } else {
- ASSERT(info.spillFormat() == DataFormatJS || info.spillFormat() == DataFormatJSInteger || info.spillFormat() == DataFormatInteger);
- m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
- }
-
- info.fillInteger(gpr);
- returnFormat = DataFormatInteger;
- return gpr;
- }
-
- switch (info.registerFormat()) {
- case DataFormatNone:
- // Should have filled, above.
- case DataFormatJSDouble:
- case DataFormatDouble:
- case DataFormatJS:
- case DataFormatCell:
- case DataFormatJSCell:
- case DataFormatBoolean:
- case DataFormatJSBoolean:
- case DataFormatStorage:
- // Should only be calling this function if we know this operand to be integer.
- ASSERT_NOT_REACHED();
-
- case DataFormatJSInteger: {
- GPRReg tagGPR = info.tagGPR();
- GPRReg payloadGPR = info.payloadGPR();
- m_gprs.lock(tagGPR);
- m_jit.jitAssertIsJSInt32(tagGPR);
- m_gprs.unlock(tagGPR);
- m_gprs.lock(payloadGPR);
- m_gprs.release(tagGPR);
- m_gprs.release(payloadGPR);
- m_gprs.retain(payloadGPR, virtualRegister, SpillOrderInteger);
- info.fillInteger(payloadGPR);
- returnFormat = DataFormatInteger;
- return payloadGPR;
- }
-
- case DataFormatInteger: {
- GPRReg gpr = info.gpr();
- m_gprs.lock(gpr);
- m_jit.jitAssertIsInt32(gpr);
- returnFormat = DataFormatInteger;
- return gpr;
- }
- }
-
- ASSERT_NOT_REACHED();
- return InvalidGPRReg;
-}
-
-FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex)
-{
- Node& node = at(nodeIndex);
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
-
- if (info.registerFormat() == DataFormatNone) {
-
- if (node.hasConstant()) {
- if (isInt32Constant(nodeIndex)) {
- // FIXME: should not be reachable?
- GPRReg gpr = allocate();
- m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
- m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
- info.fillInteger(gpr);
- unlock(gpr);
- } else if (isNumberConstant(nodeIndex)) {
- FPRReg fpr = fprAllocate();
- m_jit.loadDouble(addressOfDoubleConstant(nodeIndex), fpr);
- m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
- info.fillDouble(fpr);
- return fpr;
- } else {
- // FIXME: should not be reachable?
- ASSERT_NOT_REACHED();
- }
- } else {
- DataFormat spillFormat = info.spillFormat();
- ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInteger);
- if (spillFormat == DataFormatJSDouble) {
- FPRReg fpr = fprAllocate();
- m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
- m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
- info.fillDouble(fpr);
- return fpr;
- }
-
- FPRReg fpr = fprAllocate();
- JITCompiler::Jump hasUnboxedDouble;
-
- if (spillFormat != DataFormatJSInteger && spillFormat != DataFormatInteger) {
- JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag));
- m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
- hasUnboxedDouble = m_jit.jump();
- isInteger.link(&m_jit);
- }
-
- m_jit.convertInt32ToDouble(JITCompiler::payloadFor(virtualRegister), fpr);
-
- if (hasUnboxedDouble.isSet())
- hasUnboxedDouble.link(&m_jit);
-
- m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
- info.fillDouble(fpr);
- return fpr;
- }
- }
-
- switch (info.registerFormat()) {
- case DataFormatNone:
- // Should have filled, above.
- case DataFormatCell:
- case DataFormatJSCell:
- case DataFormatBoolean:
- case DataFormatJSBoolean:
- case DataFormatStorage:
- // Should only be calling this function if we know this operand to be numeric.
- ASSERT_NOT_REACHED();
-
- case DataFormatJSInteger:
- case DataFormatJS: {
- GPRReg tagGPR = info.tagGPR();
- GPRReg payloadGPR = info.payloadGPR();
- FPRReg fpr = fprAllocate();
- m_gprs.lock(tagGPR);
- m_gprs.lock(payloadGPR);
-
- JITCompiler::Jump hasUnboxedDouble;
-
- if (info.registerFormat() != DataFormatJSInteger) {
- FPRTemporary scratch(this);
- JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
- m_jit.jitAssertIsJSDouble(tagGPR);
- unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
- hasUnboxedDouble = m_jit.jump();
- isInteger.link(&m_jit);
- }
-
- m_jit.convertInt32ToDouble(payloadGPR, fpr);
-
- if (hasUnboxedDouble.isSet())
- hasUnboxedDouble.link(&m_jit);
-
- m_gprs.release(tagGPR);
- m_gprs.release(payloadGPR);
- m_gprs.unlock(tagGPR);
- m_gprs.unlock(payloadGPR);
- m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
- info.fillDouble(fpr);
- info.killSpilled();
- return fpr;
- }
-
- case DataFormatInteger: {
- FPRReg fpr = fprAllocate();
- GPRReg gpr = info.gpr();
- m_gprs.lock(gpr);
- m_jit.convertInt32ToDouble(gpr, fpr);
- m_gprs.unlock(gpr);
- return fpr;
- }
-
- case DataFormatJSDouble:
- case DataFormatDouble: {
- FPRReg fpr = info.fpr();
- m_fprs.lock(fpr);
- return fpr;
- }
- }
-
- ASSERT_NOT_REACHED();
- return InvalidFPRReg;
-}
-
-bool SpeculativeJIT::fillJSValue(NodeIndex nodeIndex, GPRReg& tagGPR, GPRReg& payloadGPR, FPRReg& fpr)
+bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR, FPRReg& fpr)
{
// FIXME: For double we could fill with a FPR.
UNUSED_PARAM(fpr);
- Node& node = at(nodeIndex);
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
+ VirtualRegister virtualRegister = edge->virtualRegister();
+ GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
switch (info.registerFormat()) {
case DataFormatNone: {
- if (node.hasConstant()) {
+ if (edge->hasConstant()) {
tagGPR = allocate();
payloadGPR = allocate();
- m_jit.move(Imm32(valueOfJSConstant(nodeIndex).tag()), tagGPR);
- m_jit.move(Imm32(valueOfJSConstant(nodeIndex).payload()), payloadGPR);
+ JSValue value = edge->asJSValue();
+ m_jit.move(Imm32(value.tag()), tagGPR);
+ m_jit.move(Imm32(value.payload()), payloadGPR);
m_gprs.retain(tagGPR, virtualRegister, SpillOrderConstant);
m_gprs.retain(payloadGPR, virtualRegister, SpillOrderConstant);
- info.fillJSValue(tagGPR, payloadGPR, isInt32Constant(nodeIndex) ? DataFormatJSInteger : DataFormatJS);
+ info.fillJSValue(*m_stream, tagGPR, payloadGPR, DataFormatJS);
} else {
DataFormat spillFormat = info.spillFormat();
ASSERT(spillFormat != DataFormatNone && spillFormat != DataFormatStorage);
tagGPR = allocate();
payloadGPR = allocate();
switch (spillFormat) {
- case DataFormatInteger:
+ case DataFormatInt32:
m_jit.move(TrustedImm32(JSValue::Int32Tag), tagGPR);
- spillFormat = DataFormatJSInteger; // This will be used as the new register format.
+ spillFormat = DataFormatJSInt32; // This will be used as the new register format.
break;
case DataFormatCell:
m_jit.move(TrustedImm32(JSValue::CellTag), tagGPR);
m_jit.load32(JITCompiler::payloadFor(virtualRegister), payloadGPR);
m_gprs.retain(tagGPR, virtualRegister, SpillOrderSpilled);
m_gprs.retain(payloadGPR, virtualRegister, SpillOrderSpilled);
- info.fillJSValue(tagGPR, payloadGPR, spillFormat == DataFormatJSDouble ? DataFormatJS : spillFormat);
+ info.fillJSValue(*m_stream, tagGPR, payloadGPR, spillFormat == DataFormatJSDouble ? DataFormatJS : spillFormat);
}
return true;
}
- case DataFormatInteger:
+ case DataFormatInt32:
case DataFormatCell:
case DataFormatBoolean: {
GPRReg gpr = info.gpr();
m_gprs.lock(gpr);
}
tagGPR = allocate();
- uint32_t tag = JSValue::EmptyValueTag;
+ int32_t tag = JSValue::EmptyValueTag;
DataFormat fillFormat = DataFormatJS;
switch (info.registerFormat()) {
- case DataFormatInteger:
+ case DataFormatInt32:
tag = JSValue::Int32Tag;
- fillFormat = DataFormatJSInteger;
+ fillFormat = DataFormatJSInt32;
break;
case DataFormatCell:
tag = JSValue::CellTag;
fillFormat = DataFormatJSBoolean;
break;
default:
- ASSERT_NOT_REACHED();
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
m_jit.move(TrustedImm32(tag), tagGPR);
m_gprs.release(gpr);
m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS);
m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS);
- info.fillJSValue(tagGPR, payloadGPR, fillFormat);
+ info.fillJSValue(*m_stream, tagGPR, payloadGPR, fillFormat);
return true;
}
case DataFormatJSDouble:
- case DataFormatDouble: {
- FPRReg oldFPR = info.fpr();
- m_fprs.lock(oldFPR);
- tagGPR = allocate();
- payloadGPR = allocate();
- boxDouble(oldFPR, tagGPR, payloadGPR);
- m_fprs.unlock(oldFPR);
- m_fprs.release(oldFPR);
- m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS);
- m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS);
- info.fillJSValue(tagGPR, payloadGPR, DataFormatJS);
- return true;
- }
-
case DataFormatJS:
- case DataFormatJSInteger:
+ case DataFormatJSInt32:
case DataFormatJSCell:
case DataFormatJSBoolean: {
tagGPR = info.tagGPR();
}
case DataFormatStorage:
+ case DataFormatDouble:
// this type currently never occurs
- ASSERT_NOT_REACHED();
- }
-
- ASSERT_NOT_REACHED();
- return true;
-}
+ RELEASE_ASSERT_NOT_REACHED();
-void SpeculativeJIT::nonSpeculativeValueToNumber(Node& node)
-{
- if (isKnownNumeric(node.child1().index())) {
- JSValueOperand op1(this, node.child1());
- op1.fill();
- if (op1.isDouble()) {
- FPRTemporary result(this, op1);
- m_jit.moveDouble(op1.fpr(), result.fpr());
- doubleResult(result.fpr(), m_compileIndex);
- } else {
- GPRTemporary resultTag(this, op1);
- GPRTemporary resultPayload(this, op1, false);
- m_jit.move(op1.tagGPR(), resultTag.gpr());
- m_jit.move(op1.payloadGPR(), resultPayload.gpr());
- jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
- }
- return;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return true;
}
-
- JSValueOperand op1(this, node.child1());
- GPRTemporary resultTag(this, op1);
- GPRTemporary resultPayload(this, op1, false);
-
- ASSERT(!isInt32Constant(node.child1().index()));
- ASSERT(!isNumberConstant(node.child1().index()));
-
- GPRReg tagGPR = op1.tagGPR();
- GPRReg payloadGPR = op1.payloadGPR();
- GPRReg resultTagGPR = resultTag.gpr();
- GPRReg resultPayloadGPR = resultPayload.gpr();
- op1.use();
-
- JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
- JITCompiler::Jump nonNumeric = m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag));
-
- // First, if we get here we have a double encoded as a JSValue
- JITCompiler::Jump hasUnboxedDouble = m_jit.jump();
-
- // Next handle cells (& other JS immediates)
- nonNumeric.link(&m_jit);
- silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
- callOperation(dfgConvertJSValueToNumber, FPRInfo::returnValueFPR, tagGPR, payloadGPR);
- boxDouble(FPRInfo::returnValueFPR, resultTagGPR, resultPayloadGPR);
- silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
- JITCompiler::Jump hasCalledToNumber = m_jit.jump();
-
- // Finally, handle integers.
- isInteger.link(&m_jit);
- hasUnboxedDouble.link(&m_jit);
- m_jit.move(tagGPR, resultTagGPR);
- m_jit.move(payloadGPR, resultPayloadGPR);
- hasCalledToNumber.link(&m_jit);
- jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
}
-void SpeculativeJIT::nonSpeculativeValueToInt32(Node& node)
+void SpeculativeJIT::cachedGetById(
+ CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR,
+ unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
- ASSERT(!isInt32Constant(node.child1().index()));
-
- if (isKnownInteger(node.child1().index())) {
- IntegerOperand op1(this, node.child1());
- GPRTemporary result(this, op1);
- m_jit.move(op1.gpr(), result.gpr());
- integerResult(result.gpr(), m_compileIndex);
- return;
- }
-
- GenerationInfo& childInfo = m_generationInfo[at(node.child1()).virtualRegister()];
- if (childInfo.isJSDouble()) {
- DoubleOperand op1(this, node.child1());
- GPRTemporary result(this);
- FPRReg fpr = op1.fpr();
- GPRReg gpr = result.gpr();
- op1.use();
- JITCompiler::Jump truncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateSuccessful);
-
- silentSpillAllRegisters(gpr);
- callOperation(toInt32, gpr, fpr);
- silentFillAllRegisters(gpr);
-
- truncatedToInteger.link(&m_jit);
- integerResult(gpr, m_compileIndex, UseChildrenCalledExplicitly);
- return;
+ // This is a hacky fix for when the register allocator decides to alias the base payload with the result tag. This only happens
+ // in the case of GetByIdFlush, which has a relatively expensive register allocation story already so we probably don't need to
+ // trip over one move instruction.
+ if (basePayloadGPR == resultTagGPR) {
+ RELEASE_ASSERT(basePayloadGPR != resultPayloadGPR);
+
+ if (baseTagGPROrNone == resultPayloadGPR) {
+ m_jit.swap(basePayloadGPR, baseTagGPROrNone);
+ baseTagGPROrNone = resultTagGPR;
+ } else
+ m_jit.move(basePayloadGPR, resultPayloadGPR);
+ basePayloadGPR = resultPayloadGPR;
}
-
- JSValueOperand op1(this, node.child1());
- GPRTemporary result(this);
- GPRReg tagGPR = op1.tagGPR();
- GPRReg payloadGPR = op1.payloadGPR();
- GPRReg resultGPR = result.gpr();
- op1.use();
-
- JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
-
- // First handle non-integers
- silentSpillAllRegisters(resultGPR);
- callOperation(dfgConvertJSValueToInt32, GPRInfo::returnValueGPR, tagGPR, payloadGPR);
- m_jit.move(GPRInfo::returnValueGPR, resultGPR);
- silentFillAllRegisters(resultGPR);
- JITCompiler::Jump hasCalledToInt32 = m_jit.jump();
-
- // Then handle integers.
- isInteger.link(&m_jit);
- m_jit.move(payloadGPR, resultGPR);
- hasCalledToInt32.link(&m_jit);
- integerResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
-}
-
-void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node)
-{
- IntegerOperand op1(this, node.child1());
- FPRTemporary boxer(this);
- GPRTemporary resultTag(this, op1);
- GPRTemporary resultPayload(this);
-
- JITCompiler::Jump positive = m_jit.branch32(MacroAssembler::GreaterThanOrEqual, op1.gpr(), TrustedImm32(0));
-
- m_jit.convertInt32ToDouble(op1.gpr(), boxer.fpr());
- m_jit.move(JITCompiler::TrustedImmPtr(&AssemblyHelpers::twoToThe32), resultPayload.gpr()); // reuse resultPayload register here.
- m_jit.addDouble(JITCompiler::Address(resultPayload.gpr(), 0), boxer.fpr());
-
- boxDouble(boxer.fpr(), resultTag.gpr(), resultPayload.gpr());
-
- JITCompiler::Jump done = m_jit.jump();
-
- positive.link(&m_jit);
-
- m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTag.gpr());
- m_jit.move(op1.gpr(), resultPayload.gpr());
-
- done.link(&m_jit);
-
- jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
-}
-
-JITCompiler::Call SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
-{
- JITCompiler::DataLabelPtr structureToCompare;
- JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
- m_jit.loadPtr(JITCompiler::Address(basePayloadGPR, JSObject::offsetOfPropertyStorage()), resultPayloadGPR);
- JITCompiler::DataLabelCompact tagLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
- JITCompiler::DataLabelCompact payloadLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
+ JITGetByIdGenerator gen(
+ m_jit.codeBlock(), codeOrigin, usedRegisters(),
+ JSValueRegs(baseTagGPROrNone, basePayloadGPR),
+ JSValueRegs(resultTagGPR, resultPayloadGPR), spillMode);
- JITCompiler::Jump done = m_jit.jump();
-
- structureCheck.m_jump.link(&m_jit);
+ gen.generateFastPath(m_jit);
+ JITCompiler::JumpList slowCases;
if (slowPathTarget.isSet())
- slowPathTarget.link(&m_jit);
-
- JITCompiler::Label slowCase = m_jit.label();
-
- if (spillMode == NeedToSpill)
- silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
- JITCompiler::Call functionCall;
- if (baseTagGPROrNone == InvalidGPRReg)
- functionCall = callOperation(operationGetByIdOptimize, resultTagGPR, resultPayloadGPR, JSValue::CellTag, basePayloadGPR, identifier(identifierNumber));
- else
- functionCall = callOperation(operationGetByIdOptimize, resultTagGPR, resultPayloadGPR, baseTagGPROrNone, basePayloadGPR, identifier(identifierNumber));
- if (spillMode == NeedToSpill)
- silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
-
- done.link(&m_jit);
-
- JITCompiler::Label doneLabel = m_jit.label();
+ slowCases.append(slowPathTarget);
+ slowCases.append(gen.slowPathJump());
+
+ std::unique_ptr<SlowPathGenerator> slowPath;
+ if (baseTagGPROrNone == InvalidGPRReg) {
+ slowPath = slowPathCall(
+ slowCases, this, operationGetByIdOptimize,
+ JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(),
+ static_cast<int32_t>(JSValue::CellTag), basePayloadGPR,
+ identifierUID(identifierNumber));
+ } else {
+ slowPath = slowPathCall(
+ slowCases, this, operationGetByIdOptimize,
+ JSValueRegs(resultTagGPR, resultPayloadGPR), gen.stubInfo(), baseTagGPROrNone,
+ basePayloadGPR, identifierUID(identifierNumber));
+ }
- m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, functionCall, structureCheck, tagLoadWithPatch, payloadLoadWithPatch, slowCase, doneLabel, safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(resultTagGPR), safeCast<int8_t>(resultPayloadGPR), safeCast<int8_t>(scratchGPR), spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed));
-
- return functionCall;
+ m_jit.addGetById(gen, slowPath.get());
+ addSlowPathGenerator(WTF::move(slowPath));
}
-void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
+void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
- JITCompiler::DataLabelPtr structureToCompare;
- JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
-
- writeBarrier(basePayloadGPR, valueTagGPR, valueUse, WriteBarrierForPropertyAccess, scratchGPR);
-
- m_jit.loadPtr(JITCompiler::Address(basePayloadGPR, JSObject::offsetOfPropertyStorage()), scratchGPR);
- JITCompiler::DataLabel32 tagStoreWithPatch = m_jit.store32WithAddressOffsetPatch(valueTagGPR, JITCompiler::Address(scratchGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
- JITCompiler::DataLabel32 payloadStoreWithPatch = m_jit.store32WithAddressOffsetPatch(valuePayloadGPR, JITCompiler::Address(scratchGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
-
- JITCompiler::Jump done = m_jit.jump();
-
- structureCheck.m_jump.link(&m_jit);
-
+ JITPutByIdGenerator gen(
+ m_jit.codeBlock(), codeOrigin, usedRegisters(),
+ JSValueRegs::payloadOnly(basePayloadGPR), JSValueRegs(valueTagGPR, valuePayloadGPR),
+ scratchGPR, spillMode, m_jit.ecmaModeFor(codeOrigin), putKind);
+
+ gen.generateFastPath(m_jit);
+
+ JITCompiler::JumpList slowCases;
if (slowPathTarget.isSet())
- slowPathTarget.link(&m_jit);
-
- JITCompiler::Label slowCase = m_jit.label();
-
- silentSpillAllRegisters(InvalidGPRReg);
- V_DFGOperation_EJCI optimizedCall;
- if (m_jit.strictModeFor(at(m_compileIndex).codeOrigin)) {
- if (putKind == Direct)
- optimizedCall = operationPutByIdDirectStrictOptimize;
- else
- optimizedCall = operationPutByIdStrictOptimize;
- } else {
- if (putKind == Direct)
- optimizedCall = operationPutByIdDirectNonStrictOptimize;
- else
- optimizedCall = operationPutByIdNonStrictOptimize;
- }
- JITCompiler::Call functionCall = callOperation(optimizedCall, valueTagGPR, valuePayloadGPR, basePayloadGPR, identifier(identifierNumber));
- silentFillAllRegisters(InvalidGPRReg);
+ slowCases.append(slowPathTarget);
+ slowCases.append(gen.slowPathJump());
- done.link(&m_jit);
- JITCompiler::Label doneLabel = m_jit.label();
+ auto slowPath = slowPathCall(
+ slowCases, this, gen.slowPathFunction(), NoResult, gen.stubInfo(), valueTagGPR,
+ valuePayloadGPR, basePayloadGPR, identifierUID(identifierNumber));
- m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, functionCall, structureCheck, JITCompiler::DataLabelCompact(tagStoreWithPatch.label()), JITCompiler::DataLabelCompact(payloadStoreWithPatch.label()), slowCase, doneLabel, safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(valueTagGPR), safeCast<int8_t>(valuePayloadGPR), safeCast<int8_t>(scratchGPR)));
+ m_jit.addPutById(gen, slowPath.get());
+ addSlowPathGenerator(WTF::move(slowPath));
}
void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert)
GPRReg argTagGPR = arg.tagGPR();
GPRReg argPayloadGPR = arg.payloadGPR();
- GPRTemporary resultPayload(this, arg, false);
+ GPRTemporary resultPayload(this, Reuse, arg, PayloadWord);
GPRReg resultPayloadGPR = resultPayload.gpr();
JITCompiler::Jump notCell;
- if (!isKnownCell(operand.index()))
- notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag));
-
- m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultPayloadGPR);
- m_jit.test8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultPayloadGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), resultPayloadGPR);
-
- if (!isKnownCell(operand.index())) {
+ JITCompiler::Jump notMasqueradesAsUndefined;
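+    // When the masquerades-as-undefined watchpoint is still valid, no cell can compare
+    // equal to null/undefined, so the cell case folds to a constant result.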
+ if (masqueradesAsUndefinedWatchpointIsStillValid()) {
+ if (!isKnownCell(operand.node()))
+ notCell = m_jit.branchIfNotCell(arg.jsValueRegs());
+
+ m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR);
+ notMasqueradesAsUndefined = m_jit.jump();
+ } else {
+ GPRTemporary localGlobalObject(this);
+ GPRTemporary remoteGlobalObject(this);
+
+ if (!isKnownCell(operand.node()))
+ notCell = m_jit.branchIfNotCell(arg.jsValueRegs());
+
+ JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(
+ JITCompiler::NonZero,
+ JITCompiler::Address(argPayloadGPR, JSCell::typeInfoFlagsOffset()),
+ JITCompiler::TrustedImm32(MasqueradesAsUndefined));
+
+ m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR);
+ notMasqueradesAsUndefined = m_jit.jump();
+
+ isMasqueradesAsUndefined.link(&m_jit);
+ GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
+ GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
+ m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
+ m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureIDOffset()), resultPayloadGPR);
+ m_jit.loadPtr(JITCompiler::Address(resultPayloadGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
+ m_jit.compare32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultPayloadGPR);
+ }
+
+ if (!isKnownCell(operand.node())) {
JITCompiler::Jump done = m_jit.jump();
notCell.link(&m_jit);
// null or undefined?
COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
- m_jit.move(argTagGPR, resultPayloadGPR);
- m_jit.or32(TrustedImm32(1), resultPayloadGPR);
+ m_jit.or32(TrustedImm32(1), argTagGPR, resultPayloadGPR);
m_jit.compare32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultPayloadGPR, TrustedImm32(JSValue::NullTag), resultPayloadGPR);
done.link(&m_jit);
}
- booleanResult(resultPayloadGPR, m_compileIndex);
+ notMasqueradesAsUndefined.link(&m_jit);
+
+ booleanResult(resultPayloadGPR, m_currentNode);
}
-void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, NodeIndex branchNodeIndex, bool invert)
+void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert)
{
- Node& branchNode = at(branchNodeIndex);
- BlockIndex taken = branchNode.takenBlockIndex();
- BlockIndex notTaken = branchNode.notTakenBlockIndex();
+ BasicBlock* taken = branchNode->branchData()->taken.block;
+ BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
invert = !invert;
- BlockIndex tmp = taken;
+ BasicBlock* tmp = taken;
taken = notTaken;
notTaken = tmp;
}
GPRReg argTagGPR = arg.tagGPR();
GPRReg argPayloadGPR = arg.payloadGPR();
- GPRTemporary result(this, arg);
+ GPRTemporary result(this, Reuse, arg, TagWord);
GPRReg resultGPR = result.gpr();
-
+
JITCompiler::Jump notCell;
-
- if (!isKnownCell(operand.index()))
- notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag));
-
- m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultGPR);
- branchTest8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), taken);
-
- if (!isKnownCell(operand.index())) {
+
+ if (masqueradesAsUndefinedWatchpointIsStillValid()) {
+ if (!isKnownCell(operand.node()))
+ notCell = m_jit.branchIfNotCell(arg.jsValueRegs());
+
+ jump(invert ? taken : notTaken, ForceJump);
+ } else {
+ GPRTemporary localGlobalObject(this);
+ GPRTemporary remoteGlobalObject(this);
+
+ if (!isKnownCell(operand.node()))
+ notCell = m_jit.branchIfNotCell(arg.jsValueRegs());
+
+ branchTest8(JITCompiler::Zero,
+ JITCompiler::Address(argPayloadGPR, JSCell::typeInfoFlagsOffset()),
+ JITCompiler::TrustedImm32(MasqueradesAsUndefined),
+ invert ? taken : notTaken);
+
+ GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
+ GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
+ m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic)), localGlobalObjectGPR);
+ m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureIDOffset()), resultGPR);
+ m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR);
+ branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, invert ? notTaken : taken);
+ }
+
+ if (!isKnownCell(operand.node())) {
jump(notTaken, ForceJump);
notCell.link(&m_jit);
// null or undefined?
COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
- m_jit.move(argTagGPR, resultGPR);
- m_jit.or32(TrustedImm32(1), resultGPR);
+ m_jit.or32(TrustedImm32(1), argTagGPR, resultGPR);
branch32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, resultGPR, JITCompiler::TrustedImm32(JSValue::NullTag), taken);
}
jump(notTaken);
}
-bool SpeculativeJIT::nonSpeculativeCompareNull(Node& node, Edge operand, bool invert)
+bool SpeculativeJIT::nonSpeculativeCompareNull(Node* node, Edge operand, bool invert)
{
unsigned branchIndexInBlock = detectPeepHoleBranch();
if (branchIndexInBlock != UINT_MAX) {
- NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock);
+ Node* branchNode = m_block->at(branchIndexInBlock);
- ASSERT(node.adjustedRefCount() == 1);
+ ASSERT(node->adjustedRefCount() == 1);
- nonSpeculativePeepholeBranchNull(operand, branchNodeIndex, invert);
+ nonSpeculativePeepholeBranchNull(operand, branchNode, invert);
- use(node.child1());
- use(node.child2());
+ use(node->child1());
+ use(node->child2());
m_indexInBlock = branchIndexInBlock;
- m_compileIndex = branchNodeIndex;
+ m_currentNode = branchNode;
return true;
}
return false;
}
-void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNodeIndex, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
+void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
{
- Node& branchNode = at(branchNodeIndex);
- BlockIndex taken = branchNode.takenBlockIndex();
- BlockIndex notTaken = branchNode.notTakenBlockIndex();
+ BasicBlock* taken = branchNode->branchData()->taken.block;
+ BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero;
// The branch instruction will branch to the taken block.
// If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
cond = JITCompiler::invert(cond);
callResultCondition = JITCompiler::Zero;
- BlockIndex tmp = taken;
+ BasicBlock* tmp = taken;
taken = notTaken;
notTaken = tmp;
}
- JSValueOperand arg1(this, node.child1());
- JSValueOperand arg2(this, node.child2());
+ JSValueOperand arg1(this, node->child1());
+ JSValueOperand arg2(this, node->child2());
GPRReg arg1TagGPR = arg1.tagGPR();
GPRReg arg1PayloadGPR = arg1.payloadGPR();
GPRReg arg2TagGPR = arg2.tagGPR();
JITCompiler::JumpList slowPath;
- if (isKnownNotInteger(node.child1().index()) || isKnownNotInteger(node.child2().index())) {
- GPRResult result(this);
+ if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
+ GPRFlushedCallResult result(this);
GPRReg resultGPR = result.gpr();
arg1.use();
arg2.use();
- if (!isKnownInteger(node.child1().index()))
+ if (!isKnownInteger(node->child1().node()))
slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
- if (!isKnownInteger(node.child2().index()))
+ if (!isKnownInteger(node->child2().node()))
slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
branch32(cond, arg1PayloadGPR, arg2PayloadGPR, taken);
- if (!isKnownInteger(node.child1().index()) || !isKnownInteger(node.child2().index())) {
+ if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
jump(notTaken, ForceJump);
slowPath.link(&m_jit);
jump(notTaken);
- m_indexInBlock = m_jit.graph().m_blocks[m_block]->size() - 1;
- m_compileIndex = branchNodeIndex;
+ m_indexInBlock = m_block->size() - 1;
+ m_currentNode = branchNode;
}
-void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction)
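+// Deferred slow path for the non-peephole compare below: it calls the C++ comparison
+// helper and masks the returned value down to a 0/1 payload, matching what the fast
+// path's compare32 produces.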
+template<typename JumpType>
+class CompareAndBoxBooleanSlowPathGenerator
+ : public CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg> {
+public:
+ CompareAndBoxBooleanSlowPathGenerator(
+ JumpType from, SpeculativeJIT* jit,
+ S_JITOperation_EJJ function, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload,
+ GPRReg arg2Tag, GPRReg arg2Payload)
+ : CallSlowPathGenerator<JumpType, S_JITOperation_EJJ, GPRReg>(
+ from, jit, function, NeedToSpill, result)
+ , m_arg1Tag(arg1Tag)
+ , m_arg1Payload(arg1Payload)
+ , m_arg2Tag(arg2Tag)
+ , m_arg2Payload(arg2Payload)
+ {
+ }
+
+protected:
+ virtual void generateInternal(SpeculativeJIT* jit)
+ {
+ this->setUp(jit);
+ this->recordCall(
+ jit->callOperation(
+ this->m_function, this->m_result, m_arg1Tag, m_arg1Payload, m_arg2Tag,
+ m_arg2Payload));
+ jit->m_jit.and32(JITCompiler::TrustedImm32(1), this->m_result);
+ this->tearDown(jit);
+ }
+
+private:
+ GPRReg m_arg1Tag;
+ GPRReg m_arg1Payload;
+ GPRReg m_arg2Tag;
+ GPRReg m_arg2Payload;
+};
+
+void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_JITOperation_EJJ helperFunction)
{
- JSValueOperand arg1(this, node.child1());
- JSValueOperand arg2(this, node.child2());
+ JSValueOperand arg1(this, node->child1());
+ JSValueOperand arg2(this, node->child2());
GPRReg arg1TagGPR = arg1.tagGPR();
GPRReg arg1PayloadGPR = arg1.payloadGPR();
GPRReg arg2TagGPR = arg2.tagGPR();
JITCompiler::JumpList slowPath;
- if (isKnownNotInteger(node.child1().index()) || isKnownNotInteger(node.child2().index())) {
- GPRResult result(this);
+ if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) {
+ GPRFlushedCallResult result(this);
GPRReg resultPayloadGPR = result.gpr();
arg1.use();
flushRegisters();
callOperation(helperFunction, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
- booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
} else {
- GPRTemporary resultPayload(this, arg1, false);
+ GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord);
GPRReg resultPayloadGPR = resultPayload.gpr();
arg1.use();
arg2.use();
- if (!isKnownInteger(node.child1().index()))
+ if (!isKnownInteger(node->child1().node()))
slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
- if (!isKnownInteger(node.child2().index()))
+ if (!isKnownInteger(node->child2().node()))
slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)));
m_jit.compare32(cond, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR);
- if (!isKnownInteger(node.child1().index()) || !isKnownInteger(node.child2().index())) {
- JITCompiler::Jump haveResult = m_jit.jump();
-
- slowPath.link(&m_jit);
-
- silentSpillAllRegisters(resultPayloadGPR);
- callOperation(helperFunction, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR);
- silentFillAllRegisters(resultPayloadGPR);
-
- m_jit.andPtr(TrustedImm32(1), resultPayloadGPR);
-
- haveResult.link(&m_jit);
+ if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) {
+ addSlowPathGenerator(std::make_unique<CompareAndBoxBooleanSlowPathGenerator<JITCompiler::JumpList>>(
+ slowPath, this, helperFunction, resultPayloadGPR, arg1TagGPR,
+ arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR));
}
- booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
}
}
-void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branchNodeIndex, bool invert)
+void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert)
{
- Node& branchNode = at(branchNodeIndex);
- BlockIndex taken = branchNode.takenBlockIndex();
- BlockIndex notTaken = branchNode.notTakenBlockIndex();
+ BasicBlock* taken = branchNode->branchData()->taken.block;
+ BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
// The branch instruction will branch to the taken block.
// If taken is next, switch taken with notTaken & invert the branch condition so we can fall through.
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
invert = !invert;
- BlockIndex tmp = taken;
+ BasicBlock* tmp = taken;
taken = notTaken;
notTaken = tmp;
}
- JSValueOperand arg1(this, node.child1());
- JSValueOperand arg2(this, node.child2());
+ JSValueOperand arg1(this, node->child1());
+ JSValueOperand arg2(this, node->child2());
GPRReg arg1TagGPR = arg1.tagGPR();
GPRReg arg1PayloadGPR = arg1.payloadGPR();
GPRReg arg2TagGPR = arg2.tagGPR();
GPRReg arg2PayloadGPR = arg2.payloadGPR();
- GPRTemporary resultPayload(this, arg1, false);
+ GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord);
GPRReg resultPayloadGPR = resultPayload.gpr();
arg1.use();
arg2.use();
- if (isKnownCell(node.child1().index()) && isKnownCell(node.child2().index())) {
+ if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
// see if we get lucky: if the arguments are cells and they reference the same
// cell, then they must be strictly equal.
branchPtr(JITCompiler::Equal, arg1PayloadGPR, arg2PayloadGPR, invert ? notTaken : taken);
jump(notTaken);
}
-void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert)
+void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert)
{
- JSValueOperand arg1(this, node.child1());
- JSValueOperand arg2(this, node.child2());
+ JSValueOperand arg1(this, node->child1());
+ JSValueOperand arg2(this, node->child2());
GPRReg arg1TagGPR = arg1.tagGPR();
GPRReg arg1PayloadGPR = arg1.payloadGPR();
GPRReg arg2TagGPR = arg2.tagGPR();
GPRReg arg2PayloadGPR = arg2.payloadGPR();
- GPRTemporary resultPayload(this, arg1, false);
+ GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord);
GPRReg resultPayloadGPR = resultPayload.gpr();
arg1.use();
arg2.use();
- if (isKnownCell(node.child1().index()) && isKnownCell(node.child2().index())) {
+ if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) {
// see if we get lucky: if the arguments are cells and they reference the same
// cell, then they must be strictly equal.
+ // FIXME: this should flush registers instead of silent spill/fill.
JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1PayloadGPR, arg2PayloadGPR);
m_jit.move(JITCompiler::TrustedImm32(!invert), resultPayloadGPR);
m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR);
}
- booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
}
-void SpeculativeJIT::emitCall(Node& node)
+void SpeculativeJIT::compileMiscStrictEq(Node* node)
{
- P_DFGOperation_E slowCallFunction;
+ JSValueOperand op1(this, node->child1(), ManualOperandSpeculation);
+ JSValueOperand op2(this, node->child2(), ManualOperandSpeculation);
+ GPRTemporary result(this);
+
+ if (node->child1().useKind() == MiscUse)
+ speculateMisc(node->child1(), op1.jsValueRegs());
+ if (node->child2().useKind() == MiscUse)
+ speculateMisc(node->child2(), op2.jsValueRegs());
+
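+    // Strict equality against a misc value never requires conversion, so it reduces to
+    // comparing the tag and payload words: start from false and only compare payloads
+    // when the tags match.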
+ m_jit.move(TrustedImm32(0), result.gpr());
+ JITCompiler::Jump notEqual = m_jit.branch32(JITCompiler::NotEqual, op1.tagGPR(), op2.tagGPR());
+ m_jit.compare32(JITCompiler::Equal, op1.payloadGPR(), op2.payloadGPR(), result.gpr());
+ notEqual.link(&m_jit);
+ booleanResult(result.gpr(), node);
+}
- if (node.op() == Call)
- slowCallFunction = operationLinkCall;
- else {
- ASSERT(node.op() == Construct);
- slowCallFunction = operationLinkConstruct;
+void SpeculativeJIT::emitCall(Node* node)
+{
+ CallLinkInfo::CallType callType;
+ bool isVarargs = false;
+ bool isForwardVarargs = false;
+ switch (node->op()) {
+ case Call:
+ callType = CallLinkInfo::Call;
+ break;
+ case Construct:
+ callType = CallLinkInfo::Construct;
+ break;
+ case CallVarargs:
+ callType = CallLinkInfo::CallVarargs;
+ isVarargs = true;
+ break;
+ case ConstructVarargs:
+ callType = CallLinkInfo::ConstructVarargs;
+ isVarargs = true;
+ break;
+ case CallForwardVarargs:
+ callType = CallLinkInfo::CallVarargs;
+ isForwardVarargs = true;
+ break;
+ case ConstructForwardVarargs:
+ callType = CallLinkInfo::ConstructVarargs;
+ isForwardVarargs = true;
+ break;
+ default:
+ DFG_CRASH(m_jit.graph(), node, "bad node type");
+ break;
}
- // For constructors, the this argument is not passed but we have to make space
- // for it.
- int dummyThisArgument = node.op() == Call ? 0 : 1;
+ Edge calleeEdge = m_jit.graph().child(node, 0);
+
+ // Gotta load the arguments somehow. Varargs is trickier.
+ if (isVarargs || isForwardVarargs) {
+ CallVarargsData* data = node->callVarargsData();
+
+ GPRReg resultGPR;
+ unsigned numUsedStackSlots = m_jit.graph().m_nextMachineLocal;
+
+ if (isForwardVarargs) {
+ flushRegisters();
+ use(node->child2());
+
+ GPRReg scratchGPR1;
+ GPRReg scratchGPR2;
+ GPRReg scratchGPR3;
+
+ scratchGPR1 = JITCompiler::selectScratchGPR();
+ scratchGPR2 = JITCompiler::selectScratchGPR(scratchGPR1);
+ scratchGPR3 = JITCompiler::selectScratchGPR(scratchGPR1, scratchGPR2);
+
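+            // Forwarded varargs: set up the callee frame directly from the caller's arguments.
+            // If the fast case bails out, a stack overflow error is thrown; that path never
+            // returns (hence the abort just after the call).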
+ m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR2);
+ JITCompiler::JumpList slowCase;
+ emitSetupVarargsFrameFastCase(m_jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase);
+ JITCompiler::Jump done = m_jit.jump();
+ slowCase.link(&m_jit);
+ callOperation(operationThrowStackOverflowForVarargs);
+ m_jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);
+ done.link(&m_jit);
+ resultGPR = scratchGPR2;
+ } else {
+ GPRReg argumentsPayloadGPR;
+ GPRReg argumentsTagGPR;
+ GPRReg scratchGPR1;
+ GPRReg scratchGPR2;
+ GPRReg scratchGPR3;
+
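+            // Helper to (re)load the arguments operand. reservedGPR stays locked and excluded
+            // from scratch selection so the value already live in it (returnValueGPR on the
+            // second call) is not clobbered.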
+ auto loadArgumentsGPR = [&] (GPRReg reservedGPR) {
+ if (reservedGPR != InvalidGPRReg)
+ lock(reservedGPR);
+ JSValueOperand arguments(this, node->child2());
+ argumentsTagGPR = arguments.tagGPR();
+ argumentsPayloadGPR = arguments.payloadGPR();
+ if (reservedGPR != InvalidGPRReg)
+ unlock(reservedGPR);
+ flushRegisters();
+
+ scratchGPR1 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, reservedGPR);
+ scratchGPR2 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, scratchGPR1, reservedGPR);
+ scratchGPR3 = JITCompiler::selectScratchGPR(argumentsPayloadGPR, argumentsTagGPR, scratchGPR1, scratchGPR2, reservedGPR);
+ };
+
+ loadArgumentsGPR(InvalidGPRReg);
+
+ DFG_ASSERT(m_jit.graph(), node, isFlushed());
- CallLinkInfo::CallType callType = node.op() == Call ? CallLinkInfo::Call : CallLinkInfo::Construct;
+ // Right now, arguments is in argumentsTagGPR/argumentsPayloadGPR and the register file is
+ // flushed.
+ callOperation(operationSizeFrameForVarargs, GPRInfo::returnValueGPR, argumentsTagGPR, argumentsPayloadGPR, numUsedStackSlots, data->firstVarArgOffset);
+
+ // Now we have the argument count of the callee frame, but we've lost the arguments operand.
+ // Reconstruct the arguments operand while preserving the callee frame.
+ loadArgumentsGPR(GPRInfo::returnValueGPR);
+ m_jit.move(TrustedImm32(numUsedStackSlots), scratchGPR1);
+ emitSetVarargsFrame(m_jit, GPRInfo::returnValueGPR, false, scratchGPR1, scratchGPR1);
+ m_jit.addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), scratchGPR1, JITCompiler::stackPointerRegister);
+
+ callOperation(operationSetupVarargsFrame, GPRInfo::returnValueGPR, scratchGPR1, argumentsTagGPR, argumentsPayloadGPR, data->firstVarArgOffset, GPRInfo::returnValueGPR);
+ resultGPR = GPRInfo::returnValueGPR;
+ }
+
+ m_jit.addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), resultGPR, JITCompiler::stackPointerRegister);
+
+ DFG_ASSERT(m_jit.graph(), node, isFlushed());
+
+ // We don't need the arguments array anymore.
+ if (isVarargs)
+ use(node->child2());
+
+ // Now set up the "this" argument.
+ JSValueOperand thisArgument(this, node->child3());
+ GPRReg thisArgumentTagGPR = thisArgument.tagGPR();
+ GPRReg thisArgumentPayloadGPR = thisArgument.payloadGPR();
+ thisArgument.use();
+
+ m_jit.store32(thisArgumentTagGPR, JITCompiler::calleeArgumentTagSlot(0));
+ m_jit.store32(thisArgumentPayloadGPR, JITCompiler::calleeArgumentPayloadSlot(0));
+ } else {
+ // The call instruction's first child is either the function (normal call) or the
+        // receiver (method call). Subsequent children are the arguments.
+ int numPassedArgs = node->numChildren() - 1;
+
+ m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs), m_jit.calleeFramePayloadSlot(JSStack::ArgumentCount));
+
+ for (int i = 0; i < numPassedArgs; i++) {
+ Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i];
+ JSValueOperand arg(this, argEdge);
+ GPRReg argTagGPR = arg.tagGPR();
+ GPRReg argPayloadGPR = arg.payloadGPR();
+ use(argEdge);
+
+ m_jit.store32(argTagGPR, m_jit.calleeArgumentTagSlot(i));
+ m_jit.store32(argPayloadGPR, m_jit.calleeArgumentPayloadSlot(i));
+ }
+ }
- Edge calleeEdge = m_jit.graph().m_varArgChildren[node.firstChild()];
JSValueOperand callee(this, calleeEdge);
GPRReg calleeTagGPR = callee.tagGPR();
GPRReg calleePayloadGPR = callee.payloadGPR();
use(calleeEdge);
-
- // The call instruction's first child is either the function (normal call) or the
- // receiver (method call). subsequent children are the arguments.
- int numPassedArgs = node.numChildren() - 1;
-
- m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(RegisterFile::ArgumentCount));
- m_jit.storePtr(GPRInfo::callFrameRegister, callFramePayloadSlot(RegisterFile::CallerFrame));
- m_jit.store32(calleePayloadGPR, callFramePayloadSlot(RegisterFile::Callee));
- m_jit.store32(calleeTagGPR, callFrameTagSlot(RegisterFile::Callee));
-
- for (int i = 0; i < numPassedArgs; i++) {
- Edge argEdge = m_jit.graph().m_varArgChildren[node.firstChild() + 1 + i];
- JSValueOperand arg(this, argEdge);
- GPRReg argTagGPR = arg.tagGPR();
- GPRReg argPayloadGPR = arg.payloadGPR();
- use(argEdge);
-
- m_jit.store32(argTagGPR, argumentTagSlot(i + dummyThisArgument));
- m_jit.store32(argPayloadGPR, argumentPayloadSlot(i + dummyThisArgument));
- }
+ m_jit.store32(calleePayloadGPR, m_jit.calleeFramePayloadSlot(JSStack::Callee));
+ m_jit.store32(calleeTagGPR, m_jit.calleeFrameTagSlot(JSStack::Callee));
flushRegisters();
- GPRResult resultPayload(this);
- GPRResult2 resultTag(this);
+ GPRFlushedCallResult resultPayload(this);
+ GPRFlushedCallResult2 resultTag(this);
GPRReg resultPayloadGPR = resultPayload.gpr();
GPRReg resultTagGPR = resultTag.gpr();
JITCompiler::DataLabelPtr targetToCheck;
JITCompiler::JumpList slowPath;
- slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck));
- slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, calleeTagGPR, TrustedImm32(JSValue::CellTag)));
- m_jit.loadPtr(MacroAssembler::Address(calleePayloadGPR, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), resultPayloadGPR);
- m_jit.storePtr(resultPayloadGPR, callFramePayloadSlot(RegisterFile::ScopeChain));
- m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag), callFrameTagSlot(RegisterFile::ScopeChain));
+ m_jit.emitStoreCodeOrigin(node->origin.semantic);
+
+ CallLinkInfo* info = m_jit.codeBlock()->addCallLinkInfo();
- m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
+ slowPath.append(m_jit.branchIfNotCell(callee.jsValueRegs()));
+ slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck));
- CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin;
- CallBeginToken token = m_jit.beginCall();
JITCompiler::Call fastCall = m_jit.nearCall();
- m_jit.notifyCall(fastCall, codeOrigin, token);
JITCompiler::Jump done = m_jit.jump();
slowPath.link(&m_jit);
- m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- m_jit.poke(GPRInfo::argumentGPR0);
- token = m_jit.beginCall();
- JITCompiler::Call slowCall = m_jit.appendCall(slowCallFunction);
- m_jit.addFastExceptionCheck(slowCall, codeOrigin, token);
- m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister);
- token = m_jit.beginCall();
- JITCompiler::Call theCall = m_jit.call(GPRInfo::returnValueGPR);
- m_jit.notifyCall(theCall, codeOrigin, token);
+ // Callee payload needs to be in regT0, tag in regT1
+ if (calleeTagGPR == GPRInfo::regT0) {
+ if (calleePayloadGPR == GPRInfo::regT1)
+ m_jit.swap(GPRInfo::regT1, GPRInfo::regT0);
+ else {
+ m_jit.move(calleeTagGPR, GPRInfo::regT1);
+ m_jit.move(calleePayloadGPR, GPRInfo::regT0);
+ }
+ } else {
+ m_jit.move(calleePayloadGPR, GPRInfo::regT0);
+ m_jit.move(calleeTagGPR, GPRInfo::regT1);
+ }
+ m_jit.move(MacroAssembler::TrustedImmPtr(info), GPRInfo::regT2);
+ JITCompiler::Call slowCall = m_jit.nearCall();
done.link(&m_jit);
m_jit.setupResults(resultPayloadGPR, resultTagGPR);
- jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, DataFormatJS, UseChildrenCalledExplicitly);
+ jsValueResult(resultTagGPR, resultPayloadGPR, node, DataFormatJS, UseChildrenCalledExplicitly);
- m_jit.addJSCall(fastCall, slowCall, targetToCheck, callType, at(m_compileIndex).codeOrigin);
+ info->setUpCall(callType, node->origin.semantic, calleePayloadGPR);
+ m_jit.addJSCall(fastCall, slowCall, targetToCheck, info);
+
+ // If we were varargs, then after the calls are done, we need to reestablish our stack pointer.
+ if (isVarargs || isForwardVarargs)
+ m_jit.addPtr(TrustedImm32(m_jit.graph().stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, JITCompiler::stackPointerRegister);
}
template<bool strict>
-GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& returnFormat)
+GPRReg SpeculativeJIT::fillSpeculateInt32Internal(Edge edge, DataFormat& returnFormat)
{
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("SpecInt@%d ", nodeIndex);
-#endif
- if (isKnownNotInteger(nodeIndex)) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
- returnFormat = DataFormatInteger;
+ AbstractValue& value = m_state.forNode(edge);
+ SpeculatedType type = value.m_type;
+ ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32));
+
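+    // Filter the abstract value down to int32; if nothing can survive the filter, this
+    // code is unreachable, so terminate speculative execution and return a dummy register.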
+ m_interpreter.filter(value, SpecInt32);
+ if (value.isClear()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
+ returnFormat = DataFormatInt32;
return allocate();
}
- Node& node = at(nodeIndex);
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
+ VirtualRegister virtualRegister = edge->virtualRegister();
+ GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
switch (info.registerFormat()) {
case DataFormatNone: {
-
- if (node.hasConstant()) {
- ASSERT(isInt32Constant(nodeIndex));
+ if (edge->hasConstant()) {
+ ASSERT(edge->isInt32Constant());
GPRReg gpr = allocate();
- m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
+ m_jit.move(MacroAssembler::Imm32(edge->asInt32()), gpr);
m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
- info.fillInteger(gpr);
- returnFormat = DataFormatInteger;
+ info.fillInt32(*m_stream, gpr);
+ returnFormat = DataFormatInt32;
return gpr;
}
DataFormat spillFormat = info.spillFormat();
- ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInteger);
+
+ ASSERT_UNUSED(spillFormat, (spillFormat & DataFormatJS) || spillFormat == DataFormatInt32);
// If we know this was spilled as an integer we can fill without checking.
- if (spillFormat != DataFormatJSInteger && spillFormat != DataFormatInteger)
- speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
+ if (type & ~SpecInt32)
+ speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)));
GPRReg gpr = allocate();
m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- info.fillInteger(gpr);
- returnFormat = DataFormatInteger;
+ info.fillInt32(*m_stream, gpr);
+ returnFormat = DataFormatInt32;
return gpr;
}
- case DataFormatJSInteger:
+ case DataFormatJSInt32:
case DataFormatJS: {
// Check the value is an integer.
GPRReg tagGPR = info.tagGPR();
GPRReg payloadGPR = info.payloadGPR();
m_gprs.lock(tagGPR);
m_gprs.lock(payloadGPR);
- if (info.registerFormat() != DataFormatJSInteger)
- speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::Int32Tag)));
+ if (type & ~SpecInt32)
+ speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::Int32Tag)));
m_gprs.unlock(tagGPR);
m_gprs.release(tagGPR);
m_gprs.release(payloadGPR);
m_gprs.retain(payloadGPR, virtualRegister, SpillOrderInteger);
- info.fillInteger(payloadGPR);
+ info.fillInt32(*m_stream, payloadGPR);
// If !strict we're done, return.
- returnFormat = DataFormatInteger;
+ returnFormat = DataFormatInt32;
return payloadGPR;
}
- case DataFormatInteger: {
+ case DataFormatInt32: {
GPRReg gpr = info.gpr();
m_gprs.lock(gpr);
- returnFormat = DataFormatInteger;
+ returnFormat = DataFormatInt32;
return gpr;
}
- case DataFormatDouble:
case DataFormatCell:
case DataFormatBoolean:
case DataFormatJSDouble:
case DataFormatJSCell:
case DataFormatJSBoolean:
+ case DataFormatDouble:
case DataFormatStorage:
- ASSERT_NOT_REACHED();
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
}
-
- ASSERT_NOT_REACHED();
- return InvalidGPRReg;
}
-GPRReg SpeculativeJIT::fillSpeculateInt(NodeIndex nodeIndex, DataFormat& returnFormat)
+GPRReg SpeculativeJIT::fillSpeculateInt32(Edge edge, DataFormat& returnFormat)
{
- return fillSpeculateIntInternal<false>(nodeIndex, returnFormat);
+ return fillSpeculateInt32Internal<false>(edge, returnFormat);
}
-GPRReg SpeculativeJIT::fillSpeculateIntStrict(NodeIndex nodeIndex)
+GPRReg SpeculativeJIT::fillSpeculateInt32Strict(Edge edge)
{
- DataFormat mustBeDataFormatInteger;
- GPRReg result = fillSpeculateIntInternal<true>(nodeIndex, mustBeDataFormatInteger);
- ASSERT(mustBeDataFormatInteger == DataFormatInteger);
+ DataFormat mustBeDataFormatInt32;
+ GPRReg result = fillSpeculateInt32Internal<true>(edge, mustBeDataFormatInt32);
+ ASSERT(mustBeDataFormatInt32 == DataFormatInt32);
return result;
}
-FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex)
+FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge)
{
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("SpecDouble@%d ", nodeIndex);
-#endif
- if (isKnownNotNumber(nodeIndex)) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
- return fprAllocate();
- }
-
- Node& node = at(nodeIndex);
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
+ ASSERT(isDouble(edge.useKind()));
+ ASSERT(edge->hasDoubleResult());
+ VirtualRegister virtualRegister = edge->virtualRegister();
+ GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
if (info.registerFormat() == DataFormatNone) {
- if (node.hasConstant()) {
- if (isInt32Constant(nodeIndex)) {
- GPRReg gpr = allocate();
- m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr);
- m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
- info.fillInteger(gpr);
- unlock(gpr);
- } else if (isNumberConstant(nodeIndex)) {
- FPRReg fpr = fprAllocate();
- m_jit.loadDouble(addressOfDoubleConstant(nodeIndex), fpr);
- m_fprs.retain(fpr, virtualRegister, SpillOrderConstant);
- info.fillDouble(fpr);
- return fpr;
- } else
- ASSERT_NOT_REACHED();
- } else {
- DataFormat spillFormat = info.spillFormat();
- ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInteger);
- if (spillFormat == DataFormatJSDouble || spillFormat == DataFormatDouble) {
- FPRReg fpr = fprAllocate();
- m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
- m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
- info.fillDouble(fpr);
- return fpr;
- }
-
+ if (edge->hasConstant()) {
+ RELEASE_ASSERT(edge->isNumberConstant());
FPRReg fpr = fprAllocate();
- JITCompiler::Jump hasUnboxedDouble;
-
- if (spillFormat != DataFormatJSInteger && spillFormat != DataFormatInteger) {
- JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag));
- speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::AboveOrEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::LowestTag)));
- m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
- hasUnboxedDouble = m_jit.jump();
-
- isInteger.link(&m_jit);
- }
-
- m_jit.convertInt32ToDouble(JITCompiler::payloadFor(virtualRegister), fpr);
-
- if (hasUnboxedDouble.isSet())
- hasUnboxedDouble.link(&m_jit);
-
- m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
- info.fillDouble(fpr);
- info.killSpilled();
+ m_jit.loadDouble(TrustedImmPtr(m_jit.addressOfDoubleConstant(edge.node())), fpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderConstant);
+ info.fillDouble(*m_stream, fpr);
return fpr;
}
- }
-
- switch (info.registerFormat()) {
- case DataFormatJS:
- case DataFormatJSInteger: {
- GPRReg tagGPR = info.tagGPR();
- GPRReg payloadGPR = info.payloadGPR();
- FPRReg fpr = fprAllocate();
-
- m_gprs.lock(tagGPR);
- m_gprs.lock(payloadGPR);
-
- JITCompiler::Jump hasUnboxedDouble;
-
- if (info.registerFormat() != DataFormatJSInteger) {
- FPRTemporary scratch(this);
- JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag));
- speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)));
- unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr());
- hasUnboxedDouble = m_jit.jump();
- isInteger.link(&m_jit);
- }
-
- m_jit.convertInt32ToDouble(payloadGPR, fpr);
-
- if (hasUnboxedDouble.isSet())
- hasUnboxedDouble.link(&m_jit);
-
- m_gprs.release(tagGPR);
- m_gprs.release(payloadGPR);
- m_gprs.unlock(tagGPR);
- m_gprs.unlock(payloadGPR);
- m_fprs.retain(fpr, virtualRegister, SpillOrderDouble);
- info.fillDouble(fpr);
- info.killSpilled();
- return fpr;
- }
-
- case DataFormatInteger: {
+
+ RELEASE_ASSERT(info.spillFormat() == DataFormatDouble);
FPRReg fpr = fprAllocate();
- GPRReg gpr = info.gpr();
- m_gprs.lock(gpr);
- m_jit.convertInt32ToDouble(gpr, fpr);
- m_gprs.unlock(gpr);
- return fpr;
- }
-
- case DataFormatJSDouble:
- case DataFormatDouble: {
- FPRReg fpr = info.fpr();
- m_fprs.lock(fpr);
+ m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr);
+ m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled);
+ info.fillDouble(*m_stream, fpr);
return fpr;
}
- case DataFormatNone:
- case DataFormatStorage:
- case DataFormatCell:
- case DataFormatJSCell:
- case DataFormatBoolean:
- case DataFormatJSBoolean:
- ASSERT_NOT_REACHED();
- }
-
- ASSERT_NOT_REACHED();
- return InvalidFPRReg;
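+    // Not DataFormatNone, so the value must already be live in a register in double format; lock
+    // the FPR and hand it back.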
+ RELEASE_ASSERT(info.registerFormat() == DataFormatDouble);
+ FPRReg fpr = info.fpr();
+ m_fprs.lock(fpr);
+ return fpr;
}
-GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex)
+GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge)
{
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("SpecCell@%d ", nodeIndex);
-#endif
- if (isKnownNotCell(nodeIndex)) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ AbstractValue& value = m_state.forNode(edge);
+ SpeculatedType type = value.m_type;
+ ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCell));
+
+ m_interpreter.filter(value, SpecCell);
+ if (value.isClear()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
return allocate();
}
- Node& node = at(nodeIndex);
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
+ VirtualRegister virtualRegister = edge->virtualRegister();
+ GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
switch (info.registerFormat()) {
case DataFormatNone: {
-
- if (node.hasConstant()) {
- JSValue jsValue = valueOfJSConstant(nodeIndex);
- ASSERT(jsValue.isCell());
+ if (edge->hasConstant()) {
+ JSValue jsValue = edge->asJSValue();
GPRReg gpr = allocate();
m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr);
- info.fillCell(gpr);
+ info.fillCell(*m_stream, gpr);
return gpr;
}
ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatCell);
- if (info.spillFormat() != DataFormatJSCell && info.spillFormat() != DataFormatCell)
- speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag)));
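+        // The CFA-proven type was captured before the SpecCell filtering above; the tag check is
+        // only needed if that type still admits non-cell values.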
+ if (type & ~SpecCell) {
+ speculationCheck(
+ BadType,
+ JSValueSource(JITCompiler::addressFor(virtualRegister)),
+ edge,
+ m_jit.branch32(
+ MacroAssembler::NotEqual,
+ JITCompiler::tagFor(virtualRegister),
+ TrustedImm32(JSValue::CellTag)));
+ }
GPRReg gpr = allocate();
m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- info.fillCell(gpr);
+ info.fillCell(*m_stream, gpr);
return gpr;
}
GPRReg payloadGPR = info.payloadGPR();
m_gprs.lock(tagGPR);
m_gprs.lock(payloadGPR);
- if (info.spillFormat() != DataFormatJSCell)
- speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::CellTag)));
+ if (type & ~SpecCell) {
+ speculationCheck(
+ BadType, JSValueRegs(tagGPR, payloadGPR), edge,
+ m_jit.branchIfNotCell(info.jsValueRegs()));
+ }
m_gprs.unlock(tagGPR);
m_gprs.release(tagGPR);
m_gprs.release(payloadGPR);
m_gprs.retain(payloadGPR, virtualRegister, SpillOrderCell);
- info.fillCell(payloadGPR);
+ info.fillCell(*m_stream, payloadGPR);
return payloadGPR;
}
- case DataFormatJSInteger:
- case DataFormatInteger:
+ case DataFormatJSInt32:
+ case DataFormatInt32:
case DataFormatJSDouble:
- case DataFormatDouble:
case DataFormatJSBoolean:
case DataFormatBoolean:
+ case DataFormatDouble:
case DataFormatStorage:
- ASSERT_NOT_REACHED();
- }
+ RELEASE_ASSERT_NOT_REACHED();
- ASSERT_NOT_REACHED();
- return InvalidGPRReg;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
+ }
}
-GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex)
+GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge)
{
-#if DFG_ENABLE(DEBUG_VERBOSE)
- dataLog("SpecBool@%d ", nodeIndex);
-#endif
- Node& node = m_jit.graph()[nodeIndex];
- VirtualRegister virtualRegister = node.virtualRegister();
- GenerationInfo& info = m_generationInfo[virtualRegister];
- if ((node.hasConstant() && !valueOfJSConstant(nodeIndex).isBoolean())
- || !(info.isJSBoolean() || info.isUnknownJS())) {
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ AbstractValue& value = m_state.forNode(edge);
+ SpeculatedType type = value.m_type;
+
+ m_interpreter.filter(value, SpecBoolean);
+ if (value.isClear()) {
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
return allocate();
}
+ VirtualRegister virtualRegister = edge->virtualRegister();
+ GenerationInfo& info = generationInfoFromVirtualRegister(virtualRegister);
+
switch (info.registerFormat()) {
case DataFormatNone: {
-
- if (node.hasConstant()) {
- JSValue jsValue = valueOfJSConstant(nodeIndex);
- ASSERT(jsValue.isBoolean());
+ if (edge->hasConstant()) {
+ JSValue jsValue = edge->asJSValue();
GPRReg gpr = allocate();
m_gprs.retain(gpr, virtualRegister, SpillOrderConstant);
m_jit.move(MacroAssembler::TrustedImm32(jsValue.asBoolean()), gpr);
- info.fillBoolean(gpr);
+ info.fillBoolean(*m_stream, gpr);
return gpr;
}
ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatBoolean);
- if (info.spillFormat() != DataFormatJSBoolean && info.spillFormat() != DataFormatBoolean)
- speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
+ if (type & ~SpecBoolean)
+ speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag)));
GPRReg gpr = allocate();
m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr);
m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled);
- info.fillBoolean(gpr);
+ info.fillBoolean(*m_stream, gpr);
return gpr;
}
GPRReg payloadGPR = info.payloadGPR();
m_gprs.lock(tagGPR);
m_gprs.lock(payloadGPR);
- if (info.registerFormat() != DataFormatJSBoolean)
- speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::BooleanTag)));
+ if (type & ~SpecBoolean)
+ speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::BooleanTag)));
m_gprs.unlock(tagGPR);
m_gprs.release(tagGPR);
m_gprs.release(payloadGPR);
m_gprs.retain(payloadGPR, virtualRegister, SpillOrderBoolean);
- info.fillBoolean(payloadGPR);
+ info.fillBoolean(*m_stream, payloadGPR);
return payloadGPR;
}
- case DataFormatJSInteger:
- case DataFormatInteger:
+ case DataFormatJSInt32:
+ case DataFormatInt32:
case DataFormatJSDouble:
- case DataFormatDouble:
case DataFormatJSCell:
case DataFormatCell:
+ case DataFormatDouble:
case DataFormatStorage:
- ASSERT_NOT_REACHED();
- }
+ RELEASE_ASSERT_NOT_REACHED();
- ASSERT_NOT_REACHED();
- return InvalidGPRReg;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return InvalidGPRReg;
+ }
}
-JITCompiler::Jump SpeculativeJIT::convertToDouble(JSValueOperand& op, FPRReg result)
+void SpeculativeJIT::compileBaseValueStoreBarrier(Edge& baseEdge, Edge& valueEdge)
{
- FPRTemporary scratch(this);
-
- JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, op.tagGPR(), TrustedImm32(JSValue::Int32Tag));
- JITCompiler::Jump notNumber = m_jit.branch32(MacroAssembler::AboveOrEqual, op.payloadGPR(), TrustedImm32(JSValue::LowestTag));
-
- unboxDouble(op.tagGPR(), op.payloadGPR(), result, scratch.fpr());
- JITCompiler::Jump done = m_jit.jump();
+#if ENABLE(GGC)
+ ASSERT(!isKnownNotCell(valueEdge.node()));
- isInteger.link(&m_jit);
- m_jit.convertInt32ToDouble(op.payloadGPR(), result);
+ SpeculateCellOperand base(this, baseEdge);
+ JSValueOperand value(this, valueEdge);
+ GPRTemporary scratch1(this);
+ GPRTemporary scratch2(this);
- done.link(&m_jit);
-
- return notNumber;
+ writeBarrier(base.gpr(), value.tagGPR(), valueEdge, scratch1.gpr(), scratch2.gpr());
+#else
+ UNUSED_PARAM(baseEdge);
+ UNUSED_PARAM(valueEdge);
+#endif
}
-void SpeculativeJIT::compileObjectEquality(Node& node, const ClassInfo* classInfo, PredictionChecker predictionCheck)
+void SpeculativeJIT::compileObjectEquality(Node* node)
{
- SpeculateCellOperand op1(this, node.child1());
- SpeculateCellOperand op2(this, node.child2());
+ SpeculateCellOperand op1(this, node->child1());
+ SpeculateCellOperand op2(this, node->child2());
GPRReg op1GPR = op1.gpr();
GPRReg op2GPR = op2.gpr();
- if (!predictionCheck(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
- if (!predictionCheck(m_state.forNode(node.child2()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node.child2(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op2GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
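+    // With the MasqueradesAsUndefined watchpoint still intact, an object check on each operand is
+    // enough; once it has fired we must also exit if either operand carries the
+    // MasqueradesAsUndefined type-info flag.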
+ if (masqueradesAsUndefinedWatchpointIsStillValid()) {
+ DFG_TYPE_CHECK(
+ JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
+ DFG_TYPE_CHECK(
+ JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
+ } else {
+ DFG_TYPE_CHECK(
+ JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchIfNotObject(op1GPR));
+ speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(),
+ m_jit.branchTest8(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
+
+ DFG_TYPE_CHECK(
+ JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchIfNotObject(op2GPR));
+ speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(),
+ m_jit.branchTest8(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(op2GPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
+ }
- GPRTemporary resultPayload(this, op2);
+ GPRTemporary resultPayload(this, Reuse, op2);
GPRReg resultPayloadGPR = resultPayload.gpr();
MacroAssembler::Jump falseCase = m_jit.branchPtr(MacroAssembler::NotEqual, op1GPR, op2GPR);
m_jit.move(TrustedImm32(0), resultPayloadGPR);
done.link(&m_jit);
- booleanResult(resultPayloadGPR, m_compileIndex);
+ booleanResult(resultPayloadGPR, node);
+}
+
+void SpeculativeJIT::compileObjectStrictEquality(Edge objectChild, Edge otherChild)
+{
+ SpeculateCellOperand op1(this, objectChild);
+ JSValueOperand op2(this, otherChild);
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.payloadGPR();
+
+ DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
+
+ GPRTemporary resultPayload(this, Reuse, op1);
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ MacroAssembler::Jump op2CellJump = m_jit.branchIfCell(op2.jsValueRegs());
+
+ m_jit.move(TrustedImm32(0), resultPayloadGPR);
+ MacroAssembler::Jump op2NotCellJump = m_jit.jump();
+
+ // At this point we know that we can perform a straight-forward equality comparison on pointer
+ // values because we are doing strict equality.
+ op2CellJump.link(&m_jit);
+ m_jit.compare32(MacroAssembler::Equal, op1GPR, op2GPR, resultPayloadGPR);
+
+ op2NotCellJump.link(&m_jit);
+ booleanResult(resultPayloadGPR, m_currentNode);
+}
+
+void SpeculativeJIT::compilePeepHoleObjectStrictEquality(Edge objectChild, Edge otherChild, Node* branchNode)
+{
+ BasicBlock* taken = branchNode->branchData()->taken.block;
+ BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
+
+ SpeculateCellOperand op1(this, objectChild);
+ JSValueOperand op2(this, otherChild);
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.payloadGPR();
+
+ DFG_TYPE_CHECK(JSValueSource::unboxedCell(op1GPR), objectChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
+
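+    // op1 has already been checked to be an object, so a non-cell op2 can never be strictly
+    // equal to it; send that case straight to the not-taken block.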
+ branch32(MacroAssembler::NotEqual, op2.tagGPR(), TrustedImm32(JSValue::CellTag), notTaken);
+
+ if (taken == nextBlock()) {
+ branch32(MacroAssembler::NotEqual, op1GPR, op2GPR, notTaken);
+ jump(taken);
+ } else {
+ branch32(MacroAssembler::Equal, op1GPR, op2GPR, taken);
+ jump(notTaken);
+ }
}
-void SpeculativeJIT::compileObjectToObjectOrOtherEquality(
- Edge leftChild, Edge rightChild,
- const ClassInfo* classInfo, PredictionChecker predictionCheck)
+void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild)
{
SpeculateCellOperand op1(this, leftChild);
- JSValueOperand op2(this, rightChild);
+ JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
GPRTemporary result(this);
GPRReg op1GPR = op1.gpr();
GPRReg op2TagGPR = op2.tagGPR();
GPRReg op2PayloadGPR = op2.payloadGPR();
GPRReg resultGPR = result.gpr();
-
- if (!predictionCheck(m_state.forNode(leftChild).m_type)) {
- speculationCheck(
- BadType, JSValueSource::unboxedCell(op1GPR), leftChild.index(),
- m_jit.branchPtr(
- MacroAssembler::NotEqual,
- MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()),
- MacroAssembler::TrustedImmPtr(classInfo)));
+
+ bool masqueradesAsUndefinedWatchpointValid =
+ masqueradesAsUndefinedWatchpointIsStillValid();
+
+ if (masqueradesAsUndefinedWatchpointValid) {
+ DFG_TYPE_CHECK(
+ JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
+ } else {
+ DFG_TYPE_CHECK(
+ JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
+ speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
+ m_jit.branchTest8(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
+
// It seems that most of the time when programs do a == b where b may be either null/undefined
// or an object, b is usually an object. Balance the branches to make that case fast.
- MacroAssembler::Jump rightNotCell =
- m_jit.branch32(MacroAssembler::NotEqual, op2TagGPR, TrustedImm32(JSValue::CellTag));
-
- // We know that within this branch, rightChild must be a cell. If the CFA can tell us that the
- // proof, when filtered on cell, demonstrates that we have an object of the desired type
- // (predictionCheck() will test for FinalObject or Array, currently), then we can skip the
- // speculation.
- if (!predictionCheck(m_state.forNode(rightChild).m_type & PredictCell)) {
- speculationCheck(
- BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild.index(),
- m_jit.branchPtr(
- MacroAssembler::NotEqual,
- MacroAssembler::Address(op2PayloadGPR, JSCell::classInfoOffset()),
- MacroAssembler::TrustedImmPtr(classInfo)));
+ MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(op2.jsValueRegs());
+
+ // We know that within this branch, rightChild must be a cell.
+ if (masqueradesAsUndefinedWatchpointValid) {
+ DFG_TYPE_CHECK(
+ JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2PayloadGPR));
+ } else {
+ DFG_TYPE_CHECK(
+ JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchIfNotObject(op2PayloadGPR));
+ speculationCheck(BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild,
+ m_jit.branchTest8(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(op2PayloadGPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
// At this point we know that we can perform a straight-forward equality comparison on pointer
// We know that within this branch, rightChild must not be a cell. Check if that is enough to
// prove that it is either null or undefined.
- if (!isOtherPrediction(m_state.forNode(rightChild).m_type & ~PredictCell)) {
- m_jit.move(op2TagGPR, resultGPR);
- m_jit.or32(TrustedImm32(1), resultGPR);
+ if (needsTypeCheck(rightChild, SpecCell | SpecOther)) {
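+        // Setting the low bit of the tag collapses UndefinedTag onto NullTag (the COMPILE_ASSERTs
+        // elsewhere in this file check that (UndefinedTag | 1) == NullTag), so a single compare
+        // against NullTag accepts both null and undefined.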
+ m_jit.or32(TrustedImm32(1), op2TagGPR, resultGPR);
- speculationCheck(
- BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild.index(),
+ typeCheck(
+ JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, SpecCell | SpecOther,
m_jit.branch32(
MacroAssembler::NotEqual, resultGPR,
MacroAssembler::TrustedImm32(JSValue::NullTag)));
m_jit.move(TrustedImm32(1), resultGPR);
done.link(&m_jit);
- booleanResult(resultGPR, m_compileIndex);
+ booleanResult(resultGPR, m_currentNode);
}
-void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(
- Edge leftChild, Edge rightChild, NodeIndex branchNodeIndex,
- const ClassInfo* classInfo, PredictionChecker predictionCheck)
+void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode)
{
- Node& branchNode = at(branchNodeIndex);
- BlockIndex taken = branchNode.takenBlockIndex();
- BlockIndex notTaken = branchNode.notTakenBlockIndex();
+ BasicBlock* taken = branchNode->branchData()->taken.block;
+ BasicBlock* notTaken = branchNode->branchData()->notTaken.block;
SpeculateCellOperand op1(this, leftChild);
- JSValueOperand op2(this, rightChild);
+ JSValueOperand op2(this, rightChild, ManualOperandSpeculation);
GPRTemporary result(this);
GPRReg op1GPR = op1.gpr();
GPRReg op2TagGPR = op2.tagGPR();
GPRReg op2PayloadGPR = op2.payloadGPR();
GPRReg resultGPR = result.gpr();
-
- if (!predictionCheck(m_state.forNode(leftChild).m_type)) {
- speculationCheck(
- BadType, JSValueSource::unboxedCell(op1GPR), leftChild.index(),
- m_jit.branchPtr(
- MacroAssembler::NotEqual,
- MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()),
- MacroAssembler::TrustedImmPtr(classInfo)));
+
+ bool masqueradesAsUndefinedWatchpointValid =
+ masqueradesAsUndefinedWatchpointIsStillValid();
+
+ if (masqueradesAsUndefinedWatchpointValid) {
+ DFG_TYPE_CHECK(
+ JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
+ } else {
+ DFG_TYPE_CHECK(
+ JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchIfNotObject(op1GPR));
+ speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild,
+ m_jit.branchTest8(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(op1GPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
// It seems that most of the time when programs do a == b where b may be either null/undefined
// or an object, b is usually an object. Balance the branches to make that case fast.
- MacroAssembler::Jump rightNotCell =
- m_jit.branch32(MacroAssembler::NotEqual, op2TagGPR, TrustedImm32(JSValue::CellTag));
-
- // We know that within this branch, rightChild must be a cell. If the CFA can tell us that the
- // proof, when filtered on cell, demonstrates that we have an object of the desired type
- // (predictionCheck() will test for FinalObject or Array, currently), then we can skip the
- // speculation.
- if (!predictionCheck(m_state.forNode(rightChild).m_type & PredictCell)) {
- speculationCheck(
- BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild.index(),
- m_jit.branchPtr(
- MacroAssembler::NotEqual,
- MacroAssembler::Address(op2PayloadGPR, JSCell::classInfoOffset()),
- MacroAssembler::TrustedImmPtr(classInfo)));
+ MacroAssembler::Jump rightNotCell = m_jit.branchIfNotCell(op2.jsValueRegs());
+
+ // We know that within this branch, rightChild must be a cell.
+ if (masqueradesAsUndefinedWatchpointValid) {
+ DFG_TYPE_CHECK(
+ JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject,
+ m_jit.branchIfNotObject(op2PayloadGPR));
+ } else {
+ DFG_TYPE_CHECK(
+ JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject,
+ m_jit.branchIfNotObject(op2PayloadGPR));
+ speculationCheck(BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild,
+ m_jit.branchTest8(
+ MacroAssembler::NonZero,
+ MacroAssembler::Address(op2PayloadGPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::TrustedImm32(MasqueradesAsUndefined)));
}
// At this point we know that we can perform a straight-forward equality comparison on pointer
// We know that within this branch, rightChild must not be a cell. Check if that is enough to
// prove that it is either null or undefined.
- if (isOtherPrediction(m_state.forNode(rightChild).m_type & ~PredictCell))
+ if (!needsTypeCheck(rightChild, SpecCell | SpecOther))
rightNotCell.link(&m_jit);
else {
jump(notTaken, ForceJump);
rightNotCell.link(&m_jit);
- m_jit.move(op2TagGPR, resultGPR);
- m_jit.or32(TrustedImm32(1), resultGPR);
+ m_jit.or32(TrustedImm32(1), op2TagGPR, resultGPR);
- speculationCheck(
- BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild.index(),
+ typeCheck(
+ JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, SpecCell | SpecOther,
m_jit.branch32(
MacroAssembler::NotEqual, resultGPR,
MacroAssembler::TrustedImm32(JSValue::NullTag)));
jump(notTaken);
}
-void SpeculativeJIT::compileIntegerCompare(Node& node, MacroAssembler::RelationalCondition condition)
+void SpeculativeJIT::compileInt32Compare(Node* node, MacroAssembler::RelationalCondition condition)
{
- SpeculateIntegerOperand op1(this, node.child1());
- SpeculateIntegerOperand op2(this, node.child2());
+ SpeculateInt32Operand op1(this, node->child1());
+ SpeculateInt32Operand op2(this, node->child2());
GPRTemporary resultPayload(this);
m_jit.compare32(condition, op1.gpr(), op2.gpr(), resultPayload.gpr());
// If we add a DataFormatBool, we should use it here.
- booleanResult(resultPayload.gpr(), m_compileIndex);
+ booleanResult(resultPayload.gpr(), node);
}
-void SpeculativeJIT::compileDoubleCompare(Node& node, MacroAssembler::DoubleCondition condition)
+void SpeculativeJIT::compileDoubleCompare(Node* node, MacroAssembler::DoubleCondition condition)
{
- SpeculateDoubleOperand op1(this, node.child1());
- SpeculateDoubleOperand op2(this, node.child2());
+ SpeculateDoubleOperand op1(this, node->child1());
+ SpeculateDoubleOperand op2(this, node->child2());
GPRTemporary resultPayload(this);
m_jit.move(TrustedImm32(1), resultPayload.gpr());
m_jit.move(TrustedImm32(0), resultPayload.gpr());
trueCase.link(&m_jit);
- booleanResult(resultPayload.gpr(), m_compileIndex);
-}
-
-void SpeculativeJIT::compileValueAdd(Node& node)
-{
- JSValueOperand op1(this, node.child1());
- JSValueOperand op2(this, node.child2());
-
- GPRReg op1TagGPR = op1.tagGPR();
- GPRReg op1PayloadGPR = op1.payloadGPR();
- GPRReg op2TagGPR = op2.tagGPR();
- GPRReg op2PayloadGPR = op2.payloadGPR();
-
- flushRegisters();
-
- GPRResult2 resultTag(this);
- GPRResult resultPayload(this);
- if (isKnownNotNumber(node.child1().index()) || isKnownNotNumber(node.child2().index()))
- callOperation(operationValueAddNotNumber, resultTag.gpr(), resultPayload.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR);
- else
- callOperation(operationValueAdd, resultTag.gpr(), resultPayload.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR);
-
- jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ booleanResult(resultPayload.gpr(), node);
}
-void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse, const ClassInfo* classInfo, bool needSpeculationCheck)
+void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse)
{
- JSValueOperand value(this, nodeUse);
+ JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
GPRTemporary resultPayload(this);
GPRReg valueTagGPR = value.tagGPR();
GPRReg valuePayloadGPR = value.payloadGPR();
GPRReg resultPayloadGPR = resultPayload.gpr();
-
- MacroAssembler::Jump notCell = m_jit.branch32(MacroAssembler::NotEqual, valueTagGPR, TrustedImm32(JSValue::CellTag));
- if (needSpeculationCheck)
- speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(valuePayloadGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
+ GPRTemporary structure;
+ GPRReg structureGPR = InvalidGPRReg;
+
+ bool masqueradesAsUndefinedWatchpointValid =
+ masqueradesAsUndefinedWatchpointIsStillValid();
+
+ if (!masqueradesAsUndefinedWatchpointValid) {
+ // The masquerades as undefined case will use the structure register, so allocate it here.
+ // Do this at the top of the function to avoid branching around a register allocation.
+ GPRTemporary realStructure(this);
+ structure.adopt(realStructure);
+ structureGPR = structure.gpr();
+ }
+
+ MacroAssembler::Jump notCell = m_jit.branchIfNotCell(value.jsValueRegs());
+ if (masqueradesAsUndefinedWatchpointValid) {
+ DFG_TYPE_CHECK(
+ JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject,
+ m_jit.branchIfNotObject(valuePayloadGPR));
+ } else {
+ DFG_TYPE_CHECK(
+ JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject,
+ m_jit.branchIfNotObject(valuePayloadGPR));
+
+ MacroAssembler::Jump isNotMasqueradesAsUndefined =
+ m_jit.branchTest8(
+ MacroAssembler::Zero,
+ MacroAssembler::Address(valuePayloadGPR, JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::TrustedImm32(MasqueradesAsUndefined));
+
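+        // The value is an object with the MasqueradesAsUndefined flag set; exit if its
+        // structure's global object matches the one this node executes in, since it must then
+        // behave as undefined here.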
+ m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureIDOffset()), structureGPR);
+ speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse,
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));
+
+ isNotMasqueradesAsUndefined.link(&m_jit);
+ }
m_jit.move(TrustedImm32(0), resultPayloadGPR);
MacroAssembler::Jump done = m_jit.jump();
notCell.link(&m_jit);
COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
- if (needSpeculationCheck) {
- m_jit.move(valueTagGPR, resultPayloadGPR);
- m_jit.or32(TrustedImm32(1), resultPayloadGPR);
- speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, m_jit.branch32(MacroAssembler::NotEqual, resultPayloadGPR, TrustedImm32(JSValue::NullTag)));
+ if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
+ m_jit.or32(TrustedImm32(1), valueTagGPR, resultPayloadGPR);
+ typeCheck(
+ JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, SpecCell | SpecOther,
+ m_jit.branch32(
+ MacroAssembler::NotEqual,
+ resultPayloadGPR,
+ TrustedImm32(JSValue::NullTag)));
}
m_jit.move(TrustedImm32(1), resultPayloadGPR);
done.link(&m_jit);
- booleanResult(resultPayloadGPR, m_compileIndex);
+ booleanResult(resultPayloadGPR, m_currentNode);
}
-void SpeculativeJIT::compileLogicalNot(Node& node)
+void SpeculativeJIT::compileLogicalNot(Node* node)
{
- if (at(node.child1()).shouldSpeculateBoolean()) {
- SpeculateBooleanOperand value(this, node.child1());
- GPRTemporary result(this, value);
+ switch (node->child1().useKind()) {
+ case BooleanUse: {
+ SpeculateBooleanOperand value(this, node->child1());
+ GPRTemporary result(this, Reuse, value);
m_jit.xor32(TrustedImm32(1), value.gpr(), result.gpr());
- booleanResult(result.gpr(), m_compileIndex);
+ booleanResult(result.gpr(), node);
return;
}
- if (at(node.child1()).shouldSpeculateFinalObjectOrOther()) {
- compileObjectOrOtherLogicalNot(node.child1(), &JSFinalObject::s_info, !isFinalObjectOrOtherPrediction(m_state.forNode(node.child1()).m_type));
- return;
- }
- if (at(node.child1()).shouldSpeculateArrayOrOther()) {
- compileObjectOrOtherLogicalNot(node.child1(), &JSArray::s_info, !isArrayOrOtherPrediction(m_state.forNode(node.child1()).m_type));
+
+ case ObjectOrOtherUse: {
+ compileObjectOrOtherLogicalNot(node->child1());
return;
}
- if (at(node.child1()).shouldSpeculateInteger()) {
- SpeculateIntegerOperand value(this, node.child1());
- GPRTemporary resultPayload(this, value);
+
+ case Int32Use: {
+ SpeculateInt32Operand value(this, node->child1());
+ GPRTemporary resultPayload(this, Reuse, value);
m_jit.compare32(MacroAssembler::Equal, value.gpr(), MacroAssembler::TrustedImm32(0), resultPayload.gpr());
- booleanResult(resultPayload.gpr(), m_compileIndex);
+ booleanResult(resultPayload.gpr(), node);
return;
}
- if (at(node.child1()).shouldSpeculateNumber()) {
- SpeculateDoubleOperand value(this, node.child1());
+
+ case DoubleRepUse: {
+ SpeculateDoubleOperand value(this, node->child1());
FPRTemporary scratch(this);
GPRTemporary resultPayload(this);
m_jit.move(TrustedImm32(0), resultPayload.gpr());
MacroAssembler::Jump nonZero = m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr());
m_jit.move(TrustedImm32(1), resultPayload.gpr());
nonZero.link(&m_jit);
- booleanResult(resultPayload.gpr(), m_compileIndex);
+ booleanResult(resultPayload.gpr(), node);
return;
}
- JSValueOperand arg1(this, node.child1());
- GPRTemporary resultPayload(this, arg1, false);
- GPRReg arg1TagGPR = arg1.tagGPR();
- GPRReg arg1PayloadGPR = arg1.payloadGPR();
- GPRReg resultPayloadGPR = resultPayload.gpr();
+ case UntypedUse: {
+ JSValueOperand arg1(this, node->child1());
+ GPRTemporary resultPayload(this, Reuse, arg1, PayloadWord);
+ GPRReg arg1TagGPR = arg1.tagGPR();
+ GPRReg arg1PayloadGPR = arg1.payloadGPR();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
- arg1.use();
+ arg1.use();
- JITCompiler::Jump fastCase = m_jit.branch32(JITCompiler::Equal, arg1TagGPR, TrustedImm32(JSValue::BooleanTag));
-
- silentSpillAllRegisters(resultPayloadGPR);
- callOperation(dfgConvertJSValueToBoolean, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR);
- silentFillAllRegisters(resultPayloadGPR);
- JITCompiler::Jump doNot = m_jit.jump();
-
- fastCase.link(&m_jit);
- m_jit.move(arg1PayloadGPR, resultPayloadGPR);
+ JITCompiler::Jump slowCase = m_jit.branch32(JITCompiler::NotEqual, arg1TagGPR, TrustedImm32(JSValue::BooleanTag));
+
+ m_jit.move(arg1PayloadGPR, resultPayloadGPR);
+
+ addSlowPathGenerator(
+ slowPathCall(
+ slowCase, this, operationConvertJSValueToBoolean, resultPayloadGPR, arg1TagGPR,
+ arg1PayloadGPR));
+
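+        // Whether we came through the fast path (the operand was already boolean) or the slow
+        // path call, resultPayloadGPR now holds 0 or 1; flip it to produce the logical not.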
+ m_jit.xor32(TrustedImm32(1), resultPayloadGPR);
+ booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly);
+ return;
+ }
+ case StringUse:
+ return compileStringZeroLength(node);
- doNot.link(&m_jit);
- m_jit.xor32(TrustedImm32(1), resultPayloadGPR);
- booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
}
-void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BlockIndex taken, BlockIndex notTaken, const ClassInfo* classInfo, bool needSpeculationCheck)
+void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BasicBlock* taken, BasicBlock* notTaken)
{
- JSValueOperand value(this, nodeUse);
+ JSValueOperand value(this, nodeUse, ManualOperandSpeculation);
GPRTemporary scratch(this);
GPRReg valueTagGPR = value.tagGPR();
GPRReg valuePayloadGPR = value.payloadGPR();
GPRReg scratchGPR = scratch.gpr();
- MacroAssembler::Jump notCell = m_jit.branch32(MacroAssembler::NotEqual, valueTagGPR, TrustedImm32(JSValue::CellTag));
- if (needSpeculationCheck)
- speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(valuePayloadGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo)));
+ MacroAssembler::Jump notCell = m_jit.branchIfNotCell(value.jsValueRegs());
+ if (masqueradesAsUndefinedWatchpointIsStillValid()) {
+ DFG_TYPE_CHECK(
+ JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject,
+ m_jit.branchIfNotObject(valuePayloadGPR));
+ } else {
+ DFG_TYPE_CHECK(
+ JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject,
+ m_jit.branchIfNotObject(valuePayloadGPR));
+
+ JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(
+ JITCompiler::Zero,
+ MacroAssembler::Address(valuePayloadGPR, JSCell::typeInfoFlagsOffset()),
+ TrustedImm32(MasqueradesAsUndefined));
+
+ m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureIDOffset()), scratchGPR);
+ speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse,
+ m_jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(scratchGPR, Structure::globalObjectOffset()),
+ MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->origin.semantic))));
+
+ isNotMasqueradesAsUndefined.link(&m_jit);
+ }
jump(taken, ForceJump);
notCell.link(&m_jit);
COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
- if (needSpeculationCheck) {
- m_jit.move(valueTagGPR, scratchGPR);
- m_jit.or32(TrustedImm32(1), scratchGPR);
- speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, m_jit.branch32(MacroAssembler::NotEqual, scratchGPR, TrustedImm32(JSValue::NullTag)));
+ if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) {
+ m_jit.or32(TrustedImm32(1), valueTagGPR, scratchGPR);
+ typeCheck(
+ JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, SpecCell | SpecOther,
+ m_jit.branch32(MacroAssembler::NotEqual, scratchGPR, TrustedImm32(JSValue::NullTag)));
}
jump(notTaken);
- noResult(m_compileIndex);
+ noResult(m_currentNode);
}
-void SpeculativeJIT::emitBranch(Node& node)
+void SpeculativeJIT::emitBranch(Node* node)
{
- BlockIndex taken = node.takenBlockIndex();
- BlockIndex notTaken = node.notTakenBlockIndex();
+ BasicBlock* taken = node->branchData()->taken.block;
+ BasicBlock* notTaken = node->branchData()->notTaken.block;
- if (at(node.child1()).shouldSpeculateBoolean()) {
- SpeculateBooleanOperand value(this, node.child1());
+ switch (node->child1().useKind()) {
+ case BooleanUse: {
+ SpeculateBooleanOperand value(this, node->child1());
MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
condition = MacroAssembler::Zero;
- BlockIndex tmp = taken;
+ BasicBlock* tmp = taken;
taken = notTaken;
notTaken = tmp;
}
branchTest32(condition, value.gpr(), TrustedImm32(1), taken);
jump(notTaken);
- noResult(m_compileIndex);
- } else if (at(node.child1()).shouldSpeculateFinalObjectOrOther()) {
- emitObjectOrOtherBranch(node.child1(), taken, notTaken, &JSFinalObject::s_info, !isFinalObjectOrOtherPrediction(m_state.forNode(node.child1()).m_type));
- } else if (at(node.child1()).shouldSpeculateArrayOrOther()) {
- emitObjectOrOtherBranch(node.child1(), taken, notTaken, &JSArray::s_info, !isArrayOrOtherPrediction(m_state.forNode(node.child1()).m_type));
- } else if (at(node.child1()).shouldSpeculateNumber()) {
- if (at(node.child1()).shouldSpeculateInteger()) {
+ noResult(node);
+ return;
+ }
+
+ case ObjectOrOtherUse: {
+ emitObjectOrOtherBranch(node->child1(), taken, notTaken);
+ return;
+ }
+
+ case StringUse: {
+ emitStringBranch(node->child1(), taken, notTaken);
+ return;
+ }
+
+ case DoubleRepUse:
+ case Int32Use: {
+ if (node->child1().useKind() == Int32Use) {
bool invert = false;
- if (taken == (m_block + 1)) {
+ if (taken == nextBlock()) {
invert = true;
- BlockIndex tmp = taken;
+ BasicBlock* tmp = taken;
taken = notTaken;
notTaken = tmp;
}
- SpeculateIntegerOperand value(this, node.child1());
+ SpeculateInt32Operand value(this, node->child1());
branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr(), taken);
} else {
- SpeculateDoubleOperand value(this, node.child1());
+ SpeculateDoubleOperand value(this, node->child1());
FPRTemporary scratch(this);
branchDoubleNonZero(value.fpr(), scratch.fpr(), taken);
}
jump(notTaken);
- noResult(m_compileIndex);
- } else {
- JSValueOperand value(this, node.child1());
+ noResult(node);
+ return;
+ }
+
+ case UntypedUse: {
+ JSValueOperand value(this, node->child1());
value.fill();
GPRReg valueTagGPR = value.tagGPR();
GPRReg valuePayloadGPR = value.payloadGPR();
GPRTemporary result(this);
GPRReg resultGPR = result.gpr();
- use(node.child1());
+ use(node->child1());
JITCompiler::Jump fastPath = m_jit.branch32(JITCompiler::Equal, valueTagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag));
JITCompiler::Jump slowPath = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, JITCompiler::TrustedImm32(JSValue::BooleanTag));
slowPath.link(&m_jit);
silentSpillAllRegisters(resultGPR);
- callOperation(dfgConvertJSValueToBoolean, resultGPR, valueTagGPR, valuePayloadGPR);
+ callOperation(operationConvertJSValueToBoolean, resultGPR, valueTagGPR, valuePayloadGPR);
silentFillAllRegisters(resultGPR);
branchTest32(JITCompiler::NonZero, resultGPR, taken);
jump(notTaken);
- noResult(m_compileIndex, UseChildrenCalledExplicitly);
+ noResult(node, UseChildrenCalledExplicitly);
+ return;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
}
}
-void SpeculativeJIT::compile(Node& node)
+template<typename BaseOperandType, typename PropertyOperandType, typename ValueOperandType, typename TagType>
+void SpeculativeJIT::compileContiguousPutByVal(Node* node, BaseOperandType& base, PropertyOperandType& property, ValueOperandType& value, GPRReg valuePayloadReg, TagType valueTag)
{
- NodeType op = node.op();
-
- switch (op) {
- case JSConstant:
- initConstantInfo(m_compileIndex);
- break;
+ Edge child4 = m_jit.graph().varArgChild(node, 3);
- case WeakJSConstant:
- m_jit.addWeakReference(node.weakConstant());
- initConstantInfo(m_compileIndex);
- break;
+ ArrayMode arrayMode = node->arrayMode();
+
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+
+ StorageOperand storage(this, child4);
+ GPRReg storageReg = storage.gpr();
- case GetLocal: {
- PredictedType prediction = node.variableAccessData()->prediction();
- AbstractValue& value = block()->valuesAtHead.operand(node.local());
+ if (node->op() == PutByValAlias) {
+ // Store the value to the array.
+ GPRReg propertyReg = property.gpr();
+ m_jit.store32(valueTag, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+
+ noResult(node);
+ return;
+ }
+
+ MacroAssembler::Jump slowCase;
- // If we have no prediction for this local, then don't attempt to compile.
- if (prediction == PredictNone || value.isClear()) {
- terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
- break;
- }
+ if (arrayMode.isInBounds()) {
+ speculationCheck(
+ OutOfBounds, JSValueRegs(), 0,
+ m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
+ } else {
+ MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
- if (!m_jit.graph().isCaptured(node.local())) {
- if (node.variableAccessData()->shouldUseDoubleFormat()) {
- FPRTemporary result(this);
- m_jit.loadDouble(JITCompiler::addressFor(node.local()), result.fpr());
- VirtualRegister virtualRegister = node.virtualRegister();
- m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble);
- m_generationInfo[virtualRegister].initDouble(m_compileIndex, node.refCount(), result.fpr());
- break;
- }
+ slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength()));
- if (isInt32Prediction(prediction)) {
- GPRTemporary result(this);
- m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr());
+ if (!arrayMode.isOutOfBounds())
+ speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase);
+
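+        // The store lands past the public length but within the vector: bump the public length
+        // to index + 1, using propertyReg as scratch and restoring it afterwards.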
+ m_jit.add32(TrustedImm32(1), propertyReg);
+ m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()));
+ m_jit.sub32(TrustedImm32(1), propertyReg);
+
+ inBounds.link(&m_jit);
+ }
+
+ m_jit.store32(valueTag, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+
+ base.use();
+ property.use();
+ value.use();
+ storage.use();
+
+ if (arrayMode.isOutOfBounds()) {
+ if (node->op() == PutByValDirect) {
+ addSlowPathGenerator(slowPathCall(
+ slowCase, this,
+ m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValDirectBeyondArrayBoundsNonStrict,
+ NoResult, baseReg, propertyReg, valueTag, valuePayloadReg));
+ } else {
+ addSlowPathGenerator(slowPathCall(
+ slowCase, this,
+ m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
+ NoResult, baseReg, propertyReg, valueTag, valuePayloadReg));
+ }
+ }
- // Like integerResult, but don't useChildren - our children are phi nodes,
- // and don't represent values within this dataflow with virtual registers.
- VirtualRegister virtualRegister = node.virtualRegister();
- m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger);
- m_generationInfo[virtualRegister].initInteger(m_compileIndex, node.refCount(), result.gpr());
- break;
- }
+ noResult(node, UseChildrenCalledExplicitly);
+}
- if (isArrayPrediction(prediction)) {
- GPRTemporary result(this);
- m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr());
+void SpeculativeJIT::compile(Node* node)
+{
+ NodeType op = node->op();
- // Like cellResult, but don't useChildren - our children are phi nodes,
- // and don't represent values within this dataflow with virtual registers.
- VirtualRegister virtualRegister = node.virtualRegister();
- m_gprs.retain(result.gpr(), virtualRegister, SpillOrderCell);
- m_generationInfo[virtualRegister].initCell(m_compileIndex, node.refCount(), result.gpr());
- break;
- }
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ m_jit.clearRegisterAllocationOffsets();
+#endif
- if (isBooleanPrediction(prediction)) {
- GPRTemporary result(this);
- m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr());
+ switch (op) {
+ case JSConstant:
+ case DoubleConstant:
+ case PhantomDirectArguments:
+ case PhantomClonedArguments:
+ initConstantInfo(node);
+ break;
- // Like booleanResult, but don't useChildren - our children are phi nodes,
- // and don't represent values within this dataflow with virtual registers.
- VirtualRegister virtualRegister = node.virtualRegister();
- m_gprs.retain(result.gpr(), virtualRegister, SpillOrderBoolean);
- m_generationInfo[virtualRegister].initBoolean(m_compileIndex, node.refCount(), result.gpr());
- break;
- }
+ case Identity: {
+ speculate(node, node->child1());
+ switch (node->child1().useKind()) {
+ case DoubleRepUse:
+ case DoubleRepRealUse: {
+ SpeculateDoubleOperand op(this, node->child1());
+ doubleResult(op.fpr(), node);
+ break;
}
-
- GPRTemporary result(this);
- GPRTemporary tag(this);
- m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr());
- m_jit.load32(JITCompiler::tagFor(node.local()), tag.gpr());
-
- // Like jsValueResult, but don't useChildren - our children are phi nodes,
- // and don't represent values within this dataflow with virtual registers.
- VirtualRegister virtualRegister = node.virtualRegister();
- m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
- m_gprs.retain(tag.gpr(), virtualRegister, SpillOrderJS);
-
- DataFormat format;
- if (isCellPrediction(value.m_type)
- && !m_jit.graph().isCaptured(node.local()))
- format = DataFormatJSCell;
- else
- format = DataFormatJS;
- m_generationInfo[virtualRegister].initJSValue(m_compileIndex, node.refCount(), tag.gpr(), result.gpr(), format);
+ case Int52RepUse:
+ case MachineIntUse:
+ case DoubleRepMachineIntUse: {
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ default: {
+ JSValueOperand op(this, node->child1());
+ jsValueResult(op.tagGPR(), op.payloadGPR(), node);
+ break;
+ }
+ } // switch
break;
}
- case SetLocal: {
- // SetLocal doubles as a hint as to where a node will be stored and
- // as a speculation point. So before we speculate make sure that we
- // know where the child of this node needs to go in the virtual
- // register file.
- compileMovHint(node);
-
- // As far as OSR is concerned, we're on the bytecode index corresponding
- // to the *next* instruction, since we've already "executed" the
- // SetLocal and whatever other DFG Nodes are associated with the same
- // bytecode index as the SetLocal.
- ASSERT(m_codeOriginForOSR == node.codeOrigin);
- Node* nextNode = &at(block()->at(m_indexInBlock + 1));
-
- // But even more oddly, we need to be super careful about the following
- // sequence:
- //
- // a: Foo()
- // b: SetLocal(@a)
- // c: Flush(@b)
- //
- // This next piece of crazy takes care of this.
- if (nextNode->op() == Flush && nextNode->child1() == m_compileIndex)
- nextNode = &at(block()->at(m_indexInBlock + 2));
-
- // Oddly, it's possible for the bytecode index for the next node to be
- // equal to ours. This will happen for op_post_inc. And, even more oddly,
- // this is just fine. Ordinarily, this wouldn't be fine, since if the
- // next node failed OSR then we'd be OSR-ing with this SetLocal's local
- // variable already set even though from the standpoint of the old JIT,
- // this SetLocal should not have executed. But for op_post_inc, it's just
- // fine, because this SetLocal's local (i.e. the LHS in a x = y++
- // statement) would be dead anyway - so the fact that DFG would have
- // already made the assignment, and baked it into the register file during
- // OSR exit, would not be visible to the old JIT in any way.
- m_codeOriginForOSR = nextNode->codeOrigin;
-
- if (!m_jit.graph().isCaptured(node.local())) {
- if (node.variableAccessData()->shouldUseDoubleFormat()) {
- SpeculateDoubleOperand value(this, node.child1());
- m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node.local()));
- noResult(m_compileIndex);
- // Indicate that it's no longer necessary to retrieve the value of
- // this bytecode variable from registers or other locations in the register file,
- // but that it is stored as a double.
- valueSourceReferenceForOperand(node.local()) = ValueSource(DoubleInRegisterFile);
- break;
- }
- PredictedType predictedType = node.variableAccessData()->argumentAwarePrediction();
- if (m_generationInfo[at(node.child1()).virtualRegister()].registerFormat() == DataFormatDouble) {
- DoubleOperand value(this, node.child1());
- m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node.local()));
- noResult(m_compileIndex);
- valueSourceReferenceForOperand(node.local()) = ValueSource(DoubleInRegisterFile);
- break;
- }
- if (isInt32Prediction(predictedType)) {
- SpeculateIntegerOperand value(this, node.child1());
- m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local()));
- noResult(m_compileIndex);
- valueSourceReferenceForOperand(node.local()) = ValueSource(Int32InRegisterFile);
- break;
- }
- if (isArrayPrediction(predictedType)) {
- SpeculateCellOperand cell(this, node.child1());
- GPRReg cellGPR = cell.gpr();
- if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(cellGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(cellGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
- m_jit.storePtr(cellGPR, JITCompiler::payloadFor(node.local()));
- noResult(m_compileIndex);
- valueSourceReferenceForOperand(node.local()) = ValueSource(CellInRegisterFile);
- break;
- }
- if (isBooleanPrediction(predictedType)) {
- SpeculateBooleanOperand value(this, node.child1());
- m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local()));
- noResult(m_compileIndex);
- valueSourceReferenceForOperand(node.local()) = ValueSource(BooleanInRegisterFile);
- break;
- }
+ case GetLocal: {
+ AbstractValue& value = m_state.variables().operand(node->local());
+
+ // If the CFA is tracking this variable and it found that the variable
+ // cannot have been assigned, then don't attempt to proceed.
+ if (value.isClear()) {
+ m_compileOkay = false;
+ break;
+ }
+
+ switch (node->variableAccessData()->flushFormat()) {
+ case FlushedDouble: {
+ FPRTemporary result(this);
+ m_jit.loadDouble(JITCompiler::addressFor(node->machineLocal()), result.fpr());
+ VirtualRegister virtualRegister = node->virtualRegister();
+ m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble);
+ generationInfoFromVirtualRegister(virtualRegister).initDouble(node, node->refCount(), result.fpr());
+ break;
+ }
+
+ case FlushedInt32: {
+ GPRTemporary result(this);
+ m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr());
+
+ // Like int32Result, but don't useChildren - our children are phi nodes,
+ // and don't represent values within this dataflow with virtual registers.
+ VirtualRegister virtualRegister = node->virtualRegister();
+ m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger);
+ generationInfoFromVirtualRegister(virtualRegister).initInt32(node, node->refCount(), result.gpr());
+ break;
+ }
+
+ case FlushedCell: {
+ GPRTemporary result(this);
+ m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr());
+
+ // Like cellResult, but don't useChildren - our children are phi nodes,
+ // and don't represent values within this dataflow with virtual registers.
+ VirtualRegister virtualRegister = node->virtualRegister();
+ m_gprs.retain(result.gpr(), virtualRegister, SpillOrderCell);
+ generationInfoFromVirtualRegister(virtualRegister).initCell(node, node->refCount(), result.gpr());
+ break;
+ }
+
+ case FlushedBoolean: {
+ GPRTemporary result(this);
+ m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr());
+
+ // Like booleanResult, but don't useChildren - our children are phi nodes,
+ // and don't represent values within this dataflow with virtual registers.
+ VirtualRegister virtualRegister = node->virtualRegister();
+ m_gprs.retain(result.gpr(), virtualRegister, SpillOrderBoolean);
+ generationInfoFromVirtualRegister(virtualRegister).initBoolean(node, node->refCount(), result.gpr());
+ break;
+ }
+
+ case FlushedJSValue: {
+ GPRTemporary result(this);
+ GPRTemporary tag(this);
+ m_jit.load32(JITCompiler::payloadFor(node->machineLocal()), result.gpr());
+ m_jit.load32(JITCompiler::tagFor(node->machineLocal()), tag.gpr());
+
+ // Like jsValueResult, but don't useChildren - our children are phi nodes,
+ // and don't represent values within this dataflow with virtual registers.
+ VirtualRegister virtualRegister = node->virtualRegister();
+ m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS);
+ m_gprs.retain(tag.gpr(), virtualRegister, SpillOrderJS);
+
+ generationInfoFromVirtualRegister(virtualRegister).initJSValue(node, node->refCount(), tag.gpr(), result.gpr(), DataFormatJS);
+ break;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ break;
+ }
+
+ case GetLocalUnlinked: {
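+        // GetLocalUnlinked has no VariableAccessData to tell us a flush format, so load both the
+        // tag and the payload and produce a generic JSValue result.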
+ GPRTemporary payload(this);
+ GPRTemporary tag(this);
+ m_jit.load32(JITCompiler::payloadFor(node->unlinkedMachineLocal()), payload.gpr());
+ m_jit.load32(JITCompiler::tagFor(node->unlinkedMachineLocal()), tag.gpr());
+ jsValueResult(tag.gpr(), payload.gpr(), node);
+ break;
+ }
+
+ case MovHint: {
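+        // MovHint (and ZombieHint below) emit no code; they only update the OSR exit state that
+        // records where a bytecode variable's value lives, or that it is dead.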
+ compileMovHint(m_currentNode);
+ noResult(node);
+ break;
+ }
+
+ case ZombieHint: {
+ recordSetLocal(m_currentNode->unlinkedLocal(), VirtualRegister(), DataFormatDead);
+ noResult(node);
+ break;
+ }
+
+ case SetLocal: {
+ switch (node->variableAccessData()->flushFormat()) {
+ case FlushedDouble: {
+ SpeculateDoubleOperand value(this, node->child1());
+ m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node->machineLocal()));
+ noResult(node);
+ // Indicate that it's no longer necessary to retrieve the value of
+ // this bytecode variable from registers or other locations in the stack,
+ // but that it is stored as a double.
+ recordSetLocal(DataFormatDouble);
+ break;
+ }
+
+ case FlushedInt32: {
+ SpeculateInt32Operand value(this, node->child1());
+ m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->machineLocal()));
+ noResult(node);
+ recordSetLocal(DataFormatInt32);
+ break;
+ }
+
+ case FlushedCell: {
+ SpeculateCellOperand cell(this, node->child1());
+ GPRReg cellGPR = cell.gpr();
+ m_jit.storePtr(cellGPR, JITCompiler::payloadFor(node->machineLocal()));
+ noResult(node);
+ recordSetLocal(DataFormatCell);
+ break;
+ }
+
+ case FlushedBoolean: {
+ SpeculateBooleanOperand value(this, node->child1());
+ m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->machineLocal()));
+ noResult(node);
+ recordSetLocal(DataFormatBoolean);
+ break;
+ }
+
+ case FlushedJSValue: {
+ JSValueOperand value(this, node->child1());
+ m_jit.store32(value.payloadGPR(), JITCompiler::payloadFor(node->machineLocal()));
+ m_jit.store32(value.tagGPR(), JITCompiler::tagFor(node->machineLocal()));
+ noResult(node);
+ recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat()));
+ break;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
}
- JSValueOperand value(this, node.child1());
- m_jit.store32(value.payloadGPR(), JITCompiler::payloadFor(node.local()));
- m_jit.store32(value.tagGPR(), JITCompiler::tagFor(node.local()));
- noResult(m_compileIndex);
- valueSourceReferenceForOperand(node.local()) = ValueSource(ValueInRegisterFile);
break;
}
// But it may be profitable to use this as a hook to run speculation checks
// on arguments, thereby allowing us to trivially eliminate such checks if
// the argument is not used.
+ recordSetLocal(dataFormatFor(node->variableAccessData()->flushFormat()));
break;
case BitAnd:
case BitOr:
case BitXor:
- if (isInt32Constant(node.child1().index())) {
- SpeculateIntegerOperand op2(this, node.child2());
- GPRTemporary result(this, op2);
+ if (node->child1()->isInt32Constant()) {
+ SpeculateInt32Operand op2(this, node->child2());
+ GPRTemporary result(this, Reuse, op2);
- bitOp(op, valueOfInt32Constant(node.child1().index()), op2.gpr(), result.gpr());
+ bitOp(op, node->child1()->asInt32(), op2.gpr(), result.gpr());
- integerResult(result.gpr(), m_compileIndex);
- } else if (isInt32Constant(node.child2().index())) {
- SpeculateIntegerOperand op1(this, node.child1());
- GPRTemporary result(this, op1);
+ int32Result(result.gpr(), node);
+ } else if (node->child2()->isInt32Constant()) {
+ SpeculateInt32Operand op1(this, node->child1());
+ GPRTemporary result(this, Reuse, op1);
- bitOp(op, valueOfInt32Constant(node.child2().index()), op1.gpr(), result.gpr());
+ bitOp(op, node->child2()->asInt32(), op1.gpr(), result.gpr());
- integerResult(result.gpr(), m_compileIndex);
+ int32Result(result.gpr(), node);
} else {
- SpeculateIntegerOperand op1(this, node.child1());
- SpeculateIntegerOperand op2(this, node.child2());
- GPRTemporary result(this, op1, op2);
+ SpeculateInt32Operand op1(this, node->child1());
+ SpeculateInt32Operand op2(this, node->child2());
+ GPRTemporary result(this, Reuse, op1, op2);
GPRReg reg1 = op1.gpr();
GPRReg reg2 = op2.gpr();
bitOp(op, reg1, reg2, result.gpr());
- integerResult(result.gpr(), m_compileIndex);
+ int32Result(result.gpr(), node);
}
break;
case BitRShift:
case BitLShift:
case BitURShift:
- if (isInt32Constant(node.child2().index())) {
- SpeculateIntegerOperand op1(this, node.child1());
- GPRTemporary result(this, op1);
+ if (node->child2()->isInt32Constant()) {
+ SpeculateInt32Operand op1(this, node->child1());
+ GPRTemporary result(this, Reuse, op1);
- shiftOp(op, op1.gpr(), valueOfInt32Constant(node.child2().index()) & 0x1f, result.gpr());
+ shiftOp(op, op1.gpr(), node->child2()->asInt32() & 0x1f, result.gpr());
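+            // Shift counts in JS are taken modulo 32, hence the & 0x1f mask on the
+            // constant shift amount.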
- integerResult(result.gpr(), m_compileIndex);
+ int32Result(result.gpr(), node);
} else {
// Do not allow shift amount to be used as the result, MacroAssembler does not permit this.
- SpeculateIntegerOperand op1(this, node.child1());
- SpeculateIntegerOperand op2(this, node.child2());
- GPRTemporary result(this, op1);
+ SpeculateInt32Operand op1(this, node->child1());
+ SpeculateInt32Operand op2(this, node->child2());
+ GPRTemporary result(this, Reuse, op1);
GPRReg reg1 = op1.gpr();
GPRReg reg2 = op2.gpr();
shiftOp(op, reg1, reg2, result.gpr());
- integerResult(result.gpr(), m_compileIndex);
+ int32Result(result.gpr(), node);
}
break;
break;
}
- case Int32ToDouble: {
- compileInt32ToDouble(node);
+ case DoubleRep: {
+ compileDoubleRep(node);
break;
}
- case CheckNumber: {
- if (!isNumberPrediction(m_state.forNode(node.child1()).m_type)) {
- JSValueOperand op1(this, node.child1());
- JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, op1.tagGPR(), TrustedImm32(JSValue::Int32Tag));
- speculationCheck(
- BadType, JSValueRegs(op1.tagGPR(), op1.payloadGPR()), node.child1().index(),
- m_jit.branch32(MacroAssembler::AboveOrEqual, op1.tagGPR(), TrustedImm32(JSValue::LowestTag)));
- isInteger.link(&m_jit);
- }
- noResult(m_compileIndex);
+ case ValueRep: {
+ compileValueRep(node);
+ break;
+ }
+
+ case ValueAdd: {
+ JSValueOperand op1(this, node->child1());
+ JSValueOperand op2(this, node->child2());
+
+ GPRReg op1TagGPR = op1.tagGPR();
+ GPRReg op1PayloadGPR = op1.payloadGPR();
+ GPRReg op2TagGPR = op2.tagGPR();
+ GPRReg op2PayloadGPR = op2.payloadGPR();
+
+ flushRegisters();
+
+ GPRFlushedCallResult2 resultTag(this);
+ GPRFlushedCallResult resultPayload(this);
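+        // When either operand is statically known not to be a number, the add cannot
+        // be a pure numeric add, so the NotNumber variant of the runtime call is used;
+        // it can go straight to the generic (e.g. string-concatenating) behaviour.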
+ if (isKnownNotNumber(node->child1().node()) || isKnownNotNumber(node->child2().node()))
+ callOperation(operationValueAddNotNumber, resultTag.gpr(), resultPayload.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR);
+ else
+ callOperation(operationValueAdd, resultTag.gpr(), resultPayload.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR);
+
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
break;
}
- case ValueAdd:
case ArithAdd:
compileAdd(node);
break;
+ case ArithClz32:
+ compileArithClz32(node);
+ break;
+
+ case MakeRope:
+ compileMakeRope(node);
+ break;
+
case ArithSub:
compileArithSub(node);
break;
break;
case ArithDiv: {
- if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
-#if CPU(X86)
- compileIntegerArithDivForX86(node);
-#else // CPU(X86) -> so non-X86 code follows
- ASSERT_NOT_REACHED(); // should have been coverted into a double divide.
-#endif // CPU(X86)
- break;
- }
-
- SpeculateDoubleOperand op1(this, node.child1());
- SpeculateDoubleOperand op2(this, node.child2());
- FPRTemporary result(this, op1);
-
- FPRReg reg1 = op1.fpr();
- FPRReg reg2 = op2.fpr();
- m_jit.divDouble(reg1, reg2, result.fpr());
-
- doubleResult(result.fpr(), m_compileIndex);
+ compileArithDiv(node);
break;
}
break;
}
+ case ArithPow: {
+ compileArithPow(node);
+ break;
+ }
+
case ArithAbs: {
- if (at(node.child1()).shouldSpeculateInteger() && node.canSpeculateInteger()) {
- SpeculateIntegerOperand op1(this, node.child1());
- GPRTemporary result(this, op1);
+ switch (node->child1().useKind()) {
+ case Int32Use: {
+ SpeculateStrictInt32Operand op1(this, node->child1());
+ GPRTemporary result(this, Reuse, op1);
GPRTemporary scratch(this);
- m_jit.zeroExtend32ToPtr(op1.gpr(), result.gpr());
+ m_jit.move(op1.gpr(), result.gpr());
m_jit.rshift32(result.gpr(), MacroAssembler::TrustedImm32(31), scratch.gpr());
m_jit.add32(scratch.gpr(), result.gpr());
m_jit.xor32(scratch.gpr(), result.gpr());
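+            // Branchless abs: with mask = x >> 31 (all ones when x is negative, zero
+            // otherwise), (x + mask) ^ mask yields |x|. E.g. x = -5: mask = -1,
+            // (-5 - 1) ^ -1 = -6 ^ -1 = 5; non-negative x passes through unchanged.
+            // abs(INT_MIN) is not representable as an int32, so the check below exits
+            // if the result is still 1 << 31.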
- speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Equal, result.gpr(), MacroAssembler::TrustedImm32(1 << 31)));
- integerResult(result.gpr(), m_compileIndex);
+ speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, result.gpr(), MacroAssembler::TrustedImm32(1 << 31)));
+ int32Result(result.gpr(), node);
break;
}
- SpeculateDoubleOperand op1(this, node.child1());
- FPRTemporary result(this);
-
- m_jit.absDouble(op1.fpr(), result.fpr());
- doubleResult(result.fpr(), m_compileIndex);
+
+ case DoubleRepUse: {
+ SpeculateDoubleOperand op1(this, node->child1());
+ FPRTemporary result(this);
+
+ m_jit.absDouble(op1.fpr(), result.fpr());
+ doubleResult(result.fpr(), node);
+ break;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
break;
}
case ArithMin:
case ArithMax: {
- if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) {
- SpeculateStrictInt32Operand op1(this, node.child1());
- SpeculateStrictInt32Operand op2(this, node.child2());
- GPRTemporary result(this, op1);
-
- MacroAssembler::Jump op1Less = m_jit.branch32(op == ArithMin ? MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1.gpr(), op2.gpr());
- m_jit.move(op2.gpr(), result.gpr());
- if (op1.gpr() != result.gpr()) {
+ switch (node->binaryUseKind()) {
+ case Int32Use: {
+ SpeculateStrictInt32Operand op1(this, node->child1());
+ SpeculateStrictInt32Operand op2(this, node->child2());
+ GPRTemporary result(this, Reuse, op1);
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg op2GPR = op2.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ MacroAssembler::Jump op1Less = m_jit.branch32(op == ArithMin ? MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1GPR, op2GPR);
+ m_jit.move(op2GPR, resultGPR);
+ if (op1GPR != resultGPR) {
MacroAssembler::Jump done = m_jit.jump();
op1Less.link(&m_jit);
- m_jit.move(op1.gpr(), result.gpr());
+ m_jit.move(op1GPR, resultGPR);
done.link(&m_jit);
} else
op1Less.link(&m_jit);
- integerResult(result.gpr(), m_compileIndex);
+ int32Result(resultGPR, node);
break;
}
- SpeculateDoubleOperand op1(this, node.child1());
- SpeculateDoubleOperand op2(this, node.child2());
- FPRTemporary result(this, op1);
-
- MacroAssembler::JumpList done;
+ case DoubleRepUse: {
+ SpeculateDoubleOperand op1(this, node->child1());
+ SpeculateDoubleOperand op2(this, node->child2());
+ FPRTemporary result(this, op1);
+
+ FPRReg op1FPR = op1.fpr();
+ FPRReg op2FPR = op2.fpr();
+ FPRReg resultFPR = result.fpr();
+
+ MacroAssembler::JumpList done;
- MacroAssembler::Jump op1Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleLessThan : MacroAssembler::DoubleGreaterThan, op1.fpr(), op2.fpr());
+ MacroAssembler::Jump op1Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleLessThan : MacroAssembler::DoubleGreaterThan, op1FPR, op2FPR);
- // op2 is eather the lesser one or one of then is NaN
- MacroAssembler::Jump op2Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleGreaterThanOrEqual : MacroAssembler::DoubleLessThanOrEqual, op1.fpr(), op2.fpr());
+                // op2 is either the lesser one or one of them is NaN
+ MacroAssembler::Jump op2Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleGreaterThanOrEqual : MacroAssembler::DoubleLessThanOrEqual, op1FPR, op2FPR);
- // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
- // op1 + op2 and putting it into result.
- m_jit.addDouble(op1.fpr(), op2.fpr(), result.fpr());
- done.append(m_jit.jump());
+ // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding
+ // op1 + op2 and putting it into result.
+ m_jit.addDouble(op1FPR, op2FPR, resultFPR);
+ done.append(m_jit.jump());
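+                // This matches Math.min/Math.max semantics: any NaN operand makes the
+                // result NaN, and IEEE addition propagates NaN (e.g. min(NaN, 5) gives
+                // NaN + 5 = NaN).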
- op2Less.link(&m_jit);
- m_jit.moveDouble(op2.fpr(), result.fpr());
+ op2Less.link(&m_jit);
+ m_jit.moveDouble(op2FPR, resultFPR);
- if (op1.fpr() != result.fpr()) {
- done.append(m_jit.jump());
+ if (op1FPR != resultFPR) {
+ done.append(m_jit.jump());
- op1Less.link(&m_jit);
- m_jit.moveDouble(op1.fpr(), result.fpr());
- } else
- op1Less.link(&m_jit);
+ op1Less.link(&m_jit);
+ m_jit.moveDouble(op1FPR, resultFPR);
+ } else
+ op1Less.link(&m_jit);
- done.link(&m_jit);
+ done.link(&m_jit);
- doubleResult(result.fpr(), m_compileIndex);
+ doubleResult(resultFPR, node);
+ break;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
break;
}
-
- case ArithSqrt: {
- SpeculateDoubleOperand op1(this, node.child1());
+
+ case ArithSqrt:
+ compileArithSqrt(node);
+ break;
+
+ case ArithFRound: {
+ SpeculateDoubleOperand op1(this, node->child1());
FPRTemporary result(this, op1);
- m_jit.sqrtDouble(op1.fpr(), result.fpr());
+ m_jit.convertDoubleToFloat(op1.fpr(), result.fpr());
+ m_jit.convertFloatToDouble(result.fpr(), result.fpr());
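+        // Narrowing to float and widening back produces a double whose value is
+        // exactly representable in single precision, which is what Math.fround
+        // requires.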
+
+ doubleResult(result.fpr(), node);
+ break;
+ }
+
+ case ArithRound:
+ compileArithRound(node);
+ break;
+
+ case ArithSin: {
+ SpeculateDoubleOperand op1(this, node->child1());
+ FPRReg op1FPR = op1.fpr();
+
+ flushRegisters();
+
+ FPRResult result(this);
+ callOperation(sin, result.fpr(), op1FPR);
+ doubleResult(result.fpr(), node);
+ break;
+ }
+
+ case ArithCos: {
+ SpeculateDoubleOperand op1(this, node->child1());
+ FPRReg op1FPR = op1.fpr();
+
+ flushRegisters();
- doubleResult(result.fpr(), m_compileIndex);
+ FPRResult result(this);
+ callOperation(cos, result.fpr(), op1FPR);
+ doubleResult(result.fpr(), node);
break;
}
+ case ArithLog:
+ compileArithLog(node);
+ break;
+
case LogicalNot:
compileLogicalNot(node);
break;
if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq))
return;
break;
+
+ case CompareEqConstant:
+ ASSERT(node->child2()->asJSValue().isNull());
+ if (nonSpeculativeCompareNull(node, node->child1()))
+ return;
+ break;
case CompareEq:
- if (isNullConstant(node.child1().index())) {
- if (nonSpeculativeCompareNull(node, node.child2()))
- return;
- break;
- }
- if (isNullConstant(node.child2().index())) {
- if (nonSpeculativeCompareNull(node, node.child1()))
- return;
- break;
- }
if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq))
return;
break;
break;
}
+ case StringFromCharCode: {
+ compileFromCharCode(node);
+ break;
+ }
+
+ case CheckArray: {
+ checkArray(node);
+ break;
+ }
+
+ case Arrayify:
+ case ArrayifyToStructure: {
+ arrayify(node);
+ break;
+ }
+
case GetByVal: {
- if (!node.prediction() || !at(node.child1()).prediction() || !at(node.child2()).prediction()) {
- terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
+ switch (node->arrayMode().type()) {
+ case Array::SelectUsingPredictions:
+ case Array::ForceExit:
+ RELEASE_ASSERT_NOT_REACHED();
+#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
+#endif
break;
- }
-
- if (!at(node.child2()).shouldSpeculateInteger() || !isActionableArrayPrediction(at(node.child1()).prediction())) {
- SpeculateCellOperand base(this, node.child1()); // Save a register, speculate cell. We'll probably be right.
- JSValueOperand property(this, node.child2());
+ case Array::Generic: {
+ SpeculateCellOperand base(this, node->child1()); // Save a register, speculate cell. We'll probably be right.
+ JSValueOperand property(this, node->child2());
GPRReg baseGPR = base.gpr();
GPRReg propertyTagGPR = property.tagGPR();
GPRReg propertyPayloadGPR = property.payloadGPR();
flushRegisters();
- GPRResult2 resultTag(this);
- GPRResult resultPayload(this);
+ GPRFlushedCallResult2 resultTag(this);
+ GPRFlushedCallResult resultPayload(this);
callOperation(operationGetByValCell, resultTag.gpr(), resultPayload.gpr(), baseGPR, propertyTagGPR, propertyPayloadGPR);
- jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
break;
}
+ case Array::Int32:
+ case Array::Contiguous: {
+ if (node->arrayMode().isInBounds()) {
+ SpeculateStrictInt32Operand property(this, node->child2());
+ StorageOperand storage(this, node->child3());
+
+ GPRReg propertyReg = property.gpr();
+ GPRReg storageReg = storage.gpr();
+
+ if (!m_compileOkay)
+ return;
+
+ speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
+
+ GPRTemporary resultPayload(this);
+ if (node->arrayMode().type() == Array::Int32) {
+ ASSERT(!node->arrayMode().isSaneChain());
+
+ speculationCheck(
+ OutOfBounds, JSValueRegs(), 0,
+ m_jit.branch32(
+ MacroAssembler::Equal,
+ MacroAssembler::BaseIndex(
+ storageReg, propertyReg, MacroAssembler::TimesEight, TagOffset),
+ TrustedImm32(JSValue::EmptyValueTag)));
+ m_jit.load32(
+ MacroAssembler::BaseIndex(
+ storageReg, propertyReg, MacroAssembler::TimesEight, PayloadOffset),
+ resultPayload.gpr());
+ int32Result(resultPayload.gpr(), node);
+ break;
+ }
+
+ GPRTemporary resultTag(this);
+ m_jit.load32(
+ MacroAssembler::BaseIndex(
+ storageReg, propertyReg, MacroAssembler::TimesEight, TagOffset),
+ resultTag.gpr());
+ m_jit.load32(
+ MacroAssembler::BaseIndex(
+ storageReg, propertyReg, MacroAssembler::TimesEight, PayloadOffset),
+ resultPayload.gpr());
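+                // A sane prototype chain guarantees that reading a hole yields
+                // undefined, so the hole can be converted inline below; otherwise a
+                // hole has to trigger an exit.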
+ if (node->arrayMode().isSaneChain()) {
+ JITCompiler::Jump notHole = m_jit.branch32(
+ MacroAssembler::NotEqual, resultTag.gpr(),
+ TrustedImm32(JSValue::EmptyValueTag));
+ m_jit.move(TrustedImm32(JSValue::UndefinedTag), resultTag.gpr());
+ m_jit.move(TrustedImm32(0), resultPayload.gpr());
+ notHole.link(&m_jit);
+ } else {
+ speculationCheck(
+ LoadFromHole, JSValueRegs(), 0,
+ m_jit.branch32(
+ MacroAssembler::Equal, resultTag.gpr(),
+ TrustedImm32(JSValue::EmptyValueTag)));
+ }
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
+ break;
+ }
- if (at(node.child1()).prediction() == PredictString) {
- compileGetByValOnString(node);
+ SpeculateCellOperand base(this, node->child1());
+ SpeculateStrictInt32Operand property(this, node->child2());
+ StorageOperand storage(this, node->child3());
+
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+ GPRReg storageReg = storage.gpr();
+
if (!m_compileOkay)
return;
+
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this);
+ GPRReg resultTagReg = resultTag.gpr();
+ GPRReg resultPayloadReg = resultPayload.gpr();
+
+ MacroAssembler::JumpList slowCases;
+
+ slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
+
+ m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagReg);
+ m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg);
+ slowCases.append(m_jit.branch32(MacroAssembler::Equal, resultTagReg, TrustedImm32(JSValue::EmptyValueTag)));
+
+ addSlowPathGenerator(
+ slowPathCall(
+ slowCases, this, operationGetByValArrayInt,
+ JSValueRegs(resultTagReg, resultPayloadReg), baseReg, propertyReg));
+
+ jsValueResult(resultTagReg, resultPayloadReg, node);
break;
}
-
- if (at(node.child1()).shouldSpeculateInt8Array()) {
- compileGetByValOnIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), node, sizeof(int8_t), isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ case Array::Double: {
+ if (node->arrayMode().isInBounds()) {
+ SpeculateStrictInt32Operand property(this, node->child2());
+ StorageOperand storage(this, node->child3());
+
+ GPRReg propertyReg = property.gpr();
+ GPRReg storageReg = storage.gpr();
+
+ if (!m_compileOkay)
+ return;
+
+ speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
+
+ FPRTemporary result(this);
+ m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.fpr());
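+            // Double arrays mark holes with a NaN bit pattern, so a loaded value that
+            // compares unordered with itself is a hole; with a sane prototype chain
+            // the NaN is allowed through, otherwise it forces an exit.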
+ if (!node->arrayMode().isSaneChain())
+ speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, result.fpr(), result.fpr()));
+ doubleResult(result.fpr(), node);
+ break;
+ }
+
+ SpeculateCellOperand base(this, node->child1());
+ SpeculateStrictInt32Operand property(this, node->child2());
+ StorageOperand storage(this, node->child3());
+
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
+ GPRReg storageReg = storage.gpr();
+
if (!m_compileOkay)
return;
- break;
+
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this);
+ FPRTemporary temp(this);
+ GPRReg resultTagReg = resultTag.gpr();
+ GPRReg resultPayloadReg = resultPayload.gpr();
+ FPRReg tempReg = temp.fpr();
+
+ MacroAssembler::JumpList slowCases;
+
+ slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())));
+
+ m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), tempReg);
+ slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempReg, tempReg));
+ boxDouble(tempReg, resultTagReg, resultPayloadReg);
+
+ addSlowPathGenerator(
+ slowPathCall(
+ slowCases, this, operationGetByValArrayInt,
+ JSValueRegs(resultTagReg, resultPayloadReg), baseReg, propertyReg));
+
+ jsValueResult(resultTagReg, resultPayloadReg, node);
+ break;
}
+ case Array::ArrayStorage:
+ case Array::SlowPutArrayStorage: {
+ if (node->arrayMode().isInBounds()) {
+ SpeculateStrictInt32Operand property(this, node->child2());
+ StorageOperand storage(this, node->child3());
+ GPRReg propertyReg = property.gpr();
+ GPRReg storageReg = storage.gpr();
- if (at(node.child1()).shouldSpeculateInt16Array()) {
- compileGetByValOnIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), node, sizeof(int16_t), isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (!m_compileOkay)
+ return;
+
+ speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())));
+
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this);
+
+ m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr());
+ speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag)));
+ m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr());
+
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
+ break;
+ }
+
+ SpeculateCellOperand base(this, node->child1());
+ SpeculateStrictInt32Operand property(this, node->child2());
+ StorageOperand storage(this, node->child3());
+ GPRReg propertyReg = property.gpr();
+ GPRReg storageReg = storage.gpr();
+ GPRReg baseReg = base.gpr();
+
if (!m_compileOkay)
return;
- break;
+
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this);
+ GPRReg resultTagReg = resultTag.gpr();
+ GPRReg resultPayloadReg = resultPayload.gpr();
+
+ JITCompiler::Jump outOfBounds = m_jit.branch32(
+ MacroAssembler::AboveOrEqual, propertyReg,
+ MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
+
+ m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagReg);
+ JITCompiler::Jump hole = m_jit.branch32(
+ MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag));
+ m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg);
+
+ JITCompiler::JumpList slowCases;
+ slowCases.append(outOfBounds);
+ slowCases.append(hole);
+ addSlowPathGenerator(
+ slowPathCall(
+ slowCases, this, operationGetByValArrayInt,
+ JSValueRegs(resultTagReg, resultPayloadReg),
+ baseReg, propertyReg));
+
+ jsValueResult(resultTagReg, resultPayloadReg, node);
+ break;
}
-
- if (at(node.child1()).shouldSpeculateInt32Array()) {
- compileGetByValOnIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), node, sizeof(int32_t), isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
- if (!m_compileOkay)
- return;
- break;
+ case Array::String:
+ compileGetByValOnString(node);
+ break;
+ case Array::DirectArguments:
+ compileGetByValOnDirectArguments(node);
+ break;
+ case Array::ScopedArguments:
+ compileGetByValOnScopedArguments(node);
+ break;
+ default: {
+ TypedArrayType type = node->arrayMode().typedArrayType();
+ if (isInt(type))
+ compileGetByValOnIntTypedArray(node, type);
+ else
+ compileGetByValOnFloatTypedArray(node, type);
+ } }
+ break;
+ }
+
+ case PutByValDirect:
+ case PutByVal:
+ case PutByValAlias: {
+ Edge child1 = m_jit.graph().varArgChild(node, 0);
+ Edge child2 = m_jit.graph().varArgChild(node, 1);
+ Edge child3 = m_jit.graph().varArgChild(node, 2);
+ Edge child4 = m_jit.graph().varArgChild(node, 3);
+
+ ArrayMode arrayMode = node->arrayMode().modeForPut();
+ bool alreadyHandled = false;
+
+ switch (arrayMode.type()) {
+ case Array::SelectUsingPredictions:
+ case Array::ForceExit:
+ RELEASE_ASSERT_NOT_REACHED();
+#if COMPILER_QUIRK(CONSIDERS_UNREACHABLE_CODE)
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
+ alreadyHandled = true;
+#endif
+ break;
+ case Array::Generic: {
+ ASSERT(node->op() == PutByVal || node->op() == PutByValDirect);
+
+ SpeculateCellOperand base(this, child1); // Save a register, speculate cell. We'll probably be right.
+ JSValueOperand property(this, child2);
+ JSValueOperand value(this, child3);
+ GPRReg baseGPR = base.gpr();
+ GPRReg propertyTagGPR = property.tagGPR();
+ GPRReg propertyPayloadGPR = property.payloadGPR();
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+
+ flushRegisters();
+ if (node->op() == PutByValDirect)
+ callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectCellStrict : operationPutByValDirectCellNonStrict, baseGPR, propertyTagGPR, propertyPayloadGPR, valueTagGPR, valuePayloadGPR);
+ else
+ callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict, baseGPR, propertyTagGPR, propertyPayloadGPR, valueTagGPR, valuePayloadGPR);
+
+ noResult(node);
+ alreadyHandled = true;
+ break;
}
-
- if (at(node.child1()).shouldSpeculateUint8Array()) {
- compileGetByValOnIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), node, sizeof(uint8_t), isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
- if (!m_compileOkay)
- return;
- break;
+ default:
+ break;
}
+
+ if (alreadyHandled)
+ break;
+
+ SpeculateCellOperand base(this, child1);
+ SpeculateStrictInt32Operand property(this, child2);
+
+ GPRReg baseReg = base.gpr();
+ GPRReg propertyReg = property.gpr();
- if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
- compileGetByValOnIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), node, sizeof(uint8_t), isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ switch (arrayMode.type()) {
+ case Array::Int32: {
+ SpeculateInt32Operand value(this, child3);
+
+ GPRReg valuePayloadReg = value.gpr();
+
if (!m_compileOkay)
return;
+
+ compileContiguousPutByVal(node, base, property, value, valuePayloadReg, TrustedImm32(JSValue::Int32Tag));
break;
}
+ case Array::Contiguous: {
+ JSValueOperand value(this, child3);
- if (at(node.child1()).shouldSpeculateUint16Array()) {
- compileGetByValOnIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), node, sizeof(uint16_t), isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
- if (!m_compileOkay)
- return;
- break;
- }
+ GPRReg valueTagReg = value.tagGPR();
+ GPRReg valuePayloadReg = value.payloadGPR();
- if (at(node.child1()).shouldSpeculateUint32Array()) {
- compileGetByValOnIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), node, sizeof(uint32_t), isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
return;
- break;
- }
-
- if (at(node.child1()).shouldSpeculateFloat32Array()) {
- compileGetByValOnFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), node, sizeof(float), isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
- if (!m_compileOkay)
- return;
- break;
- }
-
- if (at(node.child1()).shouldSpeculateFloat64Array()) {
- compileGetByValOnFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), node, sizeof(double), isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
- if (!m_compileOkay)
- return;
- break;
- }
-
- ASSERT(at(node.child1()).shouldSpeculateArray());
-
- SpeculateStrictInt32Operand property(this, node.child2());
- StorageOperand storage(this, node.child3());
- GPRReg propertyReg = property.gpr();
- GPRReg storageReg = storage.gpr();
-
- if (!m_compileOkay)
- return;
-
- // Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
- // If we have predicted the base to be type array, we can skip the check.
- {
- SpeculateCellOperand base(this, node.child1());
- GPRReg baseReg = base.gpr();
- if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset())));
- }
-
- GPRTemporary resultTag(this);
- GPRTemporary resultPayload(this);
-
- // FIXME: In cases where there are subsequent by_val accesses to the same base it might help to cache
- // the storage pointer - especially if there happens to be another register free right now. If we do so,
- // then we'll need to allocate a new temporary for result.
- m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag)));
- m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr());
-
- jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
- break;
- }
- case PutByVal: {
- if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
- terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
- break;
- }
-
- if (!at(node.child2()).shouldSpeculateInteger() || !isActionableMutableArrayPrediction(at(node.child1()).prediction())) {
- SpeculateCellOperand base(this, node.child1()); // Save a register, speculate cell. We'll probably be right.
- JSValueOperand property(this, node.child2());
- JSValueOperand value(this, node.child3());
- GPRReg baseGPR = base.gpr();
- GPRReg propertyTagGPR = property.tagGPR();
- GPRReg propertyPayloadGPR = property.payloadGPR();
- GPRReg valueTagGPR = value.tagGPR();
- GPRReg valuePayloadGPR = value.payloadGPR();
-
- flushRegisters();
- callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict, baseGPR, propertyTagGPR, propertyPayloadGPR, valueTagGPR, valuePayloadGPR);
-
- noResult(m_compileIndex);
+ compileContiguousPutByVal(node, base, property, value, valuePayloadReg, valueTagReg);
break;
}
-
- SpeculateCellOperand base(this, node.child1());
- SpeculateStrictInt32Operand property(this, node.child2());
- if (at(node.child1()).shouldSpeculateInt8Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
- if (!m_compileOkay)
- return;
- break;
- }
-
- if (at(node.child1()).shouldSpeculateInt16Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
- if (!m_compileOkay)
- return;
- break;
- }
-
- if (at(node.child1()).shouldSpeculateInt32Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
- if (!m_compileOkay)
- return;
- break;
- }
-
- if (at(node.child1()).shouldSpeculateUint8Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
- if (!m_compileOkay)
- return;
- break;
- }
-
- if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray, ClampRounding);
- if (!m_compileOkay)
- return;
+ case Array::Double: {
+ compileDoublePutByVal(node, base, property);
break;
}
+ case Array::ArrayStorage:
+ case Array::SlowPutArrayStorage: {
+ JSValueOperand value(this, child3);
- if (at(node.child1()).shouldSpeculateUint16Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
- if (!m_compileOkay)
- return;
- break;
- }
-
- if (at(node.child1()).shouldSpeculateUint32Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
- if (!m_compileOkay)
- return;
- break;
- }
-
- if (at(node.child1()).shouldSpeculateFloat32Array()) {
- compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
- if (!m_compileOkay)
- return;
- break;
- }
-
- if (at(node.child1()).shouldSpeculateFloat64Array()) {
- compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ GPRReg valueTagReg = value.tagGPR();
+ GPRReg valuePayloadReg = value.payloadGPR();
+
if (!m_compileOkay)
return;
- break;
- }
-
- ASSERT(at(node.child1()).shouldSpeculateArray());
-
- JSValueOperand value(this, node.child3());
- GPRTemporary scratch(this);
-
- // Map base, property & value into registers, allocate a scratch register.
- GPRReg baseReg = base.gpr();
- GPRReg propertyReg = property.gpr();
- GPRReg valueTagReg = value.tagGPR();
- GPRReg valuePayloadReg = value.payloadGPR();
- GPRReg scratchReg = scratch.gpr();
-
- if (!m_compileOkay)
- return;
-
- writeBarrier(baseReg, valueTagReg, node.child3(), WriteBarrierForPropertyAccess, scratchReg);
-
- // Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
- // If we have predicted the base to be type array, we can skip the check.
- if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
-
- base.use();
- property.use();
- value.use();
-
- MacroAssembler::Jump withinArrayBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset()));
-
- // Code to handle put beyond array bounds.
- silentSpillAllRegisters(scratchReg);
- callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict, baseReg, propertyReg, valueTagReg, valuePayloadReg);
- silentFillAllRegisters(scratchReg);
- JITCompiler::Jump wasBeyondArrayBounds = m_jit.jump();
-
- withinArrayBounds.link(&m_jit);
-
- // Get the array storage.
- GPRReg storageReg = scratchReg;
- m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg);
-
- // Check if we're writing to a hole; if so increment m_numValuesInVector.
- MacroAssembler::Jump notHoleValue = m_jit.branch32(MacroAssembler::NotEqual, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
- m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
-
- // If we're writing to a hole we might be growing the array;
- MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_length)));
- m_jit.add32(TrustedImm32(1), propertyReg);
- m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_length)));
- m_jit.sub32(TrustedImm32(1), propertyReg);
- lengthDoesNotNeedUpdate.link(&m_jit);
- notHoleValue.link(&m_jit);
+ StorageOperand storage(this, child4);
+ GPRReg storageReg = storage.gpr();
- // Store the value to the array.
- m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
- m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
-
- wasBeyondArrayBounds.link(&m_jit);
+ if (node->op() == PutByValAlias) {
+ // Store the value to the array.
+ GPRReg propertyReg = property.gpr();
+ m_jit.store32(value.tagGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(value.payloadGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+
+ noResult(node);
+ break;
+ }
- noResult(m_compileIndex, UseChildrenCalledExplicitly);
- break;
- }
+ MacroAssembler::JumpList slowCases;
- case PutByValAlias: {
- if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
- terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
- break;
- }
-
- ASSERT(isActionableMutableArrayPrediction(at(node.child1()).prediction()));
- ASSERT(at(node.child2()).shouldSpeculateInteger());
+ MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()));
+ if (!arrayMode.isOutOfBounds())
+ speculationCheck(OutOfBounds, JSValueRegs(), 0, beyondArrayBounds);
+ else
+ slowCases.append(beyondArrayBounds);
- SpeculateCellOperand base(this, node.child1());
- SpeculateStrictInt32Operand property(this, node.child2());
+ // Check if we're writing to a hole; if so increment m_numValuesInVector.
+ if (arrayMode.isInBounds()) {
+ speculationCheck(
+ StoreToHole, JSValueRegs(), 0,
+ m_jit.branch32(MacroAssembler::Equal, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag)));
+ } else {
+ MacroAssembler::Jump notHoleValue = m_jit.branch32(MacroAssembler::NotEqual, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+ if (arrayMode.isSlowPut()) {
+ // This is sort of strange. If we wanted to optimize this code path, we would invert
+ // the above branch. But it's simply not worth it since this only happens if we're
+ // already having a bad time.
+ slowCases.append(m_jit.jump());
+ } else {
+ m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset()));
+
+ // If we're writing to a hole we might be growing the array;
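+                        // if so the new length becomes index + 1. propertyReg is bumped
+                        // to index + 1 just long enough to store the new length and is
+                        // then restored, e.g. writing index 5 into a length-3 array
+                        // leaves the length at 6.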
+ MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
+ m_jit.add32(TrustedImm32(1), propertyReg);
+ m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset()));
+ m_jit.sub32(TrustedImm32(1), propertyReg);
+
+ lengthDoesNotNeedUpdate.link(&m_jit);
+ }
+ notHoleValue.link(&m_jit);
+ }
+
+ // Store the value to the array.
+ m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
- if (at(node.child1()).shouldSpeculateInt8Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), NoTypedArraySpecCheck, SignedTypedArray);
- if (!m_compileOkay)
- return;
- break;
- }
-
- if (at(node.child1()).shouldSpeculateInt16Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), NoTypedArraySpecCheck, SignedTypedArray);
- if (!m_compileOkay)
- return;
- break;
- }
-
- if (at(node.child1()).shouldSpeculateInt32Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), NoTypedArraySpecCheck, SignedTypedArray);
- if (!m_compileOkay)
- return;
- break;
- }
-
- if (at(node.child1()).shouldSpeculateUint8Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray);
- if (!m_compileOkay)
- return;
- break;
- }
+ base.use();
+ property.use();
+ value.use();
+ storage.use();
+
+ if (!slowCases.empty()) {
+ if (node->op() == PutByValDirect) {
+ addSlowPathGenerator(slowPathCall(
+ slowCases, this,
+ m_jit.codeBlock()->isStrictMode() ? operationPutByValDirectBeyondArrayBoundsStrict : operationPutByValDirectBeyondArrayBoundsNonStrict,
+ NoResult, baseReg, propertyReg, valueTagReg, valuePayloadReg));
+ } else {
+ addSlowPathGenerator(slowPathCall(
+ slowCases, this,
+ m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict,
+ NoResult, baseReg, propertyReg, valueTagReg, valuePayloadReg));
+ }
+ }
- if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray, ClampRounding);
- if (!m_compileOkay)
- return;
+ noResult(node, UseChildrenCalledExplicitly);
break;
}
-
- if (at(node.child1()).shouldSpeculateUint16Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), NoTypedArraySpecCheck, UnsignedTypedArray);
- if (!m_compileOkay)
- return;
- break;
- }
-
- if (at(node.child1()).shouldSpeculateUint32Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), NoTypedArraySpecCheck, UnsignedTypedArray);
- if (!m_compileOkay)
- return;
- break;
- }
-
- if (at(node.child1()).shouldSpeculateFloat32Array()) {
- compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), NoTypedArraySpecCheck);
- if (!m_compileOkay)
- return;
- break;
- }
-
- if (at(node.child1()).shouldSpeculateFloat64Array()) {
- compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), NoTypedArraySpecCheck);
- if (!m_compileOkay)
- return;
- break;
- }
-
- ASSERT(at(node.child1()).shouldSpeculateArray());
-
- JSValueOperand value(this, node.child3());
- GPRTemporary scratch(this, base);
-
- GPRReg baseReg = base.gpr();
- GPRReg scratchReg = scratch.gpr();
-
- writeBarrier(baseReg, value.tagGPR(), node.child3(), WriteBarrierForPropertyAccess, scratchReg);
-
- // Get the array storage.
- GPRReg storageReg = scratchReg;
- m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg);
-
- // Store the value to the array.
- GPRReg propertyReg = property.gpr();
- m_jit.store32(value.tagGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
- m_jit.store32(value.payloadGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
-
- noResult(m_compileIndex);
+
+ default: {
+ TypedArrayType type = arrayMode.typedArrayType();
+ if (isInt(type))
+ compilePutByValForIntTypedArray(base.gpr(), property.gpr(), node, type);
+ else
+ compilePutByValForFloatTypedArray(base.gpr(), property.gpr(), node, type);
+ } }
break;
}
if (compileRegExpExec(node))
return;
- if (!node.adjustedRefCount()) {
- SpeculateCellOperand base(this, node.child1());
- SpeculateCellOperand argument(this, node.child2());
+ if (!node->adjustedRefCount()) {
+ SpeculateCellOperand base(this, node->child1());
+ SpeculateCellOperand argument(this, node->child2());
GPRReg baseGPR = base.gpr();
GPRReg argumentGPR = argument.gpr();
flushRegisters();
- GPRResult result(this);
+ GPRFlushedCallResult result(this);
callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
// Must use jsValueResult because otherwise we screw up register
// allocation, which thinks that this node has a result.
- booleanResult(result.gpr(), m_compileIndex);
+ booleanResult(result.gpr(), node);
break;
}
- SpeculateCellOperand base(this, node.child1());
- SpeculateCellOperand argument(this, node.child2());
+ SpeculateCellOperand base(this, node->child1());
+ SpeculateCellOperand argument(this, node->child2());
GPRReg baseGPR = base.gpr();
GPRReg argumentGPR = argument.gpr();
flushRegisters();
- GPRResult2 resultTag(this);
- GPRResult resultPayload(this);
+ GPRFlushedCallResult2 resultTag(this);
+ GPRFlushedCallResult resultPayload(this);
callOperation(operationRegExpExec, resultTag.gpr(), resultPayload.gpr(), baseGPR, argumentGPR);
- jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
break;
}
case RegExpTest: {
- SpeculateCellOperand base(this, node.child1());
- SpeculateCellOperand argument(this, node.child2());
+ SpeculateCellOperand base(this, node->child1());
+ SpeculateCellOperand argument(this, node->child2());
GPRReg baseGPR = base.gpr();
GPRReg argumentGPR = argument.gpr();
flushRegisters();
- GPRResult result(this);
+ GPRFlushedCallResult result(this);
callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR);
// If we add a DataFormatBool, we should use it here.
- booleanResult(result.gpr(), m_compileIndex);
+ booleanResult(result.gpr(), node);
break;
}
case ArrayPush: {
- SpeculateCellOperand base(this, node.child1());
- JSValueOperand value(this, node.child2());
- GPRTemporary storage(this);
+ ASSERT(node->arrayMode().isJSArray());
+
+ SpeculateCellOperand base(this, node->child1());
GPRTemporary storageLength(this);
GPRReg baseGPR = base.gpr();
- GPRReg valueTagGPR = value.tagGPR();
- GPRReg valuePayloadGPR = value.payloadGPR();
- GPRReg storageGPR = storage.gpr();
GPRReg storageLengthGPR = storageLength.gpr();
- writeBarrier(baseGPR, valueTagGPR, node.child2(), WriteBarrierForPropertyAccess, storageGPR, storageLengthGPR);
-
- if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+ StorageOperand storage(this, node->child3());
+ GPRReg storageGPR = storage.gpr();
- m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), storageGPR);
- m_jit.load32(MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), storageLengthGPR);
+ switch (node->arrayMode().type()) {
+ case Array::Int32: {
+ SpeculateInt32Operand value(this, node->child2());
+ GPRReg valuePayloadGPR = value.gpr();
+
+ m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
+ MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
+ m_jit.store32(TrustedImm32(JSValue::Int32Tag), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ m_jit.add32(TrustedImm32(1), storageLengthGPR);
+ m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR);
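+            // push() returns the new length; storageGPR is no longer needed, so it is
+            // reused to hold the Int32Tag and (storageGPR, storageLengthGPR) becomes
+            // the boxed result.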
+
+ addSlowPathGenerator(
+ slowPathCall(
+ slowPath, this, operationArrayPush,
+ JSValueRegs(storageGPR, storageLengthGPR),
+ TrustedImm32(JSValue::Int32Tag), valuePayloadGPR, baseGPR));
- // Refuse to handle bizarre lengths.
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe)));
+ jsValueResult(storageGPR, storageLengthGPR, node);
+ break;
+ }
+
+ case Array::Contiguous: {
+ JSValueOperand value(this, node->child2());
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+
+ m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
+ MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
+ m_jit.store32(valueTagGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ m_jit.add32(TrustedImm32(1), storageLengthGPR);
+ m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR);
+
+ addSlowPathGenerator(
+ slowPathCall(
+ slowPath, this, operationArrayPush,
+ JSValueRegs(storageGPR, storageLengthGPR),
+ valueTagGPR, valuePayloadGPR, baseGPR));
- MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(baseGPR, JSArray::vectorLengthOffset()));
+ jsValueResult(storageGPR, storageLengthGPR, node);
+ break;
+ }
+
+ case Array::Double: {
+ SpeculateDoubleOperand value(this, node->child2());
+ FPRReg valueFPR = value.fpr();
+
+ DFG_TYPE_CHECK(
+ JSValueRegs(), node->child2(), SpecDoubleReal,
+ m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, valueFPR, valueFPR));
+
+ m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR);
+ MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
+ m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight));
+ m_jit.add32(TrustedImm32(1), storageLengthGPR);
+ m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR);
+
+ addSlowPathGenerator(
+ slowPathCall(
+ slowPath, this, operationArrayPushDouble,
+ JSValueRegs(storageGPR, storageLengthGPR),
+ valueFPR, baseGPR));
- m_jit.store32(valueTagGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
- m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ jsValueResult(storageGPR, storageLengthGPR, node);
+ break;
+ }
+
+ case Array::ArrayStorage: {
+ JSValueOperand value(this, node->child2());
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+
+ m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
- m_jit.add32(TrustedImm32(1), storageLengthGPR);
- m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)));
- m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
- m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR);
+ // Refuse to handle bizarre lengths.
+ speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe)));
- MacroAssembler::Jump done = m_jit.jump();
+ MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
- slowPath.link(&m_jit);
+ m_jit.store32(valueTagGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
- silentSpillAllRegisters(storageGPR, storageLengthGPR);
- callOperation(operationArrayPush, storageGPR, storageLengthGPR, valueTagGPR, valuePayloadGPR, baseGPR);
- silentFillAllRegisters(storageGPR, storageLengthGPR);
+ m_jit.add32(TrustedImm32(1), storageLengthGPR);
+ m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
+ m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR);
- done.link(&m_jit);
+ addSlowPathGenerator(slowPathCall(slowPath, this, operationArrayPush, JSValueRegs(storageGPR, storageLengthGPR), valueTagGPR, valuePayloadGPR, baseGPR));
- jsValueResult(storageGPR, storageLengthGPR, m_compileIndex);
+ jsValueResult(storageGPR, storageLengthGPR, node);
+ break;
+ }
+
+ default:
+ CRASH();
+ break;
+ }
break;
}
case ArrayPop: {
- SpeculateCellOperand base(this, node.child1());
+ ASSERT(node->arrayMode().isJSArray());
+
+ SpeculateCellOperand base(this, node->child1());
+ StorageOperand storage(this, node->child2());
GPRTemporary valueTag(this);
GPRTemporary valuePayload(this);
- GPRTemporary storage(this);
- GPRTemporary storageLength(this);
GPRReg baseGPR = base.gpr();
GPRReg valueTagGPR = valueTag.gpr();
GPRReg valuePayloadGPR = valuePayload.gpr();
GPRReg storageGPR = storage.gpr();
- GPRReg storageLengthGPR = storageLength.gpr();
-
- if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
- m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), storageGPR);
- m_jit.load32(MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), storageLengthGPR);
+ switch (node->arrayMode().type()) {
+ case Array::Int32:
+ case Array::Contiguous: {
+ m_jit.load32(
+ MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), valuePayloadGPR);
+ MacroAssembler::Jump undefinedCase =
+ m_jit.branchTest32(MacroAssembler::Zero, valuePayloadGPR);
+ m_jit.sub32(TrustedImm32(1), valuePayloadGPR);
+ m_jit.store32(
+ valuePayloadGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
+ m_jit.load32(
+ MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)),
+ valueTagGPR);
+ MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
+ m_jit.store32(
+ MacroAssembler::TrustedImm32(JSValue::EmptyValueTag),
+ MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.load32(
+ MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)),
+ valuePayloadGPR);
+
+ addSlowPathGenerator(
+ slowPathMove(
+ undefinedCase, this,
+ MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR,
+ MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR));
+ addSlowPathGenerator(
+ slowPathCall(
+ slowCase, this, operationArrayPopAndRecoverLength,
+ JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR));
+
+ jsValueResult(valueTagGPR, valuePayloadGPR, node);
+ break;
+ }
+
+ case Array::Double: {
+ FPRTemporary temp(this);
+ FPRReg tempFPR = temp.fpr();
+
+ m_jit.load32(
+ MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), valuePayloadGPR);
+ MacroAssembler::Jump undefinedCase =
+ m_jit.branchTest32(MacroAssembler::Zero, valuePayloadGPR);
+ m_jit.sub32(TrustedImm32(1), valuePayloadGPR);
+ m_jit.store32(
+ valuePayloadGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
+ m_jit.loadDouble(
+ MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight),
+ tempFPR);
+ MacroAssembler::Jump slowCase = m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempFPR, tempFPR);
+ JSValue nan = JSValue(JSValue::EncodeAsDouble, PNaN);
+ m_jit.store32(
+ MacroAssembler::TrustedImm32(nan.u.asBits.tag),
+ MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(
+ MacroAssembler::TrustedImm32(nan.u.asBits.payload),
+ MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ boxDouble(tempFPR, valueTagGPR, valuePayloadGPR);
+
+ addSlowPathGenerator(
+ slowPathMove(
+ undefinedCase, this,
+ MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR,
+ MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR));
+ addSlowPathGenerator(
+ slowPathCall(
+ slowCase, this, operationArrayPopAndRecoverLength,
+ JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR));
+
+ jsValueResult(valueTagGPR, valuePayloadGPR, node);
+ break;
+ }
+
+ case Array::ArrayStorage: {
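+ // ArrayStorage keeps its length and m_numValuesInVector in the storage header.
+ // An empty array and a popped hole both produce jsUndefined through the shared
+ // slow-path move; an index at or beyond the vector length goes to operationArrayPop.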
+ GPRTemporary storageLength(this);
+ GPRReg storageLengthGPR = storageLength.gpr();
+
+ m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR);
- MacroAssembler::Jump emptyArrayCase = m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR);
+ JITCompiler::JumpList setUndefinedCases;
+ setUndefinedCases.append(m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR));
- m_jit.sub32(TrustedImm32(1), storageLengthGPR);
+ m_jit.sub32(TrustedImm32(1), storageLengthGPR);
- MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(baseGPR, JSArray::vectorLengthOffset()));
+ MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset()));
- m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), valueTagGPR);
- m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), valuePayloadGPR);
+ m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), valueTagGPR);
+ m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), valuePayloadGPR);
- m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()));
- MacroAssembler::Jump holeCase = m_jit.branch32(MacroAssembler::Equal, TrustedImm32(JSValue::EmptyValueTag), valueTagGPR);
+ setUndefinedCases.append(m_jit.branch32(MacroAssembler::Equal, TrustedImm32(JSValue::EmptyValueTag), valueTagGPR));
- m_jit.store32(TrustedImm32(JSValue::EmptyValueTag), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(TrustedImm32(JSValue::EmptyValueTag), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
- m_jit.sub32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ m_jit.sub32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
- MacroAssembler::JumpList done;
+ addSlowPathGenerator(
+ slowPathMove(
+ setUndefinedCases, this,
+ MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR,
+ MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR));
- done.append(m_jit.jump());
-
- holeCase.link(&m_jit);
- emptyArrayCase.link(&m_jit);
- m_jit.move(MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR);
- m_jit.move(MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR);
- done.append(m_jit.jump());
-
- slowCase.link(&m_jit);
-
- silentSpillAllRegisters(valueTagGPR, valuePayloadGPR);
- callOperation(operationArrayPop, valueTagGPR, valuePayloadGPR, baseGPR);
- silentFillAllRegisters(valueTagGPR, valuePayloadGPR);
-
- done.link(&m_jit);
-
- jsValueResult(valueTagGPR, valuePayloadGPR, m_compileIndex);
+ addSlowPathGenerator(
+ slowPathCall(
+ slowCase, this, operationArrayPop,
+ JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR));
+
+ jsValueResult(valueTagGPR, valuePayloadGPR, node);
+ break;
+ }
+
+ default:
+ CRASH();
+ break;
+ }
break;
}
case DFG::Jump: {
- BlockIndex taken = node.takenBlockIndex();
- jump(taken);
- noResult(m_compileIndex);
+ jump(node->targetBlock());
+ noResult(node);
break;
}
case Branch:
- if (isStrictInt32(node.child1().index()) || at(node.child1()).shouldSpeculateInteger()) {
- SpeculateIntegerOperand op(this, node.child1());
-
- BlockIndex taken = node.takenBlockIndex();
- BlockIndex notTaken = node.notTakenBlockIndex();
-
- MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
-
- if (taken == (m_block + 1)) {
- condition = MacroAssembler::Zero;
- BlockIndex tmp = taken;
- taken = notTaken;
- notTaken = tmp;
- }
-
- branchTest32(condition, op.gpr(), taken);
- jump(notTaken);
-
- noResult(m_compileIndex);
- break;
- }
emitBranch(node);
break;
+
+ case Switch:
+ emitSwitch(node);
+ break;
case Return: {
ASSERT(GPRInfo::callFrameRegister != GPRInfo::regT2);
ASSERT(GPRInfo::regT1 != GPRInfo::returnValueGPR);
ASSERT(GPRInfo::returnValueGPR != GPRInfo::callFrameRegister);
-#if DFG_ENABLE(SUCCESS_STATS)
- static SamplingCounter counter("SpeculativeJIT");
- m_jit.emitCount(counter);
-#endif
-
// Return the result in returnValueGPR.
- JSValueOperand op1(this, node.child1());
+ JSValueOperand op1(this, node->child1());
op1.fill();
if (op1.isDouble())
boxDouble(op1.fpr(), GPRInfo::returnValueGPR2, GPRInfo::returnValueGPR);
}
}
- // Grab the return address.
- m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, GPRInfo::regT2);
- // Restore our caller's "r".
- m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, GPRInfo::callFrameRegister);
- // Return.
- m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT2);
+ m_jit.emitFunctionEpilogue();
m_jit.ret();
- noResult(m_compileIndex);
+ noResult(node);
break;
}
case ThrowReferenceError: {
// We expect that throw statements are rare and are intended to exit the code block
// anyway, so we just OSR back to the old JIT for now.
- terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
break;
}
- case ToPrimitive: {
- if (at(node.child1()).shouldSpeculateInteger()) {
- // It's really profitable to speculate integer, since it's really cheap,
- // it means we don't have to do any real work, and we emit a lot less code.
+ case BooleanToNumber: {
+ switch (node->child1().useKind()) {
+ case BooleanUse: {
+ SpeculateBooleanOperand value(this, node->child1());
+ GPRTemporary result(this); // FIXME: We could reuse, but on speculation fail would need recovery to restore tag (akin to add).
+
+ m_jit.move(value.gpr(), result.gpr());
+
+ int32Result(result.gpr(), node);
+ break;
+ }
- SpeculateIntegerOperand op1(this, node.child1());
- GPRTemporary result(this, op1);
+ case UntypedUse: {
+ JSValueOperand value(this, node->child1());
- ASSERT(op1.format() == DataFormatInteger);
- m_jit.move(op1.gpr(), result.gpr());
+ if (!m_interpreter.needsTypeCheck(node->child1(), SpecBoolInt32 | SpecBoolean)) {
+ GPRTemporary result(this);
+
+ GPRReg valueGPR = value.payloadGPR();
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.move(valueGPR, resultGPR);
+ int32Result(result.gpr(), node);
+ break;
+ }
+
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this);
+
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
- integerResult(result.gpr(), m_compileIndex);
+ m_jit.move(valuePayloadGPR, resultPayloadGPR);
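+ // A boolean's payload is already 0 or 1, so conversion is just a retag: keep the
+ // original tag for non-booleans, switch to Int32Tag when the tag says boolean.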
+ JITCompiler::Jump isBoolean = m_jit.branch32(
+ JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::BooleanTag));
+ m_jit.move(valueTagGPR, resultTagGPR);
+ JITCompiler::Jump done = m_jit.jump();
+ isBoolean.link(&m_jit);
+ m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR);
+ done.link(&m_jit);
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, node);
break;
}
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ break;
+ }
- // FIXME: Add string speculation here.
-
- JSValueOperand op1(this, node.child1());
- GPRTemporary resultTag(this, op1);
- GPRTemporary resultPayload(this, op1, false);
+ case ToPrimitive: {
+ RELEASE_ASSERT(node->child1().useKind() == UntypedUse);
+ JSValueOperand op1(this, node->child1());
+ GPRTemporary resultTag(this, Reuse, op1, TagWord);
+ GPRTemporary resultPayload(this, Reuse, op1, PayloadWord);
GPRReg op1TagGPR = op1.tagGPR();
GPRReg op1PayloadGPR = op1.payloadGPR();
op1.use();
- if (!(m_state.forNode(node.child1()).m_type & ~(PredictNumber | PredictBoolean))) {
+ if (!(m_state.forNode(node->child1()).m_type & ~(SpecFullNumber | SpecBoolean))) {
m_jit.move(op1TagGPR, resultTagGPR);
m_jit.move(op1PayloadGPR, resultPayloadGPR);
} else {
- MacroAssembler::JumpList alreadyPrimitive;
-
- alreadyPrimitive.append(m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, TrustedImm32(JSValue::CellTag)));
- alreadyPrimitive.append(m_jit.branchPtr(MacroAssembler::Equal, MacroAssembler::Address(op1PayloadGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info)));
-
- silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
- callOperation(operationToPrimitive, resultTagGPR, resultPayloadGPR, op1TagGPR, op1PayloadGPR);
- silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
-
- MacroAssembler::Jump done = m_jit.jump();
+ MacroAssembler::Jump alreadyPrimitive = m_jit.branchIfNotCell(op1.jsValueRegs());
+ MacroAssembler::Jump notPrimitive = m_jit.branchIfObject(op1PayloadGPR);
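+ // Non-cells and non-object cells (strings, for instance) are already primitive and
+ // pass through unchanged; only objects take the operationToPrimitive slow path.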
alreadyPrimitive.link(&m_jit);
m_jit.move(op1TagGPR, resultTagGPR);
m_jit.move(op1PayloadGPR, resultPayloadGPR);
- done.link(&m_jit);
+ addSlowPathGenerator(
+ slowPathCall(
+ notPrimitive, this, operationToPrimitive,
+ JSValueRegs(resultTagGPR, resultPayloadGPR), op1TagGPR, op1PayloadGPR));
+ }
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ case ToString:
+ case CallStringConstructor: {
+ if (node->child1().useKind() == UntypedUse) {
+ JSValueOperand op1(this, node->child1());
+ GPRReg op1PayloadGPR = op1.payloadGPR();
+ GPRReg op1TagGPR = op1.tagGPR();
+
+ GPRFlushedCallResult result(this);
+ GPRReg resultGPR = result.gpr();
+
+ flushRegisters();
+
+ JITCompiler::Jump done;
+ if (node->child1()->prediction() & SpecString) {
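+ // Profiling saw strings, so check for a string cell inline and reuse it as the
+ // result; anything else drops to the operation call below.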
+ JITCompiler::Jump slowPath1 = m_jit.branchIfNotCell(op1.jsValueRegs());
+ JITCompiler::Jump slowPath2 = m_jit.branchIfNotString(op1PayloadGPR);
+ m_jit.move(op1PayloadGPR, resultGPR);
+ done = m_jit.jump();
+ slowPath1.link(&m_jit);
+ slowPath2.link(&m_jit);
+ }
+ if (op == ToString)
+ callOperation(operationToString, resultGPR, op1TagGPR, op1PayloadGPR);
+ else {
+ ASSERT(op == CallStringConstructor);
+ callOperation(operationCallStringConstructor, resultGPR, op1TagGPR, op1PayloadGPR);
+ }
+ if (done.isSet())
+ done.link(&m_jit);
+ cellResult(resultGPR, node);
+ break;
}
- jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ compileToStringOrCallStringConstructorOnCell(node);
+ break;
+ }
+
+ case NewStringObject: {
+ compileNewStringObject(node);
break;
}
- case StrCat:
case NewArray: {
- // We really don't want to grow the register file just to do a StrCat or NewArray.
- // Say we have 50 functions on the stack that all have a StrCat in them that has
- // upwards of 10 operands. In the DFG this would mean that each one gets
- // some random virtual register, and then to do the StrCat we'd need a second
- // span of 10 operands just to have somewhere to copy the 10 operands to, where
- // they'd be contiguous and we could easily tell the C code how to find them.
- // Ugly! So instead we use the scratchBuffer infrastructure in JSGlobalData. That
- // way, those 50 functions will share the same scratchBuffer for offloading their
- // StrCat operands. It's about as good as we can do, unless we start doing
- // virtual register coalescing to ensure that operands to StrCat get spilled
- // in exactly the place where StrCat wants them, or else have the StrCat
- // refer to those operands' SetLocal instructions to force them to spill in
- // the right place. Basically, any way you cut it, the current approach
- // probably has the best balance of performance and sensibility in the sense
- // that it does not increase the complexity of the DFG JIT just to make StrCat
- // fast and pretty.
-
- size_t scratchSize = sizeof(EncodedJSValue) * node.numChildren();
- ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(scratchSize);
+ JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
+ if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) {
+ Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType());
+ ASSERT(structure->indexingType() == node->indexingType());
+ ASSERT(
+ hasUndecided(structure->indexingType())
+ || hasInt32(structure->indexingType())
+ || hasDouble(structure->indexingType())
+ || hasContiguous(structure->indexingType()));
+
+ unsigned numElements = node->numChildren();
+
+ GPRTemporary result(this);
+ GPRTemporary storage(this);
+
+ GPRReg resultGPR = result.gpr();
+ GPRReg storageGPR = storage.gpr();
+
+ emitAllocateJSArray(resultGPR, structure, storageGPR, numElements);
+
+ // At this point, one way or another, resultGPR and storageGPR have pointers to
+ // the JSArray and the Butterfly, respectively.
+
+ ASSERT(!hasUndecided(structure->indexingType()) || !node->numChildren());
+
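+ // Store each operand in the representation the indexing type requires: raw doubles
+ // for ArrayWithDouble, Int32-tagged payloads for ArrayWithInt32, and full
+ // tag/payload pairs for contiguous storage.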
+ for (unsigned operandIdx = 0; operandIdx < node->numChildren(); ++operandIdx) {
+ Edge use = m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx];
+ switch (node->indexingType()) {
+ case ALL_BLANK_INDEXING_TYPES:
+ case ALL_UNDECIDED_INDEXING_TYPES:
+ CRASH();
+ break;
+ case ALL_DOUBLE_INDEXING_TYPES: {
+ SpeculateDoubleOperand operand(this, use);
+ FPRReg opFPR = operand.fpr();
+ DFG_TYPE_CHECK(
+ JSValueRegs(), use, SpecDoubleReal,
+ m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR));
+
+ m_jit.storeDouble(opFPR, MacroAssembler::Address(storageGPR, sizeof(double) * operandIdx));
+ break;
+ }
+ case ALL_INT32_INDEXING_TYPES: {
+ SpeculateInt32Operand operand(this, use);
+ m_jit.store32(TrustedImm32(JSValue::Int32Tag), MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(operand.gpr(), MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ break;
+ }
+ case ALL_CONTIGUOUS_INDEXING_TYPES: {
+ JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx]);
+ GPRReg opTagGPR = operand.tagGPR();
+ GPRReg opPayloadGPR = operand.payloadGPR();
+ m_jit.store32(opTagGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(opPayloadGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ break;
+ }
+ default:
+ CRASH();
+ break;
+ }
+ }
+
+ // Yuck, we should *really* have a way of also returning the storageGPR. But
+ // that's the least of what's wrong with this code. We really shouldn't be
+ // allocating the array after having computed - and probably spilled to the
+ // stack - all of the things that will go into the array. The solution to that
+ // bigger problem will also likely fix the redundancy in reloading the storage
+ // pointer that we currently have.
+
+ cellResult(resultGPR, node);
+ break;
+ }
+
+ if (!node->numChildren()) {
+ flushRegisters();
+ GPRFlushedCallResult result(this);
+ callOperation(
+ operationNewEmptyArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()));
+ cellResult(result.gpr(), node);
+ break;
+ }
+
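+ // General case: spill every operand into a per-VM scratch buffer so that
+ // operationNewArray can read them as one contiguous EncodedJSValue array.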
+ size_t scratchSize = sizeof(EncodedJSValue) * node->numChildren();
+ ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(scratchSize);
EncodedJSValue* buffer = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
- for (unsigned operandIdx = 0; operandIdx < node.numChildren(); ++operandIdx) {
- JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node.firstChild() + operandIdx]);
- GPRReg opTagGPR = operand.tagGPR();
- GPRReg opPayloadGPR = operand.payloadGPR();
- operand.use();
-
- m_jit.store32(opTagGPR, reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
- m_jit.store32(opPayloadGPR, reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ for (unsigned operandIdx = 0; operandIdx < node->numChildren(); ++operandIdx) {
+ // Need to perform the speculations that this node promises to perform. If we're
+ // emitting code here and the indexing type is not array storage then there is
+ // probably something hilarious going on and we're already failing at all the
+ // things, but at least we're going to be sound.
+ Edge use = m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx];
+ switch (node->indexingType()) {
+ case ALL_BLANK_INDEXING_TYPES:
+ case ALL_UNDECIDED_INDEXING_TYPES:
+ CRASH();
+ break;
+ case ALL_DOUBLE_INDEXING_TYPES: {
+ SpeculateDoubleOperand operand(this, use);
+ FPRReg opFPR = operand.fpr();
+ DFG_TYPE_CHECK(
+ JSValueRegs(), use, SpecFullRealNumber,
+ m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR));
+
+ m_jit.storeDouble(opFPR, TrustedImmPtr(reinterpret_cast<char*>(buffer + operandIdx)));
+ break;
+ }
+ case ALL_INT32_INDEXING_TYPES: {
+ SpeculateInt32Operand operand(this, use);
+ GPRReg opGPR = operand.gpr();
+ m_jit.store32(TrustedImm32(JSValue::Int32Tag), reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ m_jit.store32(opGPR, reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ break;
+ }
+ case ALL_CONTIGUOUS_INDEXING_TYPES:
+ case ALL_ARRAY_STORAGE_INDEXING_TYPES: {
+ JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx]);
+ GPRReg opTagGPR = operand.tagGPR();
+ GPRReg opPayloadGPR = operand.payloadGPR();
+
+ m_jit.store32(opTagGPR, reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ m_jit.store32(opPayloadGPR, reinterpret_cast<char*>(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ operand.use();
+ break;
+ }
+ default:
+ CRASH();
+ break;
+ }
+ }
+
+ switch (node->indexingType()) {
+ case ALL_DOUBLE_INDEXING_TYPES:
+ case ALL_INT32_INDEXING_TYPES:
+ useChildren(node);
+ break;
+ default:
+ break;
}
flushRegisters();
m_jit.storePtr(TrustedImmPtr(scratchSize), scratch.gpr());
}
- GPRResult resultPayload(this);
- GPRResult2 resultTag(this);
+ GPRFlushedCallResult result(this);
- callOperation(op == StrCat ? operationStrCat : operationNewArray, resultTag.gpr(), resultPayload.gpr(), static_cast<void *>(buffer), node.numChildren());
+ callOperation(
+ operationNewArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()),
+ static_cast<void*>(buffer), node->numChildren());
if (scratchSize) {
GPRTemporary scratch(this);
m_jit.storePtr(TrustedImmPtr(0), scratch.gpr());
}
- // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag.
- cellResult(resultPayload.gpr(), m_compileIndex, UseChildrenCalledExplicitly);
+ cellResult(result.gpr(), node, UseChildrenCalledExplicitly);
break;
}
- case NewArrayBuffer: {
- flushRegisters();
- GPRResult resultPayload(this);
- GPRResult2 resultTag(this);
-
- callOperation(operationNewArrayBuffer, resultTag.gpr(), resultPayload.gpr(), node.startConstant(), node.numConstants());
+ case NewArrayWithSize: {
+ JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
+ if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(node->indexingType())) {
+ SpeculateStrictInt32Operand size(this, node->child1());
+ GPRTemporary result(this);
+ GPRTemporary storage(this);
+ GPRTemporary scratch(this);
+ GPRTemporary scratch2(this);
+
+ GPRReg sizeGPR = size.gpr();
+ GPRReg resultGPR = result.gpr();
+ GPRReg storageGPR = storage.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+ GPRReg scratch2GPR = scratch2.gpr();
+
+ MacroAssembler::JumpList slowCases;
+ slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH)));
+
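+ // The butterfly needs sizeGPR * sizeof(JSValue) bytes of elements plus an
+ // IndexingHeader; emitAllocateBasicStorage bump-allocates that much and the subPtr
+ // rewinds storageGPR to the start of the element storage.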
+ ASSERT((1 << 3) == sizeof(JSValue));
+ m_jit.move(sizeGPR, scratchGPR);
+ m_jit.lshift32(TrustedImm32(3), scratchGPR);
+ m_jit.add32(TrustedImm32(sizeof(IndexingHeader)), scratchGPR, resultGPR);
+ slowCases.append(
+ emitAllocateBasicStorage(resultGPR, storageGPR));
+ m_jit.subPtr(scratchGPR, storageGPR);
+ Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType());
+ emitAllocateJSObject<JSArray>(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases);
+
+ m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()));
+ m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength()));
+
+ if (hasDouble(node->indexingType())) {
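+ // Double storage must have every slot initialized to the hole value (PNaN), so
+ // fill the fresh butterfly in a count-down loop.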
+ JSValue nan = JSValue(JSValue::EncodeAsDouble, PNaN);
+
+ m_jit.move(sizeGPR, scratchGPR);
+ MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, scratchGPR);
+ MacroAssembler::Label loop = m_jit.label();
+ m_jit.sub32(TrustedImm32(1), scratchGPR);
+ m_jit.store32(TrustedImm32(nan.u.asBits.tag), MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(TrustedImm32(nan.u.asBits.payload), MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ m_jit.branchTest32(MacroAssembler::NonZero, scratchGPR).linkTo(loop, &m_jit);
+ done.link(&m_jit);
+ }
+
+ addSlowPathGenerator(std::make_unique<CallArrayAllocatorWithVariableSizeSlowPathGenerator>(
+ slowCases, this, operationNewArrayWithSize, resultGPR,
+ globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()),
+ globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage),
+ sizeGPR));
+
+ cellResult(resultGPR, node);
+ break;
+ }
- // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag.
- cellResult(resultPayload.gpr(), m_compileIndex);
+ SpeculateStrictInt32Operand size(this, node->child1());
+ GPRReg sizeGPR = size.gpr();
+ flushRegisters();
+ GPRFlushedCallResult result(this);
+ GPRReg resultGPR = result.gpr();
+ GPRReg structureGPR = selectScratchGPR(sizeGPR);
+ MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_ARRAY_STORAGE_CONSTRUCTION_LENGTH));
+ m_jit.move(TrustedImmPtr(globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())), structureGPR);
+ MacroAssembler::Jump done = m_jit.jump();
+ bigLength.link(&m_jit);
+ m_jit.move(TrustedImmPtr(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage)), structureGPR);
+ done.link(&m_jit);
+ callOperation(
+ operationNewArrayWithSize, resultGPR, structureGPR, sizeGPR);
+ cellResult(resultGPR, node);
break;
}
- case NewRegexp: {
+ case NewArrayBuffer: {
+ JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
+ IndexingType indexingType = node->indexingType();
+ if (!globalObject->isHavingABadTime() && !hasAnyArrayStorage(indexingType)) {
+ unsigned numElements = node->numConstants();
+
+ GPRTemporary result(this);
+ GPRTemporary storage(this);
+
+ GPRReg resultGPR = result.gpr();
+ GPRReg storageGPR = storage.gpr();
+
+ emitAllocateJSArray(resultGPR, globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType), storageGPR, numElements);
+
+ if (node->indexingType() == ArrayWithDouble) {
+ JSValue* data = m_jit.codeBlock()->constantBuffer(node->startConstant());
+ for (unsigned index = 0; index < node->numConstants(); ++index) {
+ union {
+ int32_t halves[2];
+ double value;
+ } u;
+ u.value = data[index].asNumber();
+ m_jit.store32(Imm32(u.halves[0]), MacroAssembler::Address(storageGPR, sizeof(double) * index));
+ m_jit.store32(Imm32(u.halves[1]), MacroAssembler::Address(storageGPR, sizeof(double) * index + sizeof(int32_t)));
+ }
+ } else {
+ int32_t* data = bitwise_cast<int32_t*>(m_jit.codeBlock()->constantBuffer(node->startConstant()));
+ for (unsigned index = 0; index < node->numConstants() * 2; ++index) {
+ m_jit.store32(
+ Imm32(data[index]), MacroAssembler::Address(storageGPR, sizeof(int32_t) * index));
+ }
+ }
+
+ cellResult(resultGPR, node);
+ break;
+ }
+
flushRegisters();
- GPRResult resultPayload(this);
- GPRResult2 resultTag(this);
+ GPRFlushedCallResult result(this);
- callOperation(operationNewRegexp, resultTag.gpr(), resultPayload.gpr(), m_jit.codeBlock()->regexp(node.regexpIndex()));
+ callOperation(operationNewArrayBuffer, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), node->startConstant(), node->numConstants());
- // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag.
- cellResult(resultPayload.gpr(), m_compileIndex);
+ cellResult(result.gpr(), node);
break;
}
- case ConvertThis: {
- if (isObjectPrediction(m_state.forNode(node.child1()).m_type)) {
- SpeculateCellOperand thisValue(this, node.child1());
- GPRTemporary result(this, thisValue);
- m_jit.move(thisValue.gpr(), result.gpr());
- cellResult(result.gpr(), m_compileIndex);
+ case NewTypedArray: {
+ switch (node->child1().useKind()) {
+ case Int32Use:
+ compileNewTypedArray(node);
break;
- }
-
- if (isOtherPrediction(at(node.child1()).prediction())) {
- JSValueOperand thisValue(this, node.child1());
- GPRTemporary scratch(this);
+ case UntypedUse: {
+ JSValueOperand argument(this, node->child1());
+ GPRReg argumentTagGPR = argument.tagGPR();
+ GPRReg argumentPayloadGPR = argument.payloadGPR();
- GPRReg thisValueTagGPR = thisValue.tagGPR();
- GPRReg scratchGPR = scratch.gpr();
+ flushRegisters();
+
+ GPRFlushedCallResult result(this);
+ GPRReg resultGPR = result.gpr();
- COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag);
- m_jit.move(thisValueTagGPR, scratchGPR);
- m_jit.or32(TrustedImm32(1), scratchGPR);
- // This is hard. It would be better to save the value, but we can't quite do it,
- // since this operation does not otherwise get the payload.
- speculationCheck(BadType, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, scratchGPR, TrustedImm32(JSValue::NullTag)));
+ JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->origin.semantic);
+ callOperation(
+ operationNewTypedArrayWithOneArgumentForType(node->typedArrayType()),
+ resultGPR, globalObject->typedArrayStructure(node->typedArrayType()),
+ argumentTagGPR, argumentPayloadGPR);
- m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.globalThisObjectFor(node.codeOrigin)), scratchGPR);
- cellResult(scratchGPR, m_compileIndex);
+ cellResult(resultGPR, node);
break;
}
-
- if (isObjectPrediction(at(node.child1()).prediction())) {
- SpeculateCellOperand thisValue(this, node.child1());
- GPRReg thisValueGPR = thisValue.gpr();
-
- if (!isObjectPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(thisValueGPR), node.child1(), m_jit.branchPtr(JITCompiler::Equal, JITCompiler::Address(thisValueGPR, JSCell::classInfoOffset()), JITCompiler::TrustedImmPtr(&JSString::s_info)));
-
- GPRTemporary result(this, thisValue);
- GPRReg resultGPR = result.gpr();
- m_jit.move(thisValueGPR, resultGPR);
- cellResult(resultGPR, m_compileIndex);
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
break;
}
+ break;
+ }
- JSValueOperand thisValue(this, node.child1());
- GPRReg thisValueTagGPR = thisValue.tagGPR();
- GPRReg thisValuePayloadGPR = thisValue.payloadGPR();
-
+ case NewRegexp: {
flushRegisters();
+ GPRFlushedCallResult resultPayload(this);
+ GPRFlushedCallResult2 resultTag(this);
- GPRResult2 resultTag(this);
- GPRResult resultPayload(this);
- callOperation(operationConvertThis, resultTag.gpr(), resultPayload.gpr(), thisValueTagGPR, thisValuePayloadGPR);
+ callOperation(operationNewRegexp, resultTag.gpr(), resultPayload.gpr(), m_jit.codeBlock()->regexp(node->regexpIndex()));
+
+ // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag.
+ cellResult(resultPayload.gpr(), node);
+ break;
+ }
- cellResult(resultPayload.gpr(), m_compileIndex);
+ case ToThis: {
+ ASSERT(node->child1().useKind() == UntypedUse);
+ JSValueOperand thisValue(this, node->child1());
+ GPRTemporary temp(this);
+ GPRTemporary tempTag(this);
+ GPRReg thisValuePayloadGPR = thisValue.payloadGPR();
+ GPRReg thisValueTagGPR = thisValue.tagGPR();
+ GPRReg tempGPR = temp.gpr();
+ GPRReg tempTagGPR = tempTag.gpr();
+
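+ // ToThis is a no-op for final-object cells, so handle that case inline; non-cells
+ // and all other cell types go to operationToThis / operationToThisStrict.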
+ MacroAssembler::JumpList slowCases;
+ slowCases.append(m_jit.branchIfNotCell(thisValue.jsValueRegs()));
+ slowCases.append(m_jit.branch8(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(thisValuePayloadGPR, JSCell::typeInfoTypeOffset()),
+ TrustedImm32(FinalObjectType)));
+ m_jit.move(thisValuePayloadGPR, tempGPR);
+ m_jit.move(thisValueTagGPR, tempTagGPR);
+ J_JITOperation_EJ function;
+ if (m_jit.graph().executableFor(node->origin.semantic)->isStrictMode())
+ function = operationToThisStrict;
+ else
+ function = operationToThis;
+ addSlowPathGenerator(
+ slowPathCall(
+ slowCases, this, function,
+ JSValueRegs(tempTagGPR, tempGPR), thisValueTagGPR, thisValuePayloadGPR));
+
+ jsValueResult(tempTagGPR, tempGPR, node);
break;
}
// then we speculate because we want to get recompiled if it isn't (since
// otherwise we'd start taking slow path a lot).
- SpeculateCellOperand proto(this, node.child1());
+ SpeculateCellOperand callee(this, node->child1());
GPRTemporary result(this);
+ GPRTemporary allocator(this);
+ GPRTemporary structure(this);
GPRTemporary scratch(this);
- GPRReg protoGPR = proto.gpr();
+ GPRReg calleeGPR = callee.gpr();
GPRReg resultGPR = result.gpr();
+ GPRReg allocatorGPR = allocator.gpr();
+ GPRReg structureGPR = structure.gpr();
GPRReg scratchGPR = scratch.gpr();
-
- proto.use();
+ // The rare data pointer is only needed to reach the allocation profile's allocator
+ // and structure, so reuse structureGPR for it and avoid claiming an extra GPR.
+ GPRReg rareDataGPR = structureGPR;
MacroAssembler::JumpList slowPath;
+
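+ // A callee with no rare data, or whose allocation profile has no allocator yet,
+ // cannot be allocated inline; those cases fall back to operationCreateThis.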
+ m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfRareData()), rareDataGPR);
+ slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, rareDataGPR));
+ m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR);
+ m_jit.loadPtr(JITCompiler::Address(rareDataGPR, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR);
+ slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, allocatorGPR));
+ emitAllocateJSObject(resultGPR, allocatorGPR, structureGPR, TrustedImmPtr(0), scratchGPR, slowPath);
+
+ addSlowPathGenerator(slowPathCall(slowPath, this, operationCreateThis, resultGPR, calleeGPR, node->inlineCapacity()));
- // Need to verify that the prototype is an object. If we have reason to believe
- // that it's a FinalObject then we speculate on that directly. Otherwise we
- // do the slow (structure-based) check.
- if (at(node.child1()).shouldSpeculateFinalObject()) {
- if (!isFinalObjectPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(protoGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(protoGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSFinalObject::s_info)));
- } else {
- m_jit.loadPtr(MacroAssembler::Address(protoGPR, JSCell::structureOffset()), scratchGPR);
- slowPath.append(m_jit.branch8(MacroAssembler::Below, MacroAssembler::Address(scratchGPR, Structure::typeInfoTypeOffset()), MacroAssembler::TrustedImm32(ObjectType)));
- }
-
- // Load the inheritorID (the Structure that objects who have protoGPR as the prototype
- // use to refer to that prototype). If the inheritorID is not set, go to slow path.
- m_jit.loadPtr(MacroAssembler::Address(protoGPR, JSObject::offsetOfInheritorID()), scratchGPR);
- slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, scratchGPR));
-
- emitAllocateJSFinalObject(scratchGPR, resultGPR, scratchGPR, slowPath);
-
- MacroAssembler::Jump done = m_jit.jump();
-
- slowPath.link(&m_jit);
-
- silentSpillAllRegisters(resultGPR);
- if (node.codeOrigin.inlineCallFrame)
- callOperation(operationCreateThisInlined, resultGPR, protoGPR, node.codeOrigin.inlineCallFrame->callee.get());
- else
- callOperation(operationCreateThis, resultGPR, protoGPR);
- silentFillAllRegisters(resultGPR);
-
- done.link(&m_jit);
-
- cellResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ cellResult(resultGPR, node);
break;
}
case NewObject: {
GPRTemporary result(this);
+ GPRTemporary allocator(this);
GPRTemporary scratch(this);
GPRReg resultGPR = result.gpr();
+ GPRReg allocatorGPR = allocator.gpr();
GPRReg scratchGPR = scratch.gpr();
MacroAssembler::JumpList slowPath;
- emitAllocateJSFinalObject(MacroAssembler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)->emptyObjectStructure()), resultGPR, scratchGPR, slowPath);
-
- MacroAssembler::Jump done = m_jit.jump();
-
- slowPath.link(&m_jit);
-
- silentSpillAllRegisters(resultGPR);
- callOperation(operationNewObject, resultGPR);
- silentFillAllRegisters(resultGPR);
-
- done.link(&m_jit);
+ Structure* structure = node->structure();
+ size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
+ MarkedAllocator* allocatorPtr = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize);
+
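+ // The structure and the MarkedAllocator are both compile-time constants here, so
+ // the object is allocated inline; if the free list is empty, the slow path calls
+ // operationNewObject with the same structure.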
+ m_jit.move(TrustedImmPtr(allocatorPtr), allocatorGPR);
+ emitAllocateJSObject(resultGPR, allocatorGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR, slowPath);
+
+ addSlowPathGenerator(slowPathCall(slowPath, this, operationNewObject, resultGPR, structure));
- cellResult(resultGPR, m_compileIndex);
+ cellResult(resultGPR, node);
break;
}
case GetCallee: {
GPRTemporary result(this);
- m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(RegisterFile::Callee)), result.gpr());
- cellResult(result.gpr(), m_compileIndex);
+ m_jit.loadPtr(JITCompiler::payloadFor(JSStack::Callee), result.gpr());
+ cellResult(result.gpr(), node);
break;
}
-
- case GetScopeChain: {
- GPRTemporary result(this);
- GPRReg resultGPR = result.gpr();
-
- m_jit.loadPtr(JITCompiler::addressFor(static_cast<VirtualRegister>(RegisterFile::ScopeChain)), resultGPR);
- bool checkTopLevel = m_jit.codeBlock()->codeType() == FunctionCode && m_jit.codeBlock()->needsFullScopeChain();
- int skip = node.scopeChainDepth();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- JITCompiler::Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = m_jit.branchTestPtr(JITCompiler::Zero, JITCompiler::addressFor(static_cast<VirtualRegister>(m_jit.codeBlock()->activationRegister())));
- m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, next)), resultGPR);
- activationNotCreated.link(&m_jit);
- }
- while (skip--)
- m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, next)), resultGPR);
- m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, object)), resultGPR);
-
- cellResult(resultGPR, m_compileIndex);
+ case GetArgumentCount: {
+ GPRTemporary result(this);
+ m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), result.gpr());
+ int32Result(result.gpr(), node);
break;
}
- case GetScopedVar: {
- SpeculateCellOperand scopeChain(this, node.child1());
+
+ case GetScope:
+ compileGetScope(node);
+ break;
+
+ case SkipScope:
+ compileSkipScope(node);
+ break;
+
+ case GetClosureVar: {
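+ // Closure variables live at a fixed offset inside the JSEnvironmentRecord, so a
+ // read is just a tag load plus a payload load.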
+ SpeculateCellOperand base(this, node->child1());
GPRTemporary resultTag(this);
GPRTemporary resultPayload(this);
+ GPRReg baseGPR = base.gpr();
GPRReg resultTagGPR = resultTag.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
- m_jit.loadPtr(JITCompiler::Address(scopeChain.gpr(), JSVariableObject::offsetOfRegisters()), resultPayloadGPR);
- m_jit.load32(JITCompiler::Address(resultPayloadGPR, node.varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
- m_jit.load32(JITCompiler::Address(resultPayloadGPR, node.varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
- jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex);
+ m_jit.load32(JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()) + TagOffset), resultTagGPR);
+ m_jit.load32(JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()) + PayloadOffset), resultPayloadGPR);
+ jsValueResult(resultTagGPR, resultPayloadGPR, node);
break;
}
- case PutScopedVar: {
- SpeculateCellOperand scopeChain(this, node.child1());
- GPRTemporary scratchRegister(this);
- GPRReg scratchGPR = scratchRegister.gpr();
- m_jit.loadPtr(JITCompiler::Address(scopeChain.gpr(), JSVariableObject::offsetOfRegisters()), scratchGPR);
- JSValueOperand value(this, node.child2());
- m_jit.store32(value.tagGPR(), JITCompiler::Address(scratchGPR, node.varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
- m_jit.store32(value.payloadGPR(), JITCompiler::Address(scratchGPR, node.varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- writeBarrier(scopeChain.gpr(), value.tagGPR(), node.child2(), WriteBarrierForVariableAccess, scratchGPR);
- noResult(m_compileIndex);
+
+ case PutClosureVar: {
+ SpeculateCellOperand base(this, node->child1());
+ JSValueOperand value(this, node->child2());
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+
+ m_jit.store32(valueTagGPR, JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()) + TagOffset));
+ m_jit.store32(valuePayloadGPR, JITCompiler::Address(baseGPR, JSEnvironmentRecord::offsetOfVariable(node->scopeOffset()) + PayloadOffset));
+ noResult(node);
break;
}
case GetById: {
- if (!node.prediction()) {
- terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
- break;
- }
+ ASSERT(node->prediction());
- if (isCellPrediction(at(node.child1()).prediction())) {
- SpeculateCellOperand base(this, node.child1());
- GPRTemporary resultTag(this, base);
- GPRTemporary resultPayload(this);
+ switch (node->child1().useKind()) {
+ case CellUse: {
+ SpeculateCellOperand base(this, node->child1());
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this, Reuse, base);
GPRReg baseGPR = base.gpr();
GPRReg resultTagGPR = resultTag.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
- GPRReg scratchGPR;
-
- if (resultTagGPR == baseGPR)
- scratchGPR = resultPayloadGPR;
- else
- scratchGPR = resultTagGPR;
-
+
base.use();
- cachedGetById(node.codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber());
+ cachedGetById(node->origin.semantic, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber());
- jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly);
break;
}
- JSValueOperand base(this, node.child1());
- GPRTemporary resultTag(this, base);
- GPRTemporary resultPayload(this);
+ case UntypedUse: {
+ JSValueOperand base(this, node->child1());
+ GPRTemporary resultTag(this);
+ GPRTemporary resultPayload(this, Reuse, base, TagWord);
- GPRReg baseTagGPR = base.tagGPR();
- GPRReg basePayloadGPR = base.payloadGPR();
- GPRReg resultTagGPR = resultTag.gpr();
- GPRReg resultPayloadGPR = resultPayload.gpr();
- GPRReg scratchGPR;
-
- if (resultTagGPR == basePayloadGPR)
- scratchGPR = resultPayloadGPR;
- else
- scratchGPR = resultTagGPR;
+ GPRReg baseTagGPR = base.tagGPR();
+ GPRReg basePayloadGPR = base.payloadGPR();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
- base.use();
+ base.use();
- JITCompiler::Jump notCell = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag));
+ JITCompiler::Jump notCell = m_jit.branchIfNotCell(base.jsValueRegs());
- cachedGetById(node.codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), notCell);
+ cachedGetById(node->origin.semantic, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), notCell);
- jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
break;
}
case GetByIdFlush: {
- if (!node.prediction()) {
- terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
+ if (!node->prediction()) {
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
break;
}
- if (isCellPrediction(at(node.child1()).prediction())) {
- SpeculateCellOperand base(this, node.child1());
+ switch (node->child1().useKind()) {
+ case CellUse: {
+ SpeculateCellOperand base(this, node->child1());
GPRReg baseGPR = base.gpr();
- GPRResult resultTag(this);
- GPRResult2 resultPayload(this);
- GPRReg resultTagGPR = resultTag.gpr();
+ GPRFlushedCallResult resultPayload(this);
+ GPRFlushedCallResult2 resultTag(this);
GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
- GPRReg scratchGPR = selectScratchGPR(baseGPR, resultTagGPR, resultPayloadGPR);
-
base.use();
flushRegisters();
- cachedGetById(node.codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), JITCompiler::Jump(), DontSpill);
+ cachedGetById(node->origin.semantic, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), JITCompiler::Jump(), DontSpill);
- jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly);
break;
}
- JSValueOperand base(this, node.child1());
- GPRReg baseTagGPR = base.tagGPR();
- GPRReg basePayloadGPR = base.payloadGPR();
+ case UntypedUse: {
+ JSValueOperand base(this, node->child1());
+ GPRReg baseTagGPR = base.tagGPR();
+ GPRReg basePayloadGPR = base.payloadGPR();
- GPRResult resultTag(this);
- GPRResult2 resultPayload(this);
- GPRReg resultTagGPR = resultTag.gpr();
- GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRFlushedCallResult resultPayload(this);
+ GPRFlushedCallResult2 resultTag(this);
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
- GPRReg scratchGPR = selectScratchGPR(baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR);
-
- base.use();
+ base.use();
- flushRegisters();
+ flushRegisters();
- JITCompiler::Jump notCell = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag));
+ JITCompiler::Jump notCell = m_jit.branchIfNotCell(base.jsValueRegs());
- cachedGetById(node.codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), notCell, DontSpill);
+ cachedGetById(node->origin.semantic, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), notCell, DontSpill);
- jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
+ jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly);
+ break;
+ }
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
break;
}
- case GetArrayLength: {
- SpeculateCellOperand base(this, node.child1());
- GPRReg baseGPR = base.gpr();
-
- if (!isArrayPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
-
- GPRTemporary result(this);
- GPRReg resultGPR = result.gpr();
-
- m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), resultGPR);
- m_jit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), resultGPR);
-
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, resultGPR, MacroAssembler::TrustedImm32(0)));
-
- integerResult(resultGPR, m_compileIndex);
+ case GetArrayLength:
+ compileGetArrayLength(node);
break;
- }
-
- case GetStringLength: {
- SpeculateCellOperand base(this, node.child1());
- GPRTemporary result(this);
-
- GPRReg baseGPR = base.gpr();
- GPRReg resultGPR = result.gpr();
-
- if (!isStringPrediction(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info)));
- m_jit.load32(MacroAssembler::Address(baseGPR, JSString::offsetOfLength()), resultGPR);
-
- integerResult(resultGPR, m_compileIndex);
+ case CheckCell: {
+ SpeculateCellOperand cell(this, node->child1());
+ speculationCheck(BadCell, JSValueSource::unboxedCell(cell.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, cell.gpr(), node->cellOperand()->cell()));
+ noResult(node);
break;
}
- case GetInt8ArrayLength: {
- compileGetTypedArrayLength(m_jit.globalData()->int8ArrayDescriptor(), node, !isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type));
- break;
- }
- case GetInt16ArrayLength: {
- compileGetTypedArrayLength(m_jit.globalData()->int16ArrayDescriptor(), node, !isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type));
- break;
- }
- case GetInt32ArrayLength: {
- compileGetTypedArrayLength(m_jit.globalData()->int32ArrayDescriptor(), node, !isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type));
- break;
- }
- case GetUint8ArrayLength: {
- compileGetTypedArrayLength(m_jit.globalData()->uint8ArrayDescriptor(), node, !isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type));
- break;
- }
- case GetUint8ClampedArrayLength: {
- compileGetTypedArrayLength(m_jit.globalData()->uint8ClampedArrayDescriptor(), node, !isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type));
- break;
- }
- case GetUint16ArrayLength: {
- compileGetTypedArrayLength(m_jit.globalData()->uint16ArrayDescriptor(), node, !isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type));
- break;
- }
- case GetUint32ArrayLength: {
- compileGetTypedArrayLength(m_jit.globalData()->uint32ArrayDescriptor(), node, !isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type));
- break;
- }
- case GetFloat32ArrayLength: {
- compileGetTypedArrayLength(m_jit.globalData()->float32ArrayDescriptor(), node, !isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type));
- break;
- }
- case GetFloat64ArrayLength: {
- compileGetTypedArrayLength(m_jit.globalData()->float64ArrayDescriptor(), node, !isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type));
+ case CheckNotEmpty: {
+ JSValueOperand operand(this, node->child1());
+ GPRReg tagGPR = operand.tagGPR();
+ speculationCheck(TDZFailure, JSValueSource(), nullptr, m_jit.branch32(JITCompiler::Equal, tagGPR, TrustedImm32(JSValue::EmptyValueTag)));
+ noResult(node);
break;
}
- case CheckFunction: {
- SpeculateCellOperand function(this, node.child1());
- speculationCheck(BadCache, JSValueRegs(), NoNode, m_jit.branchWeakPtr(JITCompiler::NotEqual, function.gpr(), node.function()));
- noResult(m_compileIndex);
+ case GetExecutable: {
+ SpeculateCellOperand function(this, node->child1());
+ GPRTemporary result(this, Reuse, function);
+ GPRReg functionGPR = function.gpr();
+ GPRReg resultGPR = result.gpr();
+ speculateCellType(node->child1(), functionGPR, SpecFunction, JSFunctionType);
+ m_jit.loadPtr(JITCompiler::Address(functionGPR, JSFunction::offsetOfExecutable()), resultGPR);
+ cellResult(resultGPR, node);
break;
}
-
- case CheckStructure: {
- if (m_state.forNode(node.child1()).m_structure.isSubsetOf(node.structureSet())) {
- noResult(m_compileIndex);
- break;
- }
- SpeculateCellOperand base(this, node.child1());
+ case CheckStructure: {
+ SpeculateCellOperand base(this, node->child1());
- ASSERT(node.structureSet().size());
+ ASSERT(node->structureSet().size());
- if (node.structureSet().size() == 1)
- speculationCheck(BadCache, JSValueRegs(), NoNode, m_jit.branchWeakPtr(JITCompiler::NotEqual, JITCompiler::Address(base.gpr(), JSCell::structureOffset()), node.structureSet()[0]));
- else {
+ if (node->structureSet().size() == 1) {
+ speculationCheck(
+ BadCache, JSValueSource::unboxedCell(base.gpr()), 0,
+ m_jit.branchWeakPtr(
+ JITCompiler::NotEqual,
+ JITCompiler::Address(base.gpr(), JSCell::structureIDOffset()),
+ node->structureSet()[0]));
+ } else {
GPRTemporary structure(this);
- m_jit.loadPtr(JITCompiler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr());
+ m_jit.loadPtr(JITCompiler::Address(base.gpr(), JSCell::structureIDOffset()), structure.gpr());
JITCompiler::JumpList done;
- for (size_t i = 0; i < node.structureSet().size() - 1; ++i)
- done.append(m_jit.branchWeakPtr(JITCompiler::Equal, structure.gpr(), node.structureSet()[i]));
+ for (size_t i = 0; i < node->structureSet().size() - 1; ++i)
+ done.append(m_jit.branchWeakPtr(JITCompiler::Equal, structure.gpr(), node->structureSet()[i]));
- speculationCheck(BadCache, JSValueRegs(), NoNode, m_jit.branchWeakPtr(JITCompiler::NotEqual, structure.gpr(), node.structureSet().last()));
+ speculationCheck(
+ BadCache, JSValueSource::unboxedCell(base.gpr()), 0,
+ m_jit.branchWeakPtr(
+ JITCompiler::NotEqual, structure.gpr(), node->structureSet().last()));
done.link(&m_jit);
}
- noResult(m_compileIndex);
+ noResult(node);
break;
}
case PutStructure: {
- SpeculateCellOperand base(this, node.child1());
+ Structure* oldStructure = node->transition()->previous;
+ Structure* newStructure = node->transition()->next;
+
+ m_jit.jitCode()->common.notifyCompilingStructureTransition(m_jit.graph().m_plan, m_jit.codeBlock(), node);
+
+ SpeculateCellOperand base(this, node->child1());
GPRReg baseGPR = base.gpr();
- m_jit.addWeakReferenceTransition(
- node.codeOrigin.codeOriginOwner(),
- node.structureTransitionData().previousStructure,
- node.structureTransitionData().newStructure);
-
-#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
- // Must always emit this write barrier as the structure transition itself requires it
- writeBarrier(baseGPR, node.structureTransitionData().newStructure, WriteBarrierForGenericAccess);
-#endif
-
- m_jit.storePtr(MacroAssembler::TrustedImmPtr(node.structureTransitionData().newStructure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
+ ASSERT_UNUSED(oldStructure, oldStructure->indexingType() == newStructure->indexingType());
+ ASSERT(oldStructure->typeInfo().type() == newStructure->typeInfo().type());
+ ASSERT(oldStructure->typeInfo().inlineTypeFlags() == newStructure->typeInfo().inlineTypeFlags());
+ m_jit.storePtr(MacroAssembler::TrustedImmPtr(newStructure), MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()));
- noResult(m_compileIndex);
+ noResult(node);
break;
}
- case GetPropertyStorage: {
- SpeculateCellOperand base(this, node.child1());
- GPRTemporary result(this, base);
+ case AllocatePropertyStorage:
+ compileAllocatePropertyStorage(node);
+ break;
+
+ case ReallocatePropertyStorage:
+ compileReallocatePropertyStorage(node);
+ break;
+
+ case GetButterfly: {
+ SpeculateCellOperand base(this, node->child1());
+ GPRTemporary result(this, Reuse, base);
GPRReg baseGPR = base.gpr();
GPRReg resultGPR = result.gpr();
- m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR);
+ m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
- storageResult(resultGPR, m_compileIndex);
+ storageResult(resultGPR, node);
break;
}
break;
}
+ case ConstantStoragePointer: {
+ compileConstantStoragePointer(node);
+ break;
+ }
+
+ case GetTypedArrayByteOffset: {
+ compileGetTypedArrayByteOffset(node);
+ break;
+ }
+
case GetByOffset: {
- StorageOperand storage(this, node.child1());
- GPRTemporary resultTag(this, storage);
+ StorageOperand storage(this, node->child1());
+ GPRTemporary resultTag(this, Reuse, storage);
GPRTemporary resultPayload(this);
GPRReg storageGPR = storage.gpr();
GPRReg resultTagGPR = resultTag.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
- StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node.storageAccessDataIndex()];
+ StorageAccessData& storageAccessData = node->storageAccessData();
+
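+ // offsetRelativeToBase() turns the abstract PropertyOffset into a byte offset that
+ // is valid against whatever base the storage edge supplies (the object itself for
+ // inline properties, the butterfly for out-of-line ones).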
+ m_jit.load32(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
+ m_jit.load32(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, node);
+ break;
+ }
+
+ case GetGetterSetterByOffset: {
+ StorageOperand storage(this, node->child1());
+ GPRTemporary resultPayload(this);
+
+ GPRReg storageGPR = storage.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ StorageAccessData& storageAccessData = node->storageAccessData();
- m_jit.load32(JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
- m_jit.load32(JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ m_jit.load32(JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
- jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex);
+ cellResult(resultPayloadGPR, node);
+ break;
+ }
+
+ case GetGetter: {
+ SpeculateCellOperand op1(this, node->child1());
+ GPRTemporary result(this, Reuse, op1);
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.loadPtr(JITCompiler::Address(op1GPR, GetterSetter::offsetOfGetter()), resultGPR);
+
+ cellResult(resultGPR, node);
+ break;
+ }
+
+ case GetSetter: {
+ SpeculateCellOperand op1(this, node->child1());
+ GPRTemporary result(this, Reuse, op1);
+
+ GPRReg op1GPR = op1.gpr();
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.loadPtr(JITCompiler::Address(op1GPR, GetterSetter::offsetOfSetter()), resultGPR);
+
+ cellResult(resultGPR, node);
break;
}
case PutByOffset: {
-#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
- SpeculateCellOperand base(this, node.child1());
-#endif
- StorageOperand storage(this, node.child2());
- JSValueOperand value(this, node.child3());
+ StorageOperand storage(this, node->child1());
+ JSValueOperand value(this, node->child3());
GPRReg storageGPR = storage.gpr();
GPRReg valueTagGPR = value.tagGPR();
GPRReg valuePayloadGPR = value.payloadGPR();
-
-#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
- writeBarrier(base.gpr(), valueTagGPR, node.child3(), WriteBarrierForPropertyAccess);
-#endif
- StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node.storageAccessDataIndex()];
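+        // child2 is the base object; it only needs its type check here, which speculate() performs without pinning it in a register.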
+ speculate(node, node->child2());
+
+ StorageAccessData& storageAccessData = node->storageAccessData();
- m_jit.storePtr(valueTagGPR, JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
- m_jit.storePtr(valuePayloadGPR, JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ m_jit.storePtr(valueTagGPR, JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ m_jit.storePtr(valuePayloadGPR, JITCompiler::Address(storageGPR, offsetRelativeToBase(storageAccessData.offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- noResult(m_compileIndex);
+ noResult(node);
+ break;
+ }
+
+ case PutByIdFlush: {
+ SpeculateCellOperand base(this, node->child1());
+ JSValueOperand value(this, node->child2());
+ GPRTemporary scratch(this);
+
+ GPRReg baseGPR = base.gpr();
+ GPRReg valueTagGPR = value.tagGPR();
+ GPRReg valuePayloadGPR = value.payloadGPR();
+ GPRReg scratchGPR = scratch.gpr();
+ flushRegisters();
+
+ cachedPutById(node->origin.semantic, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), NotDirect, MacroAssembler::Jump(), DontSpill);
+
+ noResult(node);
break;
}
case PutById: {
- SpeculateCellOperand base(this, node.child1());
- JSValueOperand value(this, node.child2());
+ SpeculateCellOperand base(this, node->child1());
+ JSValueOperand value(this, node->child2());
GPRTemporary scratch(this);
        GPRReg baseGPR = base.gpr();
        GPRReg valueTagGPR = value.tagGPR();
GPRReg valuePayloadGPR = value.payloadGPR();
GPRReg scratchGPR = scratch.gpr();
- base.use();
- value.use();
-
- cachedPutById(node.codeOrigin, baseGPR, valueTagGPR, valuePayloadGPR, node.child2(), scratchGPR, node.identifierNumber(), NotDirect);
+ cachedPutById(node->origin.semantic, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), NotDirect);
- noResult(m_compileIndex, UseChildrenCalledExplicitly);
+ noResult(node);
break;
}
case PutByIdDirect: {
- SpeculateCellOperand base(this, node.child1());
- JSValueOperand value(this, node.child2());
+ SpeculateCellOperand base(this, node->child1());
+ JSValueOperand value(this, node->child2());
GPRTemporary scratch(this);
        GPRReg baseGPR = base.gpr();
        GPRReg valueTagGPR = value.tagGPR();
GPRReg valuePayloadGPR = value.payloadGPR();
GPRReg scratchGPR = scratch.gpr();
- base.use();
- value.use();
-
- cachedPutById(node.codeOrigin, baseGPR, valueTagGPR, valuePayloadGPR, node.child2(), scratchGPR, node.identifierNumber(), Direct);
+ cachedPutById(node->origin.semantic, baseGPR, valueTagGPR, valuePayloadGPR, scratchGPR, node->identifierNumber(), Direct);
- noResult(m_compileIndex, UseChildrenCalledExplicitly);
+ noResult(node);
break;
}
case GetGlobalVar: {
- GPRTemporary result(this);
- GPRTemporary scratch(this);
+ GPRTemporary resultPayload(this);
+ GPRTemporary resultTag(this);
- JSVariableObject* globalObject = m_jit.globalObjectFor(node.codeOrigin);
- m_jit.loadPtr(const_cast<WriteBarrier<Unknown>**>(globalObject->addressOfRegisters()), result.gpr());
- m_jit.load32(JITCompiler::tagForGlobalVar(result.gpr(), node.varNumber()), scratch.gpr());
- m_jit.load32(JITCompiler::payloadForGlobalVar(result.gpr(), node.varNumber()), result.gpr());
+ m_jit.move(TrustedImmPtr(node->variablePointer()), resultPayload.gpr());
+ m_jit.load32(JITCompiler::Address(resultPayload.gpr(), OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTag.gpr());
+ m_jit.load32(JITCompiler::Address(resultPayload.gpr(), OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayload.gpr());
- jsValueResult(scratch.gpr(), result.gpr(), m_compileIndex);
+ jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
break;
}
case PutGlobalVar: {
- JSValueOperand value(this, node.child1());
- GPRTemporary globalObject(this);
- GPRTemporary scratch(this);
-
- GPRReg globalObjectReg = globalObject.gpr();
- GPRReg scratchReg = scratch.gpr();
+ JSValueOperand value(this, node->child2());
- m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)), globalObjectReg);
+ // FIXME: if we happen to have a spare register - and _ONLY_ if we happen to have
+ // a spare register - a good optimization would be to put the register pointer into
+ // a register and then do a zero offset store followed by a four-offset store (or
+ // vice-versa depending on endianness).
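+        // For now, store the tag and payload through absolute addresses computed from the variable pointer.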
+ m_jit.store32(value.tagGPR(), node->variablePointer()->tagPointer());
+ m_jit.store32(value.payloadGPR(), node->variablePointer()->payloadPointer());
- writeBarrier(m_jit.globalObjectFor(node.codeOrigin), value.tagGPR(), node.child1(), WriteBarrierForVariableAccess, scratchReg);
+ noResult(node);
+ break;
+ }
- m_jit.loadPtr(MacroAssembler::Address(globalObjectReg, JSVariableObject::offsetOfRegisters()), scratchReg);
- m_jit.store32(value.tagGPR(), JITCompiler::tagForGlobalVar(scratchReg, node.varNumber()));
- m_jit.store32(value.payloadGPR(), JITCompiler::payloadForGlobalVar(scratchReg, node.varNumber()));
+ case NotifyWrite: {
+ compileNotifyWrite(node);
+ break;
+ }
- noResult(m_compileIndex);
+ case VarInjectionWatchpoint: {
+ noResult(node);
break;
}
case CheckHasInstance: {
- SpeculateCellOperand base(this, node.child1());
+ SpeculateCellOperand base(this, node->child1());
GPRTemporary structure(this);
// Speculate that base 'ImplementsDefaultHasInstance'.
- m_jit.loadPtr(MacroAssembler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr());
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(structure.gpr(), Structure::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance)));
+ speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branchTest8(
+ MacroAssembler::Zero,
+ MacroAssembler::Address(base.gpr(), JSCell::typeInfoFlagsOffset()),
+ MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance)));
- noResult(m_compileIndex);
+ noResult(node);
break;
}
case IsUndefined: {
- JSValueOperand value(this, node.child1());
+ JSValueOperand value(this, node->child1());
GPRTemporary result(this);
-
- JITCompiler::Jump isCell = m_jit.branch32(JITCompiler::Equal, value.tagGPR(), JITCompiler::TrustedImm32(JSValue::CellTag));
+ GPRTemporary localGlobalObject(this);
+ GPRTemporary remoteGlobalObject(this);
+
+ JITCompiler::Jump isCell = m_jit.branchIfCell(value.jsValueRegs());
m_jit.compare32(JITCompiler::Equal, value.tagGPR(), TrustedImm32(JSValue::UndefinedTag), result.gpr());
JITCompiler::Jump done = m_jit.jump();
isCell.link(&m_jit);
- m_jit.loadPtr(JITCompiler::Address(value.payloadGPR(), JSCell::structureOffset()), result.gpr());
- m_jit.test8(JITCompiler::NonZero, JITCompiler::Address(result.gpr(), Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), result.gpr());
-
+ JITCompiler::Jump notMasqueradesAsUndefined;
+ if (masqueradesAsUndefinedWatchpointIsStillValid()) {
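+            // While the masquerades-as-undefined watchpoint holds, no cell reachable here can behave like undefined, so cells answer false.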
+ m_jit.move(TrustedImm32(0), result.gpr());
+ notMasqueradesAsUndefined = m_jit.jump();
+ } else {
+ JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(
+ JITCompiler::NonZero,
+ JITCompiler::Address(value.payloadGPR(), JSCell::typeInfoFlagsOffset()),
+ TrustedImm32(MasqueradesAsUndefined));
+ m_jit.move(TrustedImm32(0), result.gpr());
+ notMasqueradesAsUndefined = m_jit.jump();
+
+ isMasqueradesAsUndefined.link(&m_jit);
+ GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
+ GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
+ m_jit.move(TrustedImmPtr(m_jit.globalObjectFor(node->origin.semantic)), localGlobalObjectGPR);
+ m_jit.loadPtr(JITCompiler::Address(value.payloadGPR(), JSCell::structureIDOffset()), result.gpr());
+ m_jit.loadPtr(JITCompiler::Address(result.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR);
+ m_jit.compare32(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, result.gpr());
+ }
+
+ notMasqueradesAsUndefined.link(&m_jit);
done.link(&m_jit);
- booleanResult(result.gpr(), m_compileIndex);
+ booleanResult(result.gpr(), node);
break;
}
case IsBoolean: {
- JSValueOperand value(this, node.child1());
- GPRTemporary result(this, value);
+ JSValueOperand value(this, node->child1());
+ GPRTemporary result(this, Reuse, value, TagWord);
m_jit.compare32(JITCompiler::Equal, value.tagGPR(), JITCompiler::TrustedImm32(JSValue::BooleanTag), result.gpr());
- booleanResult(result.gpr(), m_compileIndex);
+ booleanResult(result.gpr(), node);
break;
}
case IsNumber: {
- JSValueOperand value(this, node.child1());
- GPRTemporary result(this, value);
+ JSValueOperand value(this, node->child1());
+ GPRTemporary result(this, Reuse, value, TagWord);
m_jit.add32(TrustedImm32(1), value.tagGPR(), result.gpr());
m_jit.compare32(JITCompiler::Below, result.gpr(), JITCompiler::TrustedImm32(JSValue::LowestTag + 1), result.gpr());
- booleanResult(result.gpr(), m_compileIndex);
+ booleanResult(result.gpr(), node);
break;
}
case IsString: {
- JSValueOperand value(this, node.child1());
- GPRTemporary result(this, value);
+ JSValueOperand value(this, node->child1());
+ GPRTemporary result(this, Reuse, value, TagWord);
- JITCompiler::Jump isNotCell = m_jit.branch32(JITCompiler::NotEqual, value.tagGPR(), JITCompiler::TrustedImm32(JSValue::CellTag));
+ JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs());
- m_jit.loadPtr(JITCompiler::Address(value.payloadGPR(), JSCell::structureOffset()), result.gpr());
- m_jit.compare8(JITCompiler::Equal, JITCompiler::Address(result.gpr(), Structure::typeInfoTypeOffset()), TrustedImm32(StringType), result.gpr());
+ m_jit.compare8(JITCompiler::Equal,
+ JITCompiler::Address(value.payloadGPR(), JSCell::typeInfoTypeOffset()),
+ TrustedImm32(StringType),
+ result.gpr());
JITCompiler::Jump done = m_jit.jump();
isNotCell.link(&m_jit);
m_jit.move(TrustedImm32(0), result.gpr());
done.link(&m_jit);
- booleanResult(result.gpr(), m_compileIndex);
+ booleanResult(result.gpr(), node);
break;
}
case IsObject: {
- JSValueOperand value(this, node.child1());
- GPRReg valueTagGPR = value.tagGPR();
- GPRReg valuePayloadGPR = value.payloadGPR();
- GPRResult result(this);
- GPRReg resultGPR = result.gpr();
- flushRegisters();
- callOperation(operationIsObject, resultGPR, valueTagGPR, valuePayloadGPR);
- booleanResult(result.gpr(), m_compileIndex);
+ JSValueOperand value(this, node->child1());
+ GPRTemporary result(this, Reuse, value, TagWord);
+
+ JITCompiler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs());
+
+ m_jit.compare8(JITCompiler::AboveOrEqual,
+ JITCompiler::Address(value.payloadGPR(), JSCell::typeInfoTypeOffset()),
+ TrustedImm32(ObjectType),
+ result.gpr());
+ JITCompiler::Jump done = m_jit.jump();
+
+ isNotCell.link(&m_jit);
+ m_jit.move(TrustedImm32(0), result.gpr());
+
+ done.link(&m_jit);
+ booleanResult(result.gpr(), node);
+ break;
+ }
+
+ case IsObjectOrNull: {
+ compileIsObjectOrNull(node);
break;
}
case IsFunction: {
- JSValueOperand value(this, node.child1());
- GPRReg valueTagGPR = value.tagGPR();
- GPRReg valuePayloadGPR = value.payloadGPR();
- GPRResult result(this);
- GPRReg resultGPR = result.gpr();
- flushRegisters();
- callOperation(operationIsFunction, resultGPR, valueTagGPR, valuePayloadGPR);
- booleanResult(result.gpr(), m_compileIndex);
+ compileIsFunction(node);
+ break;
+ }
+ case TypeOf: {
+ compileTypeOf(node);
break;
}
- case Phi:
case Flush:
break;
- case Breakpoint:
-#if ENABLE(DEBUG_WITH_BREAKPOINT)
- m_jit.breakpoint();
-#else
- ASSERT_NOT_REACHED();
-#endif
- break;
-
case Call:
case Construct:
+ case CallVarargs:
+ case CallForwardVarargs:
+ case ConstructVarargs:
+ case ConstructForwardVarargs:
emitCall(node);
break;
- case Resolve: {
- flushRegisters();
- GPRResult resultPayload(this);
- GPRResult2 resultTag(this);
- callOperation(operationResolve, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
- jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ case LoadVarargs: {
+ LoadVarargsData* data = node->loadVarargsData();
+
+ GPRReg argumentsTagGPR;
+ GPRReg argumentsPayloadGPR;
+ {
+ JSValueOperand arguments(this, node->child1());
+ argumentsTagGPR = arguments.tagGPR();
+ argumentsPayloadGPR = arguments.payloadGPR();
+ flushRegisters();
+ }
+
+ callOperation(operationSizeOfVarargs, GPRInfo::returnValueGPR, argumentsTagGPR, argumentsPayloadGPR, data->offset);
+
+ lock(GPRInfo::returnValueGPR);
+ {
+ JSValueOperand arguments(this, node->child1());
+ argumentsTagGPR = arguments.tagGPR();
+ argumentsPayloadGPR = arguments.payloadGPR();
+ flushRegisters();
+ }
+ unlock(GPRInfo::returnValueGPR);
+
+ // FIXME: There is a chance that we will call an effectful length property twice. This is safe
+ // from the standpoint of the VM's integrity, but it's subtly wrong from a spec compliance
+ // standpoint. The best solution would be one where we can exit *into* the op_call_varargs right
+ // past the sizing.
+ // https://bugs.webkit.org/show_bug.cgi?id=141448
+
+ GPRReg argCountIncludingThisGPR =
+ JITCompiler::selectScratchGPR(GPRInfo::returnValueGPR, argumentsTagGPR, argumentsPayloadGPR);
+
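+        // The frame's argument count includes the implicit |this|, hence the +1.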
+ m_jit.add32(TrustedImm32(1), GPRInfo::returnValueGPR, argCountIncludingThisGPR);
+ speculationCheck(
+ VarargsOverflow, JSValueSource(), Edge(), m_jit.branch32(
+ MacroAssembler::Above,
+ argCountIncludingThisGPR,
+ TrustedImm32(data->limit)));
+
+ m_jit.store32(argCountIncludingThisGPR, JITCompiler::payloadFor(data->machineCount));
+
+ callOperation(operationLoadVarargs, data->machineStart.offset(), argumentsTagGPR, argumentsPayloadGPR, data->offset, GPRInfo::returnValueGPR, data->mandatoryMinimum);
+
+ noResult(node);
break;
}
+
+ case ForwardVarargs: {
+ compileForwardVarargs(node);
+ break;
+ }
+
+ case CreateActivation: {
+ compileCreateActivation(node);
+ break;
+ }
+
+ case CreateDirectArguments: {
+ compileCreateDirectArguments(node);
+ break;
+ }
+
+ case GetFromArguments: {
+ compileGetFromArguments(node);
+ break;
+ }
+
+ case PutToArguments: {
+ compilePutToArguments(node);
+ break;
+ }
+
+ case CreateScopedArguments: {
+ compileCreateScopedArguments(node);
+ break;
+ }
+
+ case CreateClonedArguments: {
+ compileCreateClonedArguments(node);
+ break;
+ }
+
+ case NewFunction:
+ compileNewFunction(node);
+ break;
+
+ case In:
+ compileIn(node);
+ break;
- case ResolveBase: {
- flushRegisters();
- GPRResult resultPayload(this);
- GPRResult2 resultTag(this);
- callOperation(operationResolveBase, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
- jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ case StoreBarrier: {
+ compileStoreBarrier(node);
break;
}
- case ResolveBaseStrictPut: {
- flushRegisters();
- GPRResult resultPayload(this);
- GPRResult2 resultTag(this);
- callOperation(operationResolveBaseStrictPut, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
- jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+ case GetEnumerableLength: {
+ SpeculateCellOperand enumerator(this, node->child1());
+ GPRFlushedCallResult result(this);
+ GPRReg resultGPR = result.gpr();
+
+ m_jit.load32(MacroAssembler::Address(enumerator.gpr(), JSPropertyNameEnumerator::indexedLengthOffset()), resultGPR);
+ int32Result(resultGPR, node);
break;
}
+ case HasGenericProperty: {
+ JSValueOperand base(this, node->child1());
+ SpeculateCellOperand property(this, node->child2());
+ GPRFlushedCallResult resultPayload(this);
+ GPRFlushedCallResult2 resultTag(this);
+ GPRReg basePayloadGPR = base.payloadGPR();
+ GPRReg baseTagGPR = base.tagGPR();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
- case ResolveGlobal: {
- GPRTemporary globalObject(this);
- GPRTemporary resolveInfo(this);
- GPRTemporary resultTag(this);
+ flushRegisters();
+ callOperation(operationHasGenericProperty, resultTagGPR, resultPayloadGPR, baseTagGPR, basePayloadGPR, property.gpr());
+ booleanResult(resultPayloadGPR, node);
+ break;
+ }
+ case HasStructureProperty: {
+ JSValueOperand base(this, node->child1());
+ SpeculateCellOperand property(this, node->child2());
+ SpeculateCellOperand enumerator(this, node->child3());
GPRTemporary resultPayload(this);
+ GPRTemporary resultTag(this);
- GPRReg globalObjectGPR = globalObject.gpr();
- GPRReg resolveInfoGPR = resolveInfo.gpr();
- GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg baseTagGPR = base.tagGPR();
+ GPRReg basePayloadGPR = base.payloadGPR();
+ GPRReg propertyGPR = property.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
- ResolveGlobalData& data = m_jit.graph().m_resolveGlobalData[node.resolveGlobalDataIndex()];
- GlobalResolveInfo* resolveInfoAddress = &(m_jit.codeBlock()->globalResolveInfo(data.resolveInfoIndex));
+ m_jit.load32(MacroAssembler::Address(basePayloadGPR, JSCell::structureIDOffset()), resultTagGPR);
+ MacroAssembler::Jump wrongStructure = m_jit.branch32(MacroAssembler::NotEqual,
+ resultTagGPR,
+ MacroAssembler::Address(enumerator.gpr(), JSPropertyNameEnumerator::cachedStructureIDOffset()));
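+        // Fast path: the structure matches the enumerator's cached structure, so the property is known to be present.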
- // Check Structure of global object
- m_jit.move(JITCompiler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)), globalObjectGPR);
- m_jit.move(JITCompiler::TrustedImmPtr(resolveInfoAddress), resolveInfoGPR);
- m_jit.loadPtr(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), resultPayloadGPR);
+ moveTrueTo(resultPayloadGPR);
+ MacroAssembler::Jump done = m_jit.jump();
- JITCompiler::Jump structuresNotMatch = m_jit.branchPtr(JITCompiler::NotEqual, resultPayloadGPR, JITCompiler::Address(globalObjectGPR, JSCell::structureOffset()));
+ done.link(&m_jit);
+
+ addSlowPathGenerator(slowPathCall(wrongStructure, this, operationHasGenericProperty, resultTagGPR, resultPayloadGPR, baseTagGPR, basePayloadGPR, propertyGPR));
+ booleanResult(resultPayloadGPR, node);
+ break;
+ }
+ case HasIndexedProperty: {
+ SpeculateCellOperand base(this, node->child1());
+ SpeculateInt32Operand index(this, node->child2());
+ GPRTemporary resultPayload(this);
+ GPRTemporary resultTag(this);
- // Fast case
- m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::offsetOfPropertyStorage()), resultPayloadGPR);
- m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), resolveInfoGPR);
- m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
- m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
+ GPRReg baseGPR = base.gpr();
+ GPRReg indexGPR = index.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
- JITCompiler::Jump wasFast = m_jit.jump();
+ MacroAssembler::JumpList slowCases;
+ ArrayMode mode = node->arrayMode();
+ switch (mode.type()) {
+ case Array::Int32:
+ case Array::Contiguous: {
+ ASSERT(!!node->child3());
+ StorageOperand storage(this, node->child3());
+ GPRTemporary scratch(this);
+
+ GPRReg storageGPR = storage.gpr();
+ GPRReg scratchGPR = scratch.gpr();
- structuresNotMatch.link(&m_jit);
- silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
- callOperation(operationResolveGlobal, resultTagGPR, resultPayloadGPR, resolveInfoGPR, &m_jit.codeBlock()->identifier(data.identifierNumber));
- silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
+ slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())));
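+            // A hole is stored with the empty value tag; send holes to the slow path.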
+ m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), scratchGPR);
+ slowCases.append(m_jit.branch32(MacroAssembler::Equal, scratchGPR, TrustedImm32(JSValue::EmptyValueTag)));
+ break;
+ }
+ case Array::Double: {
+ ASSERT(!!node->child3());
+ StorageOperand storage(this, node->child3());
+ FPRTemporary scratch(this);
+ FPRReg scratchFPR = scratch.fpr();
+ GPRReg storageGPR = storage.gpr();
- wasFast.link(&m_jit);
+ slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())));
+ m_jit.loadDouble(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight), scratchFPR);
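+            // Holes in a double array are stored as NaN; the unordered self-compare catches them.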
+ slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, scratchFPR, scratchFPR));
+ break;
+ }
+ case Array::ArrayStorage: {
+ ASSERT(!!node->child3());
+ StorageOperand storage(this, node->child3());
+ GPRTemporary scratch(this);
- jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex);
+ GPRReg storageGPR = storage.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, indexGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())));
+ m_jit.load32(MacroAssembler::BaseIndex(storageGPR, indexGPR, MacroAssembler::TimesEight, ArrayStorage::vectorOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), scratchGPR);
+ slowCases.append(m_jit.branch32(MacroAssembler::Equal, scratchGPR, TrustedImm32(JSValue::EmptyValueTag)));
+ break;
+ }
+ default: {
+ slowCases.append(m_jit.jump());
+ break;
+ }
+ }
+
+ moveTrueTo(resultPayloadGPR);
+ MacroAssembler::Jump done = m_jit.jump();
+
+ addSlowPathGenerator(slowPathCall(slowCases, this, operationHasIndexedProperty, resultTagGPR, resultPayloadGPR, baseGPR, indexGPR));
+
+ done.link(&m_jit);
+ booleanResult(resultPayloadGPR, node);
break;
}
+ case GetDirectPname: {
+ Edge& baseEdge = m_jit.graph().varArgChild(node, 0);
+ Edge& propertyEdge = m_jit.graph().varArgChild(node, 1);
- case CreateActivation: {
- JSValueOperand value(this, node.child1());
- GPRTemporary result(this, value, false);
-
- GPRReg valueTagGPR = value.tagGPR();
- GPRReg valuePayloadGPR = value.payloadGPR();
- GPRReg resultGPR = result.gpr();
-
- m_jit.move(valuePayloadGPR, resultGPR);
-
- JITCompiler::Jump alreadyCreated = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
-
- silentSpillAllRegisters(resultGPR);
- callOperation(operationCreateActivation, resultGPR);
- silentFillAllRegisters(resultGPR);
+ SpeculateCellOperand base(this, baseEdge);
+ SpeculateCellOperand property(this, propertyEdge);
+ GPRReg baseGPR = base.gpr();
+ GPRReg propertyGPR = property.gpr();
+
+#if CPU(X86)
+ GPRFlushedCallResult resultPayload(this);
+ GPRFlushedCallResult2 resultTag(this);
+ GPRTemporary scratch(this);
+
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ // Not enough registers on X86 for this code, so always use the slow path.
+ flushRegisters();
+ m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag), scratchGPR);
+ callOperation(operationGetByValCell, resultTagGPR, resultPayloadGPR, baseGPR, scratchGPR, propertyGPR);
+#else
+ GPRTemporary resultPayload(this);
+ GPRTemporary resultTag(this);
+ GPRTemporary scratch(this);
+
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+
+ Edge& indexEdge = m_jit.graph().varArgChild(node, 2);
+ Edge& enumeratorEdge = m_jit.graph().varArgChild(node, 3);
+
+ SpeculateInt32Operand index(this, indexEdge);
+ SpeculateCellOperand enumerator(this, enumeratorEdge);
+
+ GPRReg indexGPR = index.gpr();
+ GPRReg enumeratorGPR = enumerator.gpr();
+
+ // Check the structure
+ m_jit.load32(MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), scratchGPR);
+ MacroAssembler::Jump wrongStructure = m_jit.branch32(MacroAssembler::NotEqual,
+ scratchGPR, MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedStructureIDOffset()));
- alreadyCreated.link(&m_jit);
+ // Compute the offset
+ // If index is less than the enumerator's cached inline storage, then it's an inline access
+ MacroAssembler::Jump outOfLineAccess = m_jit.branch32(MacroAssembler::AboveOrEqual,
+ indexGPR, MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
+
+ m_jit.move(indexGPR, scratchGPR);
+ m_jit.signExtend32ToPtr(scratchGPR, scratchGPR);
+ m_jit.load32(MacroAssembler::BaseIndex(baseGPR, scratchGPR, MacroAssembler::TimesEight, JSObject::offsetOfInlineStorage() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagGPR);
+ m_jit.load32(MacroAssembler::BaseIndex(baseGPR, scratchGPR, MacroAssembler::TimesEight, JSObject::offsetOfInlineStorage() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadGPR);
+
+ MacroAssembler::Jump done = m_jit.jump();
- cellResult(resultGPR, m_compileIndex);
+ // Otherwise it's out of line
+ outOfLineAccess.link(&m_jit);
+ m_jit.move(indexGPR, scratchGPR);
+ m_jit.sub32(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), scratchGPR);
+ m_jit.neg32(scratchGPR);
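+        // Out-of-line properties are laid out at negative indices from the butterfly pointer, hence the negation.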
+ m_jit.signExtend32ToPtr(scratchGPR, scratchGPR);
+        // We use resultPayloadGPR as a temporary here. We have to make sure we only clobber it after getting the
+        // value out of indexGPR and enumeratorGPR, because resultPayloadGPR could reuse either of those registers.
+ m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), resultPayloadGPR);
+ int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
+ m_jit.load32(MacroAssembler::BaseIndex(resultPayloadGPR, scratchGPR, MacroAssembler::TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagGPR);
+ m_jit.load32(MacroAssembler::BaseIndex(resultPayloadGPR, scratchGPR, MacroAssembler::TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadGPR);
+
+ done.link(&m_jit);
+
+ addSlowPathGenerator(slowPathCall(wrongStructure, this, operationGetByValCell, resultTagGPR, resultPayloadGPR, baseGPR, propertyGPR));
+#endif
+
+ jsValueResult(resultTagGPR, resultPayloadGPR, node);
break;
}
-
- case TearOffActivation: {
- JSValueOperand value(this, node.child1());
-
- GPRReg valueTagGPR = value.tagGPR();
- GPRReg valuePayloadGPR = value.payloadGPR();
-
- JITCompiler::Jump notCreated = m_jit.branch32(JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
-
- silentSpillAllRegisters(InvalidGPRReg);
- callOperation(operationTearOffActivation, valuePayloadGPR);
- silentFillAllRegisters(InvalidGPRReg);
-
- notCreated.link(&m_jit);
-
- noResult(m_compileIndex);
+ case GetPropertyEnumerator: {
+ SpeculateCellOperand base(this, node->child1());
+ GPRFlushedCallResult result(this);
+ GPRReg resultGPR = result.gpr();
+
+ flushRegisters();
+ callOperation(operationGetPropertyEnumerator, resultGPR, base.gpr());
+ cellResult(resultGPR, node);
break;
}
-
- case NewFunctionNoCheck:
- compileNewFunctionNoCheck(node);
+ case GetEnumeratorStructurePname:
+ case GetEnumeratorGenericPname: {
+ SpeculateCellOperand enumerator(this, node->child1());
+ SpeculateInt32Operand index(this, node->child2());
+ GPRTemporary scratch(this);
+ GPRTemporary resultPayload(this);
+ GPRTemporary resultTag(this);
+
+ GPRReg enumeratorGPR = enumerator.gpr();
+ GPRReg indexGPR = index.gpr();
+ GPRReg scratchGPR = scratch.gpr();
+ GPRReg resultTagGPR = resultTag.gpr();
+ GPRReg resultPayloadGPR = resultPayload.gpr();
+
+ MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, indexGPR,
+ MacroAssembler::Address(enumeratorGPR, (op == GetEnumeratorStructurePname)
+ ? JSPropertyNameEnumerator::endStructurePropertyIndexOffset()
+ : JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
+
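+        // Past the end of the relevant property names: return jsNull() to signal that enumeration is done.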
+ m_jit.move(MacroAssembler::TrustedImm32(JSValue::NullTag), resultTagGPR);
+ m_jit.move(MacroAssembler::TrustedImm32(0), resultPayloadGPR);
+
+ MacroAssembler::Jump done = m_jit.jump();
+ inBounds.link(&m_jit);
+
+ m_jit.loadPtr(MacroAssembler::Address(enumeratorGPR, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), scratchGPR);
+ m_jit.loadPtr(MacroAssembler::BaseIndex(scratchGPR, indexGPR, MacroAssembler::ScalePtr), resultPayloadGPR);
+ m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag), resultTagGPR);
+
+ done.link(&m_jit);
+ jsValueResult(resultTagGPR, resultPayloadGPR, node);
break;
-
- case NewFunction: {
- JSValueOperand value(this, node.child1());
- GPRTemporary result(this, value, false);
-
- GPRReg valueTagGPR = value.tagGPR();
- GPRReg valuePayloadGPR = value.payloadGPR();
+ }
+ case ToIndexString: {
+ SpeculateInt32Operand index(this, node->child1());
+ GPRFlushedCallResult result(this);
GPRReg resultGPR = result.gpr();
-
- m_jit.move(valuePayloadGPR, resultGPR);
-
- JITCompiler::Jump alreadyCreated = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
-
- silentSpillAllRegisters(resultGPR);
- callOperation(
- operationNewFunction, resultGPR, m_jit.codeBlock()->functionDecl(node.functionDeclIndex()));
- silentFillAllRegisters(resultGPR);
-
- alreadyCreated.link(&m_jit);
-
- cellResult(resultGPR, m_compileIndex);
+
+ flushRegisters();
+ callOperation(operationToIndexString, resultGPR, index.gpr());
+ cellResult(resultGPR, node);
break;
}
-
- case NewFunctionExpression:
- compileNewFunctionExpression(node);
+ case ProfileType: {
+ JSValueOperand value(this, node->child1());
+ GPRTemporary scratch1(this);
+ GPRTemporary scratch2(this);
+ GPRTemporary scratch3(this);
+
+ GPRReg scratch1GPR = scratch1.gpr();
+ GPRReg scratch2GPR = scratch2.gpr();
+ GPRReg scratch3GPR = scratch3.gpr();
+
+ // Load the TypeProfilerLog into Scratch2.
+ TypeProfilerLog* cachedTypeProfilerLog = m_jit.vm()->typeProfilerLog();
+ m_jit.move(TrustedImmPtr(cachedTypeProfilerLog), scratch2GPR);
+
+ // Load the next LogEntry into Scratch1.
+ m_jit.loadPtr(MacroAssembler::Address(scratch2GPR, TypeProfilerLog::currentLogEntryOffset()), scratch1GPR);
+
+ // Store the JSValue onto the log entry.
+ m_jit.store32(value.tagGPR(), MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ m_jit.store32(value.payloadGPR(), MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+
+        // Store the structureID of the cell if the value is a cell; otherwise store 0 on the log entry.
+ MacroAssembler::Jump isNotCell = m_jit.branchIfNotCell(value.jsValueRegs());
+ m_jit.load32(MacroAssembler::Address(value.payloadGPR(), JSCell::structureIDOffset()), scratch3GPR);
+ m_jit.store32(scratch3GPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::structureIDOffset()));
+ MacroAssembler::Jump skipIsCell = m_jit.jump();
+ isNotCell.link(&m_jit);
+ m_jit.store32(TrustedImm32(0), MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::structureIDOffset()));
+ skipIsCell.link(&m_jit);
+
+ // Store the typeLocation on the log entry.
+ TypeLocation* cachedTypeLocation = node->typeLocation();
+ m_jit.move(TrustedImmPtr(cachedTypeLocation), scratch3GPR);
+ m_jit.storePtr(scratch3GPR, MacroAssembler::Address(scratch1GPR, TypeProfilerLog::LogEntry::locationOffset()));
+
+ // Increment the current log entry.
+ m_jit.addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), scratch1GPR);
+ m_jit.storePtr(scratch1GPR, MacroAssembler::Address(scratch2GPR, TypeProfilerLog::currentLogEntryOffset()));
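+        // If that was the last free entry, take the slow path to process and reset the log.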
+ MacroAssembler::Jump clearLog = m_jit.branchPtr(MacroAssembler::Equal, scratch1GPR, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()));
+ addSlowPathGenerator(
+ slowPathCall(clearLog, this, operationProcessTypeProfilerLogDFG, NoResult));
+
+ noResult(node);
+ break;
+ }
+ case ProfileControlFlow: {
+ BasicBlockLocation* basicBlockLocation = node->basicBlockLocation();
+ if (!basicBlockLocation->hasExecuted()) {
+ GPRTemporary scratch1(this);
+ basicBlockLocation->emitExecuteCode(m_jit, scratch1.gpr());
+ }
+ noResult(node);
break;
+ }
case ForceOSRExit: {
- terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
+ terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
break;
}
+ case InvalidationPoint:
+ emitInvalidationPoint(node);
+ break;
+
+ case CheckWatchdogTimer:
+ ASSERT(m_jit.vm()->watchdog);
+ speculationCheck(
+ WatchdogTimerFired, JSValueRegs(), 0,
+ m_jit.branchTest8(
+ JITCompiler::NonZero,
+ JITCompiler::AbsoluteAddress(m_jit.vm()->watchdog->timerDidFireAddress())));
+ break;
+
+ case CountExecution:
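+        // Bump the node's 64-bit execution counter in place.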
+ m_jit.add64(TrustedImm32(1), MacroAssembler::AbsoluteAddress(node->executionCounter()->address()));
+ break;
+
case Phantom:
+ case Check:
+ DFG_NODE_DO_TO_CHILDREN(m_jit.graph(), node, speculate);
+ noResult(node);
+ break;
+
+ case Breakpoint:
+ case ProfileWillCall:
+ case ProfileDidCall:
+ case PhantomLocal:
+ case LoopHint:
// This is a no-op.
- noResult(m_compileIndex);
+ noResult(node);
+ break;
+
+ case Unreachable:
+ RELEASE_ASSERT_NOT_REACHED();
break;
- case InlineStart:
- case Nop:
case LastNodeType:
- ASSERT_NOT_REACHED();
+ case Phi:
+ case Upsilon:
+ case ExtractOSREntryLocal:
+ case CheckTierUpInLoop:
+ case CheckTierUpAtReturn:
+ case CheckTierUpAndOSREnter:
+ case CheckTierUpWithNestedTriggerAndOSREnter:
+ case Int52Rep:
+ case FiatInt52:
+ case Int52Constant:
+ case CheckInBounds:
+ case ArithIMul:
+ case MultiGetByOffset:
+ case MultiPutByOffset:
+ case NativeCall:
+ case NativeConstruct:
+ case CheckBadCell:
+ case BottomValue:
+ case PhantomNewObject:
+ case PhantomNewFunction:
+ case PhantomCreateActivation:
+ case PutHint:
+ case CheckStructureImmediate:
+ case MaterializeNewObject:
+ case MaterializeCreateActivation:
+ case PutStack:
+ case KillStack:
+ case GetStack:
+ case GetMyArgumentByVal:
+ DFG_CRASH(m_jit.graph(), node, "unexpected node in DFG backend");
break;
}
-
+
if (!m_compileOkay)
return;
- if (node.hasResult() && node.mustGenerate())
- use(m_compileIndex);
+ if (node->hasResult() && node->mustGenerate())
+ use(node);
+}
+
+#if ENABLE(GGC)
+void SpeculativeJIT::writeBarrier(GPRReg ownerGPR, GPRReg valueTagGPR, Edge valueUse, GPRReg scratch1, GPRReg scratch2)
+{
+ JITCompiler::Jump isNotCell;
+ if (!isKnownCell(valueUse.node()))
+ isNotCell = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, JITCompiler::TrustedImm32(JSValue::CellTag));
+
+ JITCompiler::Jump ownerIsRememberedOrInEden = m_jit.jumpIfIsRememberedOrInEden(ownerGPR);
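+    // Owners that are already remembered, or that live in eden, do not need to be logged again.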
+ storeToWriteBarrierBuffer(ownerGPR, scratch1, scratch2);
+ ownerIsRememberedOrInEden.link(&m_jit);
+
+ if (!isKnownCell(valueUse.node()))
+ isNotCell.link(&m_jit);
+}
+#endif // ENABLE(GGC)
+
+void SpeculativeJIT::moveTrueTo(GPRReg gpr)
+{
+ m_jit.move(TrustedImm32(1), gpr);
+}
+
+void SpeculativeJIT::moveFalseTo(GPRReg gpr)
+{
+ m_jit.move(TrustedImm32(0), gpr);
+}
+
+void SpeculativeJIT::blessBoolean(GPRReg)
+{
}
#endif