X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/6fe7ccc865dc7d7541b93c5bcaf6368d2c98a174..4be4e30906bcb8ee30b4d189205cb70bad6707ce:/dfg/DFGSpeculativeJIT32_64.cpp diff --git a/dfg/DFGSpeculativeJIT32_64.cpp b/dfg/DFGSpeculativeJIT32_64.cpp index f78402a..d317495 100644 --- a/dfg/DFGSpeculativeJIT32_64.cpp +++ b/dfg/DFGSpeculativeJIT32_64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. * Copyright (C) 2011 Intel Corporation. All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -29,28 +29,36 @@ #if ENABLE(DFG_JIT) +#include "ArrayPrototype.h" +#include "DFGCallArrayAllocatorSlowPathGenerator.h" +#include "DFGSlowPathGenerator.h" +#include "JSActivation.h" +#include "ObjectPrototype.h" +#include "Operations.h" + namespace JSC { namespace DFG { #if USE(JSVALUE32_64) -GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat) +GPRReg SpeculativeJIT::fillInteger(Edge edge, DataFormat& returnFormat) { - Node& node = at(nodeIndex); - VirtualRegister virtualRegister = node.virtualRegister(); + ASSERT(!needsTypeCheck(edge, SpecInt32)); + + VirtualRegister virtualRegister = edge->virtualRegister(); GenerationInfo& info = m_generationInfo[virtualRegister]; if (info.registerFormat() == DataFormatNone) { GPRReg gpr = allocate(); - if (node.hasConstant()) { + if (edge->hasConstant()) { m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - if (isInt32Constant(nodeIndex)) - m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr); - else if (isNumberConstant(nodeIndex)) - ASSERT_NOT_REACHED(); + if (isInt32Constant(edge.node())) + m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr); + else if (isNumberConstant(edge.node())) + RELEASE_ASSERT_NOT_REACHED(); else { - ASSERT(isJSConstant(nodeIndex)); - JSValue jsValue = valueOfJSConstant(nodeIndex); + ASSERT(isJSConstant(edge.node())); + JSValue jsValue = valueOfJSConstant(edge.node()); m_jit.move(MacroAssembler::Imm32(jsValue.payload()), gpr); } } else { @@ -59,7 +67,7 @@ GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr); } - info.fillInteger(gpr); + info.fillInteger(*m_stream, gpr); returnFormat = DataFormatInteger; return gpr; } @@ -76,7 +84,7 @@ GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat case DataFormatJSBoolean: case DataFormatStorage: // Should only be calling this function if we know this operand to be integer. 
- ASSERT_NOT_REACHED(); + RELEASE_ASSERT_NOT_REACHED(); case DataFormatJSInteger: { GPRReg tagGPR = info.tagGPR(); @@ -88,7 +96,7 @@ GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat m_gprs.release(tagGPR); m_gprs.release(payloadGPR); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderInteger); - info.fillInteger(payloadGPR); + info.fillInteger(*m_stream, payloadGPR); returnFormat = DataFormatInteger; return payloadGPR; } @@ -100,156 +108,32 @@ GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat returnFormat = DataFormatInteger; return gpr; } - } - - ASSERT_NOT_REACHED(); - return InvalidGPRReg; -} - -FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) -{ - Node& node = at(nodeIndex); - VirtualRegister virtualRegister = node.virtualRegister(); - GenerationInfo& info = m_generationInfo[virtualRegister]; - - if (info.registerFormat() == DataFormatNone) { - - if (node.hasConstant()) { - if (isInt32Constant(nodeIndex)) { - // FIXME: should not be reachable? - GPRReg gpr = allocate(); - m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr); - m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - info.fillInteger(gpr); - unlock(gpr); - } else if (isNumberConstant(nodeIndex)) { - FPRReg fpr = fprAllocate(); - m_jit.loadDouble(addressOfDoubleConstant(nodeIndex), fpr); - m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(fpr); - return fpr; - } else { - // FIXME: should not be reachable? - ASSERT_NOT_REACHED(); - } - } else { - DataFormat spillFormat = info.spillFormat(); - ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInteger); - if (spillFormat == DataFormatJSDouble) { - FPRReg fpr = fprAllocate(); - m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); - m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled); - info.fillDouble(fpr); - return fpr; - } - - FPRReg fpr = fprAllocate(); - JITCompiler::Jump hasUnboxedDouble; - - if (spillFormat != DataFormatJSInteger && spillFormat != DataFormatInteger) { - JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)); - m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); - hasUnboxedDouble = m_jit.jump(); - isInteger.link(&m_jit); - } - - m_jit.convertInt32ToDouble(JITCompiler::payloadFor(virtualRegister), fpr); - - if (hasUnboxedDouble.isSet()) - hasUnboxedDouble.link(&m_jit); - - m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled); - info.fillDouble(fpr); - return fpr; - } - } - - switch (info.registerFormat()) { - case DataFormatNone: - // Should have filled, above. - case DataFormatCell: - case DataFormatJSCell: - case DataFormatBoolean: - case DataFormatJSBoolean: - case DataFormatStorage: - // Should only be calling this function if we know this operand to be numeric. 
- ASSERT_NOT_REACHED(); - - case DataFormatJSInteger: - case DataFormatJS: { - GPRReg tagGPR = info.tagGPR(); - GPRReg payloadGPR = info.payloadGPR(); - FPRReg fpr = fprAllocate(); - m_gprs.lock(tagGPR); - m_gprs.lock(payloadGPR); - - JITCompiler::Jump hasUnboxedDouble; - - if (info.registerFormat() != DataFormatJSInteger) { - FPRTemporary scratch(this); - JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag)); - m_jit.jitAssertIsJSDouble(tagGPR); - unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr()); - hasUnboxedDouble = m_jit.jump(); - isInteger.link(&m_jit); - } - - m_jit.convertInt32ToDouble(payloadGPR, fpr); - - if (hasUnboxedDouble.isSet()) - hasUnboxedDouble.link(&m_jit); - - m_gprs.release(tagGPR); - m_gprs.release(payloadGPR); - m_gprs.unlock(tagGPR); - m_gprs.unlock(payloadGPR); - m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(fpr); - info.killSpilled(); - return fpr; - } - - case DataFormatInteger: { - FPRReg fpr = fprAllocate(); - GPRReg gpr = info.gpr(); - m_gprs.lock(gpr); - m_jit.convertInt32ToDouble(gpr, fpr); - m_gprs.unlock(gpr); - return fpr; - } - case DataFormatJSDouble: - case DataFormatDouble: { - FPRReg fpr = info.fpr(); - m_fprs.lock(fpr); - return fpr; + default: + RELEASE_ASSERT_NOT_REACHED(); + return InvalidGPRReg; } - } - - ASSERT_NOT_REACHED(); - return InvalidFPRReg; } -bool SpeculativeJIT::fillJSValue(NodeIndex nodeIndex, GPRReg& tagGPR, GPRReg& payloadGPR, FPRReg& fpr) +bool SpeculativeJIT::fillJSValue(Edge edge, GPRReg& tagGPR, GPRReg& payloadGPR, FPRReg& fpr) { // FIXME: For double we could fill with a FPR. UNUSED_PARAM(fpr); - Node& node = at(nodeIndex); - VirtualRegister virtualRegister = node.virtualRegister(); + VirtualRegister virtualRegister = edge->virtualRegister(); GenerationInfo& info = m_generationInfo[virtualRegister]; switch (info.registerFormat()) { case DataFormatNone: { - if (node.hasConstant()) { + if (edge->hasConstant()) { tagGPR = allocate(); payloadGPR = allocate(); - m_jit.move(Imm32(valueOfJSConstant(nodeIndex).tag()), tagGPR); - m_jit.move(Imm32(valueOfJSConstant(nodeIndex).payload()), payloadGPR); + m_jit.move(Imm32(valueOfJSConstant(edge.node()).tag()), tagGPR); + m_jit.move(Imm32(valueOfJSConstant(edge.node()).payload()), payloadGPR); m_gprs.retain(tagGPR, virtualRegister, SpillOrderConstant); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderConstant); - info.fillJSValue(tagGPR, payloadGPR, isInt32Constant(nodeIndex) ? DataFormatJSInteger : DataFormatJS); + info.fillJSValue(*m_stream, tagGPR, payloadGPR, isInt32Constant(edge.node()) ? DataFormatJSInteger : DataFormatJS); } else { DataFormat spillFormat = info.spillFormat(); ASSERT(spillFormat != DataFormatNone && spillFormat != DataFormatStorage); @@ -275,7 +159,7 @@ bool SpeculativeJIT::fillJSValue(NodeIndex nodeIndex, GPRReg& tagGPR, GPRReg& pa m_jit.load32(JITCompiler::payloadFor(virtualRegister), payloadGPR); m_gprs.retain(tagGPR, virtualRegister, SpillOrderSpilled); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderSpilled); - info.fillJSValue(tagGPR, payloadGPR, spillFormat == DataFormatJSDouble ? DataFormatJS : spillFormat); + info.fillJSValue(*m_stream, tagGPR, payloadGPR, spillFormat == DataFormatJSDouble ? 
DataFormatJS : spillFormat); } return true; @@ -310,14 +194,14 @@ bool SpeculativeJIT::fillJSValue(NodeIndex nodeIndex, GPRReg& tagGPR, GPRReg& pa fillFormat = DataFormatJSBoolean; break; default: - ASSERT_NOT_REACHED(); + RELEASE_ASSERT_NOT_REACHED(); break; } m_jit.move(TrustedImm32(tag), tagGPR); m_gprs.release(gpr); m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS); - info.fillJSValue(tagGPR, payloadGPR, fillFormat); + info.fillJSValue(*m_stream, tagGPR, payloadGPR, fillFormat); return true; } @@ -332,7 +216,7 @@ bool SpeculativeJIT::fillJSValue(NodeIndex nodeIndex, GPRReg& tagGPR, GPRReg& pa m_fprs.release(oldFPR); m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS); - info.fillJSValue(tagGPR, payloadGPR, DataFormatJS); + info.fillJSValue(*m_stream, tagGPR, payloadGPR, DataFormatJS); return true; } @@ -349,124 +233,17 @@ bool SpeculativeJIT::fillJSValue(NodeIndex nodeIndex, GPRReg& tagGPR, GPRReg& pa case DataFormatStorage: // this type currently never occurs - ASSERT_NOT_REACHED(); - } - - ASSERT_NOT_REACHED(); - return true; -} - -void SpeculativeJIT::nonSpeculativeValueToNumber(Node& node) -{ - if (isKnownNumeric(node.child1().index())) { - JSValueOperand op1(this, node.child1()); - op1.fill(); - if (op1.isDouble()) { - FPRTemporary result(this, op1); - m_jit.moveDouble(op1.fpr(), result.fpr()); - doubleResult(result.fpr(), m_compileIndex); - } else { - GPRTemporary resultTag(this, op1); - GPRTemporary resultPayload(this, op1, false); - m_jit.move(op1.tagGPR(), resultTag.gpr()); - m_jit.move(op1.payloadGPR(), resultPayload.gpr()); - jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex); - } - return; - } - - JSValueOperand op1(this, node.child1()); - GPRTemporary resultTag(this, op1); - GPRTemporary resultPayload(this, op1, false); - - ASSERT(!isInt32Constant(node.child1().index())); - ASSERT(!isNumberConstant(node.child1().index())); - - GPRReg tagGPR = op1.tagGPR(); - GPRReg payloadGPR = op1.payloadGPR(); - GPRReg resultTagGPR = resultTag.gpr(); - GPRReg resultPayloadGPR = resultPayload.gpr(); - op1.use(); - - JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag)); - JITCompiler::Jump nonNumeric = m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag)); - - // First, if we get here we have a double encoded as a JSValue - JITCompiler::Jump hasUnboxedDouble = m_jit.jump(); - - // Next handle cells (& other JS immediates) - nonNumeric.link(&m_jit); - silentSpillAllRegisters(resultTagGPR, resultPayloadGPR); - callOperation(dfgConvertJSValueToNumber, FPRInfo::returnValueFPR, tagGPR, payloadGPR); - boxDouble(FPRInfo::returnValueFPR, resultTagGPR, resultPayloadGPR); - silentFillAllRegisters(resultTagGPR, resultPayloadGPR); - JITCompiler::Jump hasCalledToNumber = m_jit.jump(); - - // Finally, handle integers. 
- isInteger.link(&m_jit); - hasUnboxedDouble.link(&m_jit); - m_jit.move(tagGPR, resultTagGPR); - m_jit.move(payloadGPR, resultPayloadGPR); - hasCalledToNumber.link(&m_jit); - jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly); -} - -void SpeculativeJIT::nonSpeculativeValueToInt32(Node& node) -{ - ASSERT(!isInt32Constant(node.child1().index())); - - if (isKnownInteger(node.child1().index())) { - IntegerOperand op1(this, node.child1()); - GPRTemporary result(this, op1); - m_jit.move(op1.gpr(), result.gpr()); - integerResult(result.gpr(), m_compileIndex); - return; - } - - GenerationInfo& childInfo = m_generationInfo[at(node.child1()).virtualRegister()]; - if (childInfo.isJSDouble()) { - DoubleOperand op1(this, node.child1()); - GPRTemporary result(this); - FPRReg fpr = op1.fpr(); - GPRReg gpr = result.gpr(); - op1.use(); - JITCompiler::Jump truncatedToInteger = m_jit.branchTruncateDoubleToInt32(fpr, gpr, JITCompiler::BranchIfTruncateSuccessful); - - silentSpillAllRegisters(gpr); - callOperation(toInt32, gpr, fpr); - silentFillAllRegisters(gpr); + RELEASE_ASSERT_NOT_REACHED(); - truncatedToInteger.link(&m_jit); - integerResult(gpr, m_compileIndex, UseChildrenCalledExplicitly); - return; + default: + RELEASE_ASSERT_NOT_REACHED(); + return true; } - - JSValueOperand op1(this, node.child1()); - GPRTemporary result(this); - GPRReg tagGPR = op1.tagGPR(); - GPRReg payloadGPR = op1.payloadGPR(); - GPRReg resultGPR = result.gpr(); - op1.use(); - - JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag)); - - // First handle non-integers - silentSpillAllRegisters(resultGPR); - callOperation(dfgConvertJSValueToInt32, GPRInfo::returnValueGPR, tagGPR, payloadGPR); - m_jit.move(GPRInfo::returnValueGPR, resultGPR); - silentFillAllRegisters(resultGPR); - JITCompiler::Jump hasCalledToInt32 = m_jit.jump(); - - // Then handle integers. 
- isInteger.link(&m_jit); - m_jit.move(payloadGPR, resultGPR); - hasCalledToInt32.link(&m_jit); - integerResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly); } -void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node) +void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node* node) { - IntegerOperand op1(this, node.child1()); + IntegerOperand op1(this, node->child1()); FPRTemporary boxer(this); GPRTemporary resultTag(this, op1); GPRTemporary resultPayload(this); @@ -488,69 +265,78 @@ void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node) done.link(&m_jit); - jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex); + jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); } -JITCompiler::Call SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode) +void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode) { JITCompiler::DataLabelPtr structureToCompare; - JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast(-1))); + JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast(unusedPointer))); - m_jit.loadPtr(JITCompiler::Address(basePayloadGPR, JSObject::offsetOfPropertyStorage()), resultPayloadGPR); + JITCompiler::ConvertibleLoadLabel propertyStorageLoad = m_jit.convertibleLoadPtr(JITCompiler::Address(basePayloadGPR, JSObject::butterflyOffset()), resultPayloadGPR); JITCompiler::DataLabelCompact tagLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); JITCompiler::DataLabelCompact payloadLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR); - JITCompiler::Jump done = m_jit.jump(); - - structureCheck.m_jump.link(&m_jit); - - if (slowPathTarget.isSet()) - slowPathTarget.link(&m_jit); - - JITCompiler::Label slowCase = m_jit.label(); - - if (spillMode == NeedToSpill) - silentSpillAllRegisters(resultTagGPR, resultPayloadGPR); - JITCompiler::Call functionCall; - if (baseTagGPROrNone == InvalidGPRReg) - functionCall = callOperation(operationGetByIdOptimize, resultTagGPR, resultPayloadGPR, JSValue::CellTag, basePayloadGPR, identifier(identifierNumber)); - else - functionCall = callOperation(operationGetByIdOptimize, resultTagGPR, resultPayloadGPR, baseTagGPROrNone, basePayloadGPR, identifier(identifierNumber)); - if (spillMode == NeedToSpill) - silentFillAllRegisters(resultTagGPR, resultPayloadGPR); - - done.link(&m_jit); - JITCompiler::Label doneLabel = m_jit.label(); - m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, functionCall, structureCheck, tagLoadWithPatch, payloadLoadWithPatch, slowCase, doneLabel, safeCast(basePayloadGPR), safeCast(resultTagGPR), safeCast(resultPayloadGPR), 
safeCast(scratchGPR), spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed)); - - return functionCall; + OwnPtr slowPath; + if (baseTagGPROrNone == InvalidGPRReg) { + if (!slowPathTarget.isSet()) { + slowPath = slowPathCall( + structureCheck.m_jump, this, operationGetByIdOptimize, + JSValueRegs(resultTagGPR, resultPayloadGPR), + static_cast(JSValue::CellTag), basePayloadGPR, + identifier(identifierNumber)); + } else { + JITCompiler::JumpList slowCases; + slowCases.append(structureCheck.m_jump); + slowCases.append(slowPathTarget); + slowPath = slowPathCall( + slowCases, this, operationGetByIdOptimize, + JSValueRegs(resultTagGPR, resultPayloadGPR), + static_cast(JSValue::CellTag), basePayloadGPR, + identifier(identifierNumber)); + } + } else { + if (!slowPathTarget.isSet()) { + slowPath = slowPathCall( + structureCheck.m_jump, this, operationGetByIdOptimize, + JSValueRegs(resultTagGPR, resultPayloadGPR), baseTagGPROrNone, basePayloadGPR, + identifier(identifierNumber)); + } else { + JITCompiler::JumpList slowCases; + slowCases.append(structureCheck.m_jump); + slowCases.append(slowPathTarget); + slowPath = slowPathCall( + slowCases, this, operationGetByIdOptimize, + JSValueRegs(resultTagGPR, resultPayloadGPR), baseTagGPROrNone, basePayloadGPR, + identifier(identifierNumber)); + } + } + m_jit.addPropertyAccess( + PropertyAccessRecord( + codeOrigin, structureToCompare, structureCheck, propertyStorageLoad, + tagLoadWithPatch, payloadLoadWithPatch, slowPath.get(), doneLabel, + safeCast(basePayloadGPR), safeCast(resultTagGPR), + safeCast(resultPayloadGPR), usedRegisters(), + spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed)); + addSlowPathGenerator(slowPath.release()); } void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget) { JITCompiler::DataLabelPtr structureToCompare; - JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast(-1))); + JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast(unusedPointer))); writeBarrier(basePayloadGPR, valueTagGPR, valueUse, WriteBarrierForPropertyAccess, scratchGPR); - m_jit.loadPtr(JITCompiler::Address(basePayloadGPR, JSObject::offsetOfPropertyStorage()), scratchGPR); + JITCompiler::ConvertibleLoadLabel propertyStorageLoad = m_jit.convertibleLoadPtr(JITCompiler::Address(basePayloadGPR, JSObject::butterflyOffset()), scratchGPR); JITCompiler::DataLabel32 tagStoreWithPatch = m_jit.store32WithAddressOffsetPatch(valueTagGPR, JITCompiler::Address(scratchGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); JITCompiler::DataLabel32 payloadStoreWithPatch = m_jit.store32WithAddressOffsetPatch(valuePayloadGPR, JITCompiler::Address(scratchGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); - JITCompiler::Jump done = m_jit.jump(); - - structureCheck.m_jump.link(&m_jit); - - if (slowPathTarget.isSet()) - slowPathTarget.link(&m_jit); - - JITCompiler::Label slowCase = m_jit.label(); - - silentSpillAllRegisters(InvalidGPRReg); + 
JITCompiler::Label doneLabel = m_jit.label(); V_DFGOperation_EJCI optimizedCall; - if (m_jit.strictModeFor(at(m_compileIndex).codeOrigin)) { + if (m_jit.strictModeFor(m_currentNode->codeOrigin)) { if (putKind == Direct) optimizedCall = operationPutByIdDirectStrictOptimize; else @@ -561,13 +347,33 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, else optimizedCall = operationPutByIdNonStrictOptimize; } - JITCompiler::Call functionCall = callOperation(optimizedCall, valueTagGPR, valuePayloadGPR, basePayloadGPR, identifier(identifierNumber)); - silentFillAllRegisters(InvalidGPRReg); - - done.link(&m_jit); - JITCompiler::Label doneLabel = m_jit.label(); - - m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, functionCall, structureCheck, JITCompiler::DataLabelCompact(tagStoreWithPatch.label()), JITCompiler::DataLabelCompact(payloadStoreWithPatch.label()), slowCase, doneLabel, safeCast(basePayloadGPR), safeCast(valueTagGPR), safeCast(valuePayloadGPR), safeCast(scratchGPR))); + OwnPtr slowPath; + if (!slowPathTarget.isSet()) { + slowPath = slowPathCall( + structureCheck.m_jump, this, optimizedCall, NoResult, valueTagGPR, valuePayloadGPR, + basePayloadGPR, identifier(identifierNumber)); + } else { + JITCompiler::JumpList slowCases; + slowCases.append(structureCheck.m_jump); + slowCases.append(slowPathTarget); + slowPath = slowPathCall( + slowCases, this, optimizedCall, NoResult, valueTagGPR, valuePayloadGPR, + basePayloadGPR, identifier(identifierNumber)); + } + RegisterSet currentlyUsedRegisters = usedRegisters(); + currentlyUsedRegisters.clear(scratchGPR); + ASSERT(currentlyUsedRegisters.get(basePayloadGPR)); + ASSERT(currentlyUsedRegisters.get(valueTagGPR)); + ASSERT(currentlyUsedRegisters.get(valuePayloadGPR)); + m_jit.addPropertyAccess( + PropertyAccessRecord( + codeOrigin, structureToCompare, structureCheck, propertyStorageLoad, + JITCompiler::DataLabelCompact(tagStoreWithPatch.label()), + JITCompiler::DataLabelCompact(payloadStoreWithPatch.label()), + slowPath.get(), doneLabel, safeCast(basePayloadGPR), + safeCast(valueTagGPR), safeCast(valuePayloadGPR), + usedRegisters())); + addSlowPathGenerator(slowPath.release()); } void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool invert) @@ -580,13 +386,36 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv GPRReg resultPayloadGPR = resultPayload.gpr(); JITCompiler::Jump notCell; - if (!isKnownCell(operand.index())) - notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag)); - - m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultPayloadGPR); - m_jit.test8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultPayloadGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), resultPayloadGPR); - - if (!isKnownCell(operand.index())) { + JITCompiler::Jump notMasqueradesAsUndefined; + if (m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) { + if (!isKnownCell(operand.node())) + notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag)); + + m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint()); + m_jit.move(invert ? 
TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR); + notMasqueradesAsUndefined = m_jit.jump(); + } else { + GPRTemporary localGlobalObject(this); + GPRTemporary remoteGlobalObject(this); + + if (!isKnownCell(operand.node())) + notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag)); + + m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultPayloadGPR); + JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::NonZero, JITCompiler::Address(resultPayloadGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined)); + + m_jit.move(invert ? TrustedImm32(1) : TrustedImm32(0), resultPayloadGPR); + notMasqueradesAsUndefined = m_jit.jump(); + + isMasqueradesAsUndefined.link(&m_jit); + GPRReg localGlobalObjectGPR = localGlobalObject.gpr(); + GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr(); + m_jit.move(JITCompiler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)), localGlobalObjectGPR); + m_jit.loadPtr(JITCompiler::Address(resultPayloadGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR); + m_jit.compare32(invert ? JITCompiler::NotEqual : JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, resultPayloadGPR); + } + + if (!isKnownCell(operand.node())) { JITCompiler::Jump done = m_jit.jump(); notCell.link(&m_jit); @@ -599,16 +428,17 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompareNull(Edge operand, bool inv done.link(&m_jit); } - booleanResult(resultPayloadGPR, m_compileIndex); + notMasqueradesAsUndefined.link(&m_jit); + + booleanResult(resultPayloadGPR, m_currentNode); } -void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, NodeIndex branchNodeIndex, bool invert) +void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, Node* branchNode, bool invert) { - Node& branchNode = at(branchNodeIndex); - BlockIndex taken = branchNode.takenBlockIndex(); - BlockIndex notTaken = branchNode.notTakenBlockIndex(); + BlockIndex taken = branchNode->takenBlockIndex(); + BlockIndex notTaken = branchNode->notTakenBlockIndex(); - if (taken == (m_block + 1)) { + if (taken == nextBlock()) { invert = !invert; BlockIndex tmp = taken; taken = notTaken; @@ -621,16 +451,33 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, NodeIndex br GPRTemporary result(this, arg); GPRReg resultGPR = result.gpr(); - + JITCompiler::Jump notCell; - - if (!isKnownCell(operand.index())) - notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag)); - - m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultGPR); - branchTest8(invert ? JITCompiler::Zero : JITCompiler::NonZero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), taken); - - if (!isKnownCell(operand.index())) { + + if (m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) { + if (!isKnownCell(operand.node())) + notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag)); + + m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint()); + jump(invert ? 
taken : notTaken, ForceJump); + } else { + GPRTemporary localGlobalObject(this); + GPRTemporary remoteGlobalObject(this); + + if (!isKnownCell(operand.node())) + notCell = m_jit.branch32(MacroAssembler::NotEqual, argTagGPR, TrustedImm32(JSValue::CellTag)); + + m_jit.loadPtr(JITCompiler::Address(argPayloadGPR, JSCell::structureOffset()), resultGPR); + branchTest8(JITCompiler::Zero, JITCompiler::Address(resultGPR, Structure::typeInfoFlagsOffset()), JITCompiler::TrustedImm32(MasqueradesAsUndefined), invert ? taken : notTaken); + + GPRReg localGlobalObjectGPR = localGlobalObject.gpr(); + GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr(); + m_jit.move(TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)), localGlobalObjectGPR); + m_jit.loadPtr(JITCompiler::Address(resultGPR, Structure::globalObjectOffset()), remoteGlobalObjectGPR); + branchPtr(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, invert ? notTaken : taken); + } + + if (!isKnownCell(operand.node())) { jump(notTaken, ForceJump); notCell.link(&m_jit); @@ -644,20 +491,20 @@ void SpeculativeJIT::nonSpeculativePeepholeBranchNull(Edge operand, NodeIndex br jump(notTaken); } -bool SpeculativeJIT::nonSpeculativeCompareNull(Node& node, Edge operand, bool invert) +bool SpeculativeJIT::nonSpeculativeCompareNull(Node* node, Edge operand, bool invert) { unsigned branchIndexInBlock = detectPeepHoleBranch(); if (branchIndexInBlock != UINT_MAX) { - NodeIndex branchNodeIndex = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock); + Node* branchNode = m_jit.graph().m_blocks[m_block]->at(branchIndexInBlock); - ASSERT(node.adjustedRefCount() == 1); + ASSERT(node->adjustedRefCount() == 1); - nonSpeculativePeepholeBranchNull(operand, branchNodeIndex, invert); + nonSpeculativePeepholeBranchNull(operand, branchNode, invert); - use(node.child1()); - use(node.child2()); + use(node->child1()); + use(node->child2()); m_indexInBlock = branchIndexInBlock; - m_compileIndex = branchNodeIndex; + m_currentNode = branchNode; return true; } @@ -667,17 +514,16 @@ bool SpeculativeJIT::nonSpeculativeCompareNull(Node& node, Edge operand, bool in return false; } -void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNodeIndex, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction) +void SpeculativeJIT::nonSpeculativePeepholeBranch(Node* node, Node* branchNode, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction) { - Node& branchNode = at(branchNodeIndex); - BlockIndex taken = branchNode.takenBlockIndex(); - BlockIndex notTaken = branchNode.notTakenBlockIndex(); + BlockIndex taken = branchNode->takenBlockIndex(); + BlockIndex notTaken = branchNode->notTakenBlockIndex(); JITCompiler::ResultCondition callResultCondition = JITCompiler::NonZero; // The branch instruction will branch to the taken block. // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through. 
- if (taken == (m_block + 1)) { + if (taken == nextBlock()) { cond = JITCompiler::invert(cond); callResultCondition = JITCompiler::Zero; BlockIndex tmp = taken; @@ -685,8 +531,8 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo notTaken = tmp; } - JSValueOperand arg1(this, node.child1()); - JSValueOperand arg2(this, node.child2()); + JSValueOperand arg1(this, node->child1()); + JSValueOperand arg2(this, node->child2()); GPRReg arg1TagGPR = arg1.tagGPR(); GPRReg arg1PayloadGPR = arg1.payloadGPR(); GPRReg arg2TagGPR = arg2.tagGPR(); @@ -694,7 +540,7 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo JITCompiler::JumpList slowPath; - if (isKnownNotInteger(node.child1().index()) || isKnownNotInteger(node.child2().index())) { + if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) { GPRResult result(this); GPRReg resultGPR = result.gpr(); @@ -712,14 +558,14 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo arg1.use(); arg2.use(); - if (!isKnownInteger(node.child1().index())) + if (!isKnownInteger(node->child1().node())) slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag))); - if (!isKnownInteger(node.child2().index())) + if (!isKnownInteger(node->child2().node())) slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag))); branch32(cond, arg1PayloadGPR, arg2PayloadGPR, taken); - if (!isKnownInteger(node.child1().index()) || !isKnownInteger(node.child2().index())) { + if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) { jump(notTaken, ForceJump); slowPath.link(&m_jit); @@ -735,13 +581,49 @@ void SpeculativeJIT::nonSpeculativePeepholeBranch(Node& node, NodeIndex branchNo jump(notTaken); m_indexInBlock = m_jit.graph().m_blocks[m_block]->size() - 1; - m_compileIndex = branchNodeIndex; + m_currentNode = branchNode; } -void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction) +template +class CompareAndBoxBooleanSlowPathGenerator + : public CallSlowPathGenerator { +public: + CompareAndBoxBooleanSlowPathGenerator( + JumpType from, SpeculativeJIT* jit, + S_DFGOperation_EJJ function, GPRReg result, GPRReg arg1Tag, GPRReg arg1Payload, + GPRReg arg2Tag, GPRReg arg2Payload) + : CallSlowPathGenerator( + from, jit, function, NeedToSpill, result) + , m_arg1Tag(arg1Tag) + , m_arg1Payload(arg1Payload) + , m_arg2Tag(arg2Tag) + , m_arg2Payload(arg2Payload) + { + } + +protected: + virtual void generateInternal(SpeculativeJIT* jit) + { + this->setUp(jit); + this->recordCall( + jit->callOperation( + this->m_function, this->m_result, m_arg1Tag, m_arg1Payload, m_arg2Tag, + m_arg2Payload)); + jit->m_jit.and32(JITCompiler::TrustedImm32(1), this->m_result); + this->tearDown(jit); + } + +private: + GPRReg m_arg1Tag; + GPRReg m_arg1Payload; + GPRReg m_arg2Tag; + GPRReg m_arg2Payload; +}; + +void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node* node, MacroAssembler::RelationalCondition cond, S_DFGOperation_EJJ helperFunction) { - JSValueOperand arg1(this, node.child1()); - JSValueOperand arg2(this, node.child2()); + JSValueOperand arg1(this, node->child1()); + JSValueOperand arg2(this, node->child2()); GPRReg arg1TagGPR = arg1.tagGPR(); GPRReg arg1PayloadGPR = arg1.payloadGPR(); GPRReg arg2TagGPR = arg2.tagGPR(); @@ -749,7 +631,7 @@ void 
SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler JITCompiler::JumpList slowPath; - if (isKnownNotInteger(node.child1().index()) || isKnownNotInteger(node.child2().index())) { + if (isKnownNotInteger(node->child1().node()) || isKnownNotInteger(node->child2().node())) { GPRResult result(this); GPRReg resultPayloadGPR = result.gpr(); @@ -759,7 +641,7 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler flushRegisters(); callOperation(helperFunction, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); - booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly); + booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly); } else { GPRTemporary resultPayload(this, arg1, false); GPRReg resultPayloadGPR = resultPayload.gpr(); @@ -767,48 +649,40 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeCompare(Node& node, MacroAssembler arg1.use(); arg2.use(); - if (!isKnownInteger(node.child1().index())) + if (!isKnownInteger(node->child1().node())) slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg1TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag))); - if (!isKnownInteger(node.child2().index())) + if (!isKnownInteger(node->child2().node())) slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, arg2TagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag))); m_jit.compare32(cond, arg1PayloadGPR, arg2PayloadGPR, resultPayloadGPR); - if (!isKnownInteger(node.child1().index()) || !isKnownInteger(node.child2().index())) { - JITCompiler::Jump haveResult = m_jit.jump(); - - slowPath.link(&m_jit); - - silentSpillAllRegisters(resultPayloadGPR); - callOperation(helperFunction, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR); - silentFillAllRegisters(resultPayloadGPR); - - m_jit.andPtr(TrustedImm32(1), resultPayloadGPR); - - haveResult.link(&m_jit); + if (!isKnownInteger(node->child1().node()) || !isKnownInteger(node->child2().node())) { + addSlowPathGenerator(adoptPtr( + new CompareAndBoxBooleanSlowPathGenerator( + slowPath, this, helperFunction, resultPayloadGPR, arg1TagGPR, + arg1PayloadGPR, arg2TagGPR, arg2PayloadGPR))); } - booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly); + booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly); } } -void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branchNodeIndex, bool invert) +void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node* node, Node* branchNode, bool invert) { - Node& branchNode = at(branchNodeIndex); - BlockIndex taken = branchNode.takenBlockIndex(); - BlockIndex notTaken = branchNode.notTakenBlockIndex(); + BlockIndex taken = branchNode->takenBlockIndex(); + BlockIndex notTaken = branchNode->notTakenBlockIndex(); // The branch instruction will branch to the taken block. // If taken is next, switch taken with notTaken & invert the branch condition so we can fall through. 
- if (taken == (m_block + 1)) { + if (taken == nextBlock()) { invert = !invert; BlockIndex tmp = taken; taken = notTaken; notTaken = tmp; } - JSValueOperand arg1(this, node.child1()); - JSValueOperand arg2(this, node.child2()); + JSValueOperand arg1(this, node->child1()); + JSValueOperand arg2(this, node->child2()); GPRReg arg1TagGPR = arg1.tagGPR(); GPRReg arg1PayloadGPR = arg1.payloadGPR(); GPRReg arg2TagGPR = arg2.tagGPR(); @@ -820,7 +694,7 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branch arg1.use(); arg2.use(); - if (isKnownCell(node.child1().index()) && isKnownCell(node.child2().index())) { + if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) { // see if we get lucky: if the arguments are cells and they reference the same // cell, then they must be strictly equal. branchPtr(JITCompiler::Equal, arg1PayloadGPR, arg2PayloadGPR, invert ? notTaken : taken); @@ -843,10 +717,10 @@ void SpeculativeJIT::nonSpeculativePeepholeStrictEq(Node& node, NodeIndex branch jump(notTaken); } -void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert) +void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node* node, bool invert) { - JSValueOperand arg1(this, node.child1()); - JSValueOperand arg2(this, node.child2()); + JSValueOperand arg1(this, node->child1()); + JSValueOperand arg2(this, node->child2()); GPRReg arg1TagGPR = arg1.tagGPR(); GPRReg arg1PayloadGPR = arg1.payloadGPR(); GPRReg arg2TagGPR = arg2.tagGPR(); @@ -858,9 +732,10 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert) arg1.use(); arg2.use(); - if (isKnownCell(node.child1().index()) && isKnownCell(node.child2().index())) { + if (isKnownCell(node->child1().node()) && isKnownCell(node->child2().node())) { // see if we get lucky: if the arguments are cells and they reference the same // cell, then they must be strictly equal. + // FIXME: this should flush registers instead of silent spill/fill. JITCompiler::Jump notEqualCase = m_jit.branchPtr(JITCompiler::NotEqual, arg1PayloadGPR, arg2PayloadGPR); m_jit.move(JITCompiler::TrustedImm32(!invert), resultPayloadGPR); @@ -885,27 +760,21 @@ void SpeculativeJIT::nonSpeculativeNonPeepholeStrictEq(Node& node, bool invert) m_jit.andPtr(JITCompiler::TrustedImm32(1), resultPayloadGPR); } - booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly); + booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly); } -void SpeculativeJIT::emitCall(Node& node) +void SpeculativeJIT::emitCall(Node* node) { - P_DFGOperation_E slowCallFunction; - - if (node.op() == Call) - slowCallFunction = operationLinkCall; - else { - ASSERT(node.op() == Construct); - slowCallFunction = operationLinkConstruct; - } + if (node->op() != Call) + ASSERT(node->op() == Construct); // For constructors, the this argument is not passed but we have to make space // for it. - int dummyThisArgument = node.op() == Call ? 0 : 1; + int dummyThisArgument = node->op() == Call ? 0 : 1; - CallLinkInfo::CallType callType = node.op() == Call ? CallLinkInfo::Call : CallLinkInfo::Construct; + CallLinkInfo::CallType callType = node->op() == Call ? 
CallLinkInfo::Call : CallLinkInfo::Construct; - Edge calleeEdge = m_jit.graph().m_varArgChildren[node.firstChild()]; + Edge calleeEdge = m_jit.graph().m_varArgChildren[node->firstChild()]; JSValueOperand callee(this, calleeEdge); GPRReg calleeTagGPR = callee.tagGPR(); GPRReg calleePayloadGPR = callee.payloadGPR(); @@ -913,15 +782,15 @@ void SpeculativeJIT::emitCall(Node& node) // The call instruction's first child is either the function (normal call) or the // receiver (method call). subsequent children are the arguments. - int numPassedArgs = node.numChildren() - 1; + int numPassedArgs = node->numChildren() - 1; - m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(RegisterFile::ArgumentCount)); - m_jit.storePtr(GPRInfo::callFrameRegister, callFramePayloadSlot(RegisterFile::CallerFrame)); - m_jit.store32(calleePayloadGPR, callFramePayloadSlot(RegisterFile::Callee)); - m_jit.store32(calleeTagGPR, callFrameTagSlot(RegisterFile::Callee)); + m_jit.store32(MacroAssembler::TrustedImm32(numPassedArgs + dummyThisArgument), callFramePayloadSlot(JSStack::ArgumentCount)); + m_jit.storePtr(GPRInfo::callFrameRegister, callFramePayloadSlot(JSStack::CallerFrame)); + m_jit.store32(calleePayloadGPR, callFramePayloadSlot(JSStack::Callee)); + m_jit.store32(calleeTagGPR, callFrameTagSlot(JSStack::Callee)); for (int i = 0; i < numPassedArgs; i++) { - Edge argEdge = m_jit.graph().m_varArgChildren[node.firstChild() + 1 + i]; + Edge argEdge = m_jit.graph().m_varArgChildren[node->firstChild() + 1 + i]; JSValueOperand arg(this, argEdge); GPRReg argTagGPR = arg.tagGPR(); GPRReg argPayloadGPR = arg.payloadGPR(); @@ -941,16 +810,18 @@ void SpeculativeJIT::emitCall(Node& node) JITCompiler::DataLabelPtr targetToCheck; JITCompiler::JumpList slowPath; - slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck)); - slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, calleeTagGPR, TrustedImm32(JSValue::CellTag))); - m_jit.loadPtr(MacroAssembler::Address(calleePayloadGPR, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), resultPayloadGPR); - m_jit.storePtr(resultPayloadGPR, callFramePayloadSlot(RegisterFile::ScopeChain)); - m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag), callFrameTagSlot(RegisterFile::ScopeChain)); - + CallBeginToken token; + m_jit.beginCall(node->codeOrigin, token); + m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister); + + slowPath.append(m_jit.branch32(MacroAssembler::NotEqual, calleeTagGPR, TrustedImm32(JSValue::CellTag))); + slowPath.append(m_jit.branchPtrWithPatch(MacroAssembler::NotEqual, calleePayloadGPR, targetToCheck)); + m_jit.loadPtr(MacroAssembler::Address(calleePayloadGPR, OBJECT_OFFSETOF(JSFunction, m_scope)), resultPayloadGPR); + m_jit.storePtr(resultPayloadGPR, MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast(sizeof(Register)) * JSStack::ScopeChain + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); + m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag), MacroAssembler::Address(GPRInfo::callFrameRegister, static_cast(sizeof(Register)) * JSStack::ScopeChain + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); - CodeOrigin codeOrigin = at(m_compileIndex).codeOrigin; - CallBeginToken token = m_jit.beginCall(); + CodeOrigin codeOrigin = node->codeOrigin; JITCompiler::Call fastCall = m_jit.nearCall(); m_jit.notifyCall(fastCall, codeOrigin, token); @@ -958,65 +829,72 @@ void 
SpeculativeJIT::emitCall(Node& node) slowPath.link(&m_jit); - m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - m_jit.poke(GPRInfo::argumentGPR0); - token = m_jit.beginCall(); - JITCompiler::Call slowCall = m_jit.appendCall(slowCallFunction); - m_jit.addFastExceptionCheck(slowCall, codeOrigin, token); - m_jit.addPtr(TrustedImm32(m_jit.codeBlock()->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister); - token = m_jit.beginCall(); - JITCompiler::Call theCall = m_jit.call(GPRInfo::returnValueGPR); - m_jit.notifyCall(theCall, codeOrigin, token); + if (calleeTagGPR == GPRInfo::nonArgGPR0) { + if (calleePayloadGPR == GPRInfo::nonArgGPR1) + m_jit.swap(GPRInfo::nonArgGPR1, GPRInfo::nonArgGPR0); + else { + m_jit.move(calleeTagGPR, GPRInfo::nonArgGPR1); + m_jit.move(calleePayloadGPR, GPRInfo::nonArgGPR0); + } + } else { + m_jit.move(calleePayloadGPR, GPRInfo::nonArgGPR0); + m_jit.move(calleeTagGPR, GPRInfo::nonArgGPR1); + } + m_jit.prepareForExceptionCheck(); + JITCompiler::Call slowCall = m_jit.nearCall(); + m_jit.notifyCall(slowCall, codeOrigin, token); done.link(&m_jit); m_jit.setupResults(resultPayloadGPR, resultTagGPR); - jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, DataFormatJS, UseChildrenCalledExplicitly); + jsValueResult(resultTagGPR, resultPayloadGPR, node, DataFormatJS, UseChildrenCalledExplicitly); - m_jit.addJSCall(fastCall, slowCall, targetToCheck, callType, at(m_compileIndex).codeOrigin); + m_jit.addJSCall(fastCall, slowCall, targetToCheck, callType, calleePayloadGPR, node->codeOrigin); } template -GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& returnFormat) +GPRReg SpeculativeJIT::fillSpeculateIntInternal(Edge edge, DataFormat& returnFormat) { #if DFG_ENABLE(DEBUG_VERBOSE) - dataLog("SpecInt@%d ", nodeIndex); + dataLogF("SpecInt@%d ", edge->index()); #endif - if (isKnownNotInteger(nodeIndex)) { - terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); - returnFormat = DataFormatInteger; - return allocate(); - } - - Node& node = at(nodeIndex); - VirtualRegister virtualRegister = node.virtualRegister(); + AbstractValue& value = m_state.forNode(edge); + SpeculatedType type = value.m_type; + ASSERT(edge.useKind() != KnownInt32Use || !(value.m_type & ~SpecInt32)); + value.filter(SpecInt32); + VirtualRegister virtualRegister = edge->virtualRegister(); GenerationInfo& info = m_generationInfo[virtualRegister]; switch (info.registerFormat()) { case DataFormatNone: { - - if (node.hasConstant()) { - ASSERT(isInt32Constant(nodeIndex)); + if ((edge->hasConstant() && !isInt32Constant(edge.node())) || info.spillFormat() == DataFormatDouble) { + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + returnFormat = DataFormatInteger; + return allocate(); + } + + if (edge->hasConstant()) { + ASSERT(isInt32Constant(edge.node())); GPRReg gpr = allocate(); - m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr); + m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr); m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - info.fillInteger(gpr); + info.fillInteger(*m_stream, gpr); returnFormat = DataFormatInteger; return gpr; } DataFormat spillFormat = info.spillFormat(); - ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInteger); + ASSERT_UNUSED(spillFormat, (spillFormat & DataFormatJS) || spillFormat == DataFormatInteger); // If we know this was spilled as an integer we 
can fill without checking. - if (spillFormat != DataFormatJSInteger && spillFormat != DataFormatInteger) - speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag))); + if (type & ~SpecInt32) + speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag))); GPRReg gpr = allocate(); m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr); m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); - info.fillInteger(gpr); + info.fillInteger(*m_stream, gpr); returnFormat = DataFormatInteger; return gpr; } @@ -1028,13 +906,13 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& GPRReg payloadGPR = info.payloadGPR(); m_gprs.lock(tagGPR); m_gprs.lock(payloadGPR); - if (info.registerFormat() != DataFormatJSInteger) - speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::Int32Tag))); + if (type & ~SpecInt32) + speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::Int32Tag))); m_gprs.unlock(tagGPR); m_gprs.release(tagGPR); m_gprs.release(payloadGPR); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderInteger); - info.fillInteger(payloadGPR); + info.fillInteger(*m_stream, payloadGPR); // If !strict we're done, return. returnFormat = DataFormatInteger; return payloadGPR; @@ -1053,58 +931,61 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& case DataFormatJSDouble: case DataFormatJSCell: case DataFormatJSBoolean: + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + returnFormat = DataFormatInteger; + return allocate(); + case DataFormatStorage: - ASSERT_NOT_REACHED(); + default: + RELEASE_ASSERT_NOT_REACHED(); + return InvalidGPRReg; } - - ASSERT_NOT_REACHED(); - return InvalidGPRReg; } -GPRReg SpeculativeJIT::fillSpeculateInt(NodeIndex nodeIndex, DataFormat& returnFormat) +GPRReg SpeculativeJIT::fillSpeculateInt(Edge edge, DataFormat& returnFormat) { - return fillSpeculateIntInternal(nodeIndex, returnFormat); + return fillSpeculateIntInternal(edge, returnFormat); } -GPRReg SpeculativeJIT::fillSpeculateIntStrict(NodeIndex nodeIndex) +GPRReg SpeculativeJIT::fillSpeculateIntStrict(Edge edge) { DataFormat mustBeDataFormatInteger; - GPRReg result = fillSpeculateIntInternal(nodeIndex, mustBeDataFormatInteger); + GPRReg result = fillSpeculateIntInternal(edge, mustBeDataFormatInteger); ASSERT(mustBeDataFormatInteger == DataFormatInteger); return result; } -FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) +FPRReg SpeculativeJIT::fillSpeculateDouble(Edge edge) { #if DFG_ENABLE(DEBUG_VERBOSE) - dataLog("SpecDouble@%d ", nodeIndex); + dataLogF("SpecDouble@%d ", edge->index()); #endif - if (isKnownNotNumber(nodeIndex)) { - terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); - return fprAllocate(); - } - - Node& node = at(nodeIndex); - VirtualRegister virtualRegister = node.virtualRegister(); + AbstractValue& value = m_state.forNode(edge); + SpeculatedType type = value.m_type; + ASSERT(edge.useKind() != KnownNumberUse || !(value.m_type & ~SpecNumber)); + value.filter(SpecNumber); + VirtualRegister virtualRegister = edge->virtualRegister(); GenerationInfo& 
info = m_generationInfo[virtualRegister]; if (info.registerFormat() == DataFormatNone) { - if (node.hasConstant()) { - if (isInt32Constant(nodeIndex)) { + if (edge->hasConstant()) { + if (isInt32Constant(edge.node())) { GPRReg gpr = allocate(); - m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr); + m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(edge.node())), gpr); m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - info.fillInteger(gpr); + info.fillInteger(*m_stream, gpr); unlock(gpr); - } else if (isNumberConstant(nodeIndex)) { + } else if (isNumberConstant(edge.node())) { FPRReg fpr = fprAllocate(); - m_jit.loadDouble(addressOfDoubleConstant(nodeIndex), fpr); + m_jit.loadDouble(addressOfDoubleConstant(edge.node()), fpr); m_fprs.retain(fpr, virtualRegister, SpillOrderConstant); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); return fpr; - } else - ASSERT_NOT_REACHED(); + } else { + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + return fprAllocate(); + } } else { DataFormat spillFormat = info.spillFormat(); ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInteger); @@ -1112,7 +993,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) FPRReg fpr = fprAllocate(); m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); return fpr; } @@ -1121,7 +1002,8 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) if (spillFormat != DataFormatJSInteger && spillFormat != DataFormatInteger) { JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::Int32Tag)); - speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::AboveOrEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::LowestTag))); + if (type & ~SpecNumber) + speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::AboveOrEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::LowestTag))); m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); hasUnboxedDouble = m_jit.jump(); @@ -1134,7 +1016,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) hasUnboxedDouble.link(&m_jit); m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); info.killSpilled(); return fpr; } @@ -1155,7 +1037,8 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) if (info.registerFormat() != DataFormatJSInteger) { FPRTemporary scratch(this); JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, tagGPR, TrustedImm32(JSValue::Int32Tag)); - speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag))); + if (type & ~SpecNumber) + speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branch32(MacroAssembler::AboveOrEqual, tagGPR, TrustedImm32(JSValue::LowestTag))); unboxDouble(tagGPR, payloadGPR, fpr, scratch.fpr()); hasUnboxedDouble = m_jit.jump(); isInteger.link(&m_jit); @@ -1171,7 +1054,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) m_gprs.unlock(tagGPR); m_gprs.unlock(payloadGPR); m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(fpr); + 
info.fillDouble(*m_stream, fpr); info.killSpilled(); return fpr; } @@ -1194,51 +1077,60 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) case DataFormatNone: case DataFormatStorage: + RELEASE_ASSERT_NOT_REACHED(); + case DataFormatCell: case DataFormatJSCell: case DataFormatBoolean: case DataFormatJSBoolean: - ASSERT_NOT_REACHED(); - } + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + return fprAllocate(); - ASSERT_NOT_REACHED(); - return InvalidFPRReg; + default: + RELEASE_ASSERT_NOT_REACHED(); + return InvalidFPRReg; + } } -GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex) +GPRReg SpeculativeJIT::fillSpeculateCell(Edge edge) { #if DFG_ENABLE(DEBUG_VERBOSE) - dataLog("SpecCell@%d ", nodeIndex); + dataLogF("SpecCell@%d ", edge->index()); #endif - if (isKnownNotCell(nodeIndex)) { - terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); - return allocate(); - } - - Node& node = at(nodeIndex); - VirtualRegister virtualRegister = node.virtualRegister(); + AbstractValue& value = m_state.forNode(edge); + SpeculatedType type = value.m_type; + ASSERT((edge.useKind() != KnownCellUse && edge.useKind() != KnownStringUse) || !(value.m_type & ~SpecCell)); + value.filter(SpecCell); + VirtualRegister virtualRegister = edge->virtualRegister(); GenerationInfo& info = m_generationInfo[virtualRegister]; switch (info.registerFormat()) { case DataFormatNone: { + if (info.spillFormat() == DataFormatInteger || info.spillFormat() == DataFormatDouble) { + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + return allocate(); + } - if (node.hasConstant()) { - JSValue jsValue = valueOfJSConstant(nodeIndex); - ASSERT(jsValue.isCell()); + if (edge->hasConstant()) { + JSValue jsValue = valueOfJSConstant(edge.node()); GPRReg gpr = allocate(); - m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr); - info.fillCell(gpr); + if (jsValue.isCell()) { + m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); + m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr); + info.fillCell(*m_stream, gpr); + return gpr; + } + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); return gpr; } ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatCell); - if (info.spillFormat() != DataFormatJSCell && info.spillFormat() != DataFormatCell) - speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag))); + if (type & ~SpecCell) + speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::CellTag))); GPRReg gpr = allocate(); m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr); m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); - info.fillCell(gpr); + info.fillCell(*m_stream, gpr); return gpr; } @@ -1254,13 +1146,13 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex) GPRReg payloadGPR = info.payloadGPR(); m_gprs.lock(tagGPR); m_gprs.lock(payloadGPR); - if (info.spillFormat() != DataFormatJSCell) - speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::CellTag))); + if (type & ~SpecCell) + speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, 
m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::CellTag))); m_gprs.unlock(tagGPR); m_gprs.release(tagGPR); m_gprs.release(payloadGPR); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderCell); - info.fillCell(payloadGPR); + info.fillCell(*m_stream, payloadGPR); return payloadGPR; } @@ -1270,50 +1162,58 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex) case DataFormatDouble: case DataFormatJSBoolean: case DataFormatBoolean: + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + return allocate(); + case DataFormatStorage: - ASSERT_NOT_REACHED(); - } + RELEASE_ASSERT_NOT_REACHED(); - ASSERT_NOT_REACHED(); - return InvalidGPRReg; + default: + RELEASE_ASSERT_NOT_REACHED(); + return InvalidGPRReg; + } } -GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex) +GPRReg SpeculativeJIT::fillSpeculateBoolean(Edge edge) { #if DFG_ENABLE(DEBUG_VERBOSE) - dataLog("SpecBool@%d ", nodeIndex); + dataLogF("SpecBool@%d ", edge.node()->index()); #endif - Node& node = m_jit.graph()[nodeIndex]; - VirtualRegister virtualRegister = node.virtualRegister(); + AbstractValue& value = m_state.forNode(edge); + SpeculatedType type = value.m_type; + value.filter(SpecBoolean); + VirtualRegister virtualRegister = edge->virtualRegister(); GenerationInfo& info = m_generationInfo[virtualRegister]; - if ((node.hasConstant() && !valueOfJSConstant(nodeIndex).isBoolean()) - || !(info.isJSBoolean() || info.isUnknownJS())) { - terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); - return allocate(); - } switch (info.registerFormat()) { case DataFormatNone: { - - if (node.hasConstant()) { - JSValue jsValue = valueOfJSConstant(nodeIndex); - ASSERT(jsValue.isBoolean()); + if (info.spillFormat() == DataFormatInteger || info.spillFormat() == DataFormatDouble) { + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + return allocate(); + } + + if (edge->hasConstant()) { + JSValue jsValue = valueOfJSConstant(edge.node()); GPRReg gpr = allocate(); - m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - m_jit.move(MacroAssembler::TrustedImm32(jsValue.asBoolean()), gpr); - info.fillBoolean(gpr); + if (jsValue.isBoolean()) { + m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); + m_jit.move(MacroAssembler::TrustedImm32(jsValue.asBoolean()), gpr); + info.fillBoolean(*m_stream, gpr); + return gpr; + } + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); return gpr; } ASSERT((info.spillFormat() & DataFormatJS) || info.spillFormat() == DataFormatBoolean); - if (info.spillFormat() != DataFormatJSBoolean && info.spillFormat() != DataFormatBoolean) - speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag))); + if (type & ~SpecBoolean) + speculationCheck(BadType, JSValueSource(JITCompiler::addressFor(virtualRegister)), edge, m_jit.branch32(MacroAssembler::NotEqual, JITCompiler::tagFor(virtualRegister), TrustedImm32(JSValue::BooleanTag))); GPRReg gpr = allocate(); m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr); m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); - info.fillBoolean(gpr); + info.fillBoolean(*m_stream, gpr); return gpr; } @@ -1329,14 +1229,14 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex) GPRReg payloadGPR = info.payloadGPR(); m_gprs.lock(tagGPR); m_gprs.lock(payloadGPR); - if (info.registerFormat() != DataFormatJSBoolean) - 
speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), nodeIndex, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::BooleanTag))); + if (type & ~SpecBoolean) + speculationCheck(BadType, JSValueRegs(tagGPR, payloadGPR), edge, m_jit.branch32(MacroAssembler::NotEqual, tagGPR, TrustedImm32(JSValue::BooleanTag))); m_gprs.unlock(tagGPR); m_gprs.release(tagGPR); m_gprs.release(payloadGPR); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderBoolean); - info.fillBoolean(payloadGPR); + info.fillBoolean(*m_stream, payloadGPR); return payloadGPR; } @@ -1346,43 +1246,87 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex) case DataFormatDouble: case DataFormatJSCell: case DataFormatCell: - case DataFormatStorage: - ASSERT_NOT_REACHED(); - } + terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0); + return allocate(); + + case DataFormatStorage: + RELEASE_ASSERT_NOT_REACHED(); - ASSERT_NOT_REACHED(); - return InvalidGPRReg; + default: + RELEASE_ASSERT_NOT_REACHED(); + return InvalidGPRReg; + } } JITCompiler::Jump SpeculativeJIT::convertToDouble(JSValueOperand& op, FPRReg result) { FPRTemporary scratch(this); - JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, op.tagGPR(), TrustedImm32(JSValue::Int32Tag)); - JITCompiler::Jump notNumber = m_jit.branch32(MacroAssembler::AboveOrEqual, op.payloadGPR(), TrustedImm32(JSValue::LowestTag)); + GPRReg opPayloadGPR = op.payloadGPR(); + GPRReg opTagGPR = op.tagGPR(); + FPRReg scratchFPR = scratch.fpr(); + + JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, opTagGPR, TrustedImm32(JSValue::Int32Tag)); + JITCompiler::Jump notNumber = m_jit.branch32(MacroAssembler::AboveOrEqual, opPayloadGPR, TrustedImm32(JSValue::LowestTag)); - unboxDouble(op.tagGPR(), op.payloadGPR(), result, scratch.fpr()); + unboxDouble(opTagGPR, opPayloadGPR, result, scratchFPR); JITCompiler::Jump done = m_jit.jump(); isInteger.link(&m_jit); - m_jit.convertInt32ToDouble(op.payloadGPR(), result); + m_jit.convertInt32ToDouble(opPayloadGPR, result); done.link(&m_jit); return notNumber; } -void SpeculativeJIT::compileObjectEquality(Node& node, const ClassInfo* classInfo, PredictionChecker predictionCheck) +void SpeculativeJIT::compileObjectEquality(Node* node) { - SpeculateCellOperand op1(this, node.child1()); - SpeculateCellOperand op2(this, node.child2()); + SpeculateCellOperand op1(this, node->child1()); + SpeculateCellOperand op2(this, node->child2()); GPRReg op1GPR = op1.gpr(); GPRReg op2GPR = op2.gpr(); - if (!predictionCheck(m_state.forNode(node.child1()).m_type)) - speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo))); - if (!predictionCheck(m_state.forNode(node.child2()).m_type)) - speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node.child2(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op2GPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo))); + if (m_jit.graph().globalObjectFor(node->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) { + m_jit.graph().globalObjectFor(node->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint()); + DFG_TYPE_CHECK( + JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchPtr( + MacroAssembler::Equal, + MacroAssembler::Address(op1GPR, JSCell::structureOffset()), + 
MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + DFG_TYPE_CHECK( + JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchPtr( + MacroAssembler::Equal, + MacroAssembler::Address(op2GPR, JSCell::structureOffset()), + MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + } else { + GPRTemporary structure(this); + GPRReg structureGPR = structure.gpr(); + + m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR); + DFG_TYPE_CHECK( + JSValueSource::unboxedCell(op1GPR), node->child1(), SpecObject, m_jit.branchPtr( + MacroAssembler::Equal, + structureGPR, + MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), node->child1(), + m_jit.branchTest8( + MacroAssembler::NonZero, + MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); + + m_jit.loadPtr(MacroAssembler::Address(op2GPR, JSCell::structureOffset()), structureGPR); + DFG_TYPE_CHECK( + JSValueSource::unboxedCell(op2GPR), node->child2(), SpecObject, m_jit.branchPtr( + MacroAssembler::Equal, + structureGPR, + MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + speculationCheck(BadType, JSValueSource::unboxedCell(op2GPR), node->child2(), + m_jit.branchTest8( + MacroAssembler::NonZero, + MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); + } GPRTemporary resultPayload(this, op2); GPRReg resultPayloadGPR = resultPayload.gpr(); @@ -1394,47 +1338,81 @@ void SpeculativeJIT::compileObjectEquality(Node& node, const ClassInfo* classInf m_jit.move(TrustedImm32(0), resultPayloadGPR); done.link(&m_jit); - booleanResult(resultPayloadGPR, m_compileIndex); + booleanResult(resultPayloadGPR, node); } -void SpeculativeJIT::compileObjectToObjectOrOtherEquality( - Edge leftChild, Edge rightChild, - const ClassInfo* classInfo, PredictionChecker predictionCheck) +void SpeculativeJIT::compileObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild) { SpeculateCellOperand op1(this, leftChild); - JSValueOperand op2(this, rightChild); + JSValueOperand op2(this, rightChild, ManualOperandSpeculation); GPRTemporary result(this); GPRReg op1GPR = op1.gpr(); GPRReg op2TagGPR = op2.tagGPR(); GPRReg op2PayloadGPR = op2.payloadGPR(); GPRReg resultGPR = result.gpr(); - - if (!predictionCheck(m_state.forNode(leftChild).m_type)) { - speculationCheck( - BadType, JSValueSource::unboxedCell(op1GPR), leftChild.index(), - m_jit.branchPtr( - MacroAssembler::NotEqual, - MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()), - MacroAssembler::TrustedImmPtr(classInfo))); + GPRTemporary structure; + GPRReg structureGPR = InvalidGPRReg; + + bool masqueradesAsUndefinedWatchpointValid = m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid(); + + if (!masqueradesAsUndefinedWatchpointValid) { + // The masquerades as undefined case will use the structure register, so allocate it here. + // Do this at the top of the function to avoid branching around a register allocation. 
+ GPRTemporary realStructure(this); + structure.adopt(realStructure); + structureGPR = structure.gpr(); + } + + if (masqueradesAsUndefinedWatchpointValid) { + m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint()); + DFG_TYPE_CHECK( + JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr( + MacroAssembler::Equal, + MacroAssembler::Address(op1GPR, JSCell::structureOffset()), + MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + } else { + m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR); + DFG_TYPE_CHECK( + JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr( + MacroAssembler::Equal, + structureGPR, + MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild, + m_jit.branchTest8( + MacroAssembler::NonZero, + MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); } + // It seems that most of the time when programs do a == b where b may be either null/undefined // or an object, b is usually an object. Balance the branches to make that case fast. MacroAssembler::Jump rightNotCell = m_jit.branch32(MacroAssembler::NotEqual, op2TagGPR, TrustedImm32(JSValue::CellTag)); - // We know that within this branch, rightChild must be a cell. If the CFA can tell us that the - // proof, when filtered on cell, demonstrates that we have an object of the desired type - // (predictionCheck() will test for FinalObject or Array, currently), then we can skip the - // speculation. - if (!predictionCheck(m_state.forNode(rightChild).m_type & PredictCell)) { - speculationCheck( - BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild.index(), + // We know that within this branch, rightChild must be a cell. + if (masqueradesAsUndefinedWatchpointValid) { + m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint()); + DFG_TYPE_CHECK( + JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, + m_jit.branchPtr( + MacroAssembler::Equal, + MacroAssembler::Address(op2PayloadGPR, JSCell::structureOffset()), + MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + } else { + m_jit.loadPtr(MacroAssembler::Address(op2PayloadGPR, JSCell::structureOffset()), structureGPR); + DFG_TYPE_CHECK( + JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr( - MacroAssembler::NotEqual, - MacroAssembler::Address(op2PayloadGPR, JSCell::classInfoOffset()), - MacroAssembler::TrustedImmPtr(classInfo))); + MacroAssembler::Equal, + structureGPR, + MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + speculationCheck(BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, + m_jit.branchTest8( + MacroAssembler::NonZero, + MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); } // At this point we know that we can perform a straight-forward equality comparison on pointer @@ -1447,12 +1425,12 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality( // We know that within this branch, rightChild must not be a cell. Check if that is enough to // prove that it is either null or undefined. 
- if (!isOtherPrediction(m_state.forNode(rightChild).m_type & ~PredictCell)) { + if (needsTypeCheck(rightChild, SpecCell | SpecOther)) { m_jit.move(op2TagGPR, resultGPR); m_jit.or32(TrustedImm32(1), resultGPR); - speculationCheck( - BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild.index(), + typeCheck( + JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, SpecCell | SpecOther, m_jit.branch32( MacroAssembler::NotEqual, resultGPR, MacroAssembler::TrustedImm32(JSValue::NullTag))); @@ -1465,33 +1443,54 @@ void SpeculativeJIT::compileObjectToObjectOrOtherEquality( m_jit.move(TrustedImm32(1), resultGPR); done.link(&m_jit); - booleanResult(resultGPR, m_compileIndex); + booleanResult(resultGPR, m_currentNode); } -void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality( - Edge leftChild, Edge rightChild, NodeIndex branchNodeIndex, - const ClassInfo* classInfo, PredictionChecker predictionCheck) +void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality(Edge leftChild, Edge rightChild, Node* branchNode) { - Node& branchNode = at(branchNodeIndex); - BlockIndex taken = branchNode.takenBlockIndex(); - BlockIndex notTaken = branchNode.notTakenBlockIndex(); + BlockIndex taken = branchNode->takenBlockIndex(); + BlockIndex notTaken = branchNode->notTakenBlockIndex(); SpeculateCellOperand op1(this, leftChild); - JSValueOperand op2(this, rightChild); + JSValueOperand op2(this, rightChild, ManualOperandSpeculation); GPRTemporary result(this); GPRReg op1GPR = op1.gpr(); GPRReg op2TagGPR = op2.tagGPR(); GPRReg op2PayloadGPR = op2.payloadGPR(); GPRReg resultGPR = result.gpr(); - - if (!predictionCheck(m_state.forNode(leftChild).m_type)) { - speculationCheck( - BadType, JSValueSource::unboxedCell(op1GPR), leftChild.index(), - m_jit.branchPtr( - MacroAssembler::NotEqual, - MacroAssembler::Address(op1GPR, JSCell::classInfoOffset()), - MacroAssembler::TrustedImmPtr(classInfo))); + GPRTemporary structure; + GPRReg structureGPR = InvalidGPRReg; + + bool masqueradesAsUndefinedWatchpointValid = m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid(); + + if (!masqueradesAsUndefinedWatchpointValid) { + // The masquerades as undefined case will use the structure register, so allocate it here. + // Do this at the top of the function to avoid branching around a register allocation. 
+ GPRTemporary realStructure(this); + structure.adopt(realStructure); + structureGPR = structure.gpr(); + } + + if (masqueradesAsUndefinedWatchpointValid) { + m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint()); + DFG_TYPE_CHECK( + JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr( + MacroAssembler::Equal, + MacroAssembler::Address(op1GPR, JSCell::structureOffset()), + MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + } else { + m_jit.loadPtr(MacroAssembler::Address(op1GPR, JSCell::structureOffset()), structureGPR); + DFG_TYPE_CHECK( + JSValueSource::unboxedCell(op1GPR), leftChild, SpecObject, m_jit.branchPtr( + MacroAssembler::Equal, + structureGPR, + MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + speculationCheck(BadType, JSValueSource::unboxedCell(op1GPR), leftChild, + m_jit.branchTest8( + MacroAssembler::NonZero, + MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); } // It seems that most of the time when programs do a == b where b may be either null/undefined @@ -1499,17 +1498,28 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality( MacroAssembler::Jump rightNotCell = m_jit.branch32(MacroAssembler::NotEqual, op2TagGPR, TrustedImm32(JSValue::CellTag)); - // We know that within this branch, rightChild must be a cell. If the CFA can tell us that the - // proof, when filtered on cell, demonstrates that we have an object of the desired type - // (predictionCheck() will test for FinalObject or Array, currently), then we can skip the - // speculation. - if (!predictionCheck(m_state.forNode(rightChild).m_type & PredictCell)) { - speculationCheck( - BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild.index(), + // We know that within this branch, rightChild must be a cell. + if (masqueradesAsUndefinedWatchpointValid) { + m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint()); + DFG_TYPE_CHECK( + JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, + m_jit.branchPtr( + MacroAssembler::Equal, + MacroAssembler::Address(op2PayloadGPR, JSCell::structureOffset()), + MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + } else { + m_jit.loadPtr(MacroAssembler::Address(op2PayloadGPR, JSCell::structureOffset()), structureGPR); + DFG_TYPE_CHECK( + JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, (~SpecCell) | SpecObject, m_jit.branchPtr( - MacroAssembler::NotEqual, - MacroAssembler::Address(op2PayloadGPR, JSCell::classInfoOffset()), - MacroAssembler::TrustedImmPtr(classInfo))); + MacroAssembler::Equal, + structureGPR, + MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + speculationCheck(BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, + m_jit.branchTest8( + MacroAssembler::NonZero, + MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::TrustedImm32(MasqueradesAsUndefined))); } // At this point we know that we can perform a straight-forward equality comparison on pointer @@ -1519,7 +1529,7 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality( // We know that within this branch, rightChild must not be a cell. Check if that is enough to // prove that it is either null or undefined. 
- if (isOtherPrediction(m_state.forNode(rightChild).m_type & ~PredictCell)) + if (!needsTypeCheck(rightChild, SpecCell | SpecOther)) rightNotCell.link(&m_jit); else { jump(notTaken, ForceJump); @@ -1528,8 +1538,8 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality( m_jit.move(op2TagGPR, resultGPR); m_jit.or32(TrustedImm32(1), resultGPR); - speculationCheck( - BadType, JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild.index(), + typeCheck( + JSValueRegs(op2TagGPR, op2PayloadGPR), rightChild, SpecCell | SpecOther, m_jit.branch32( MacroAssembler::NotEqual, resultGPR, MacroAssembler::TrustedImm32(JSValue::NullTag))); @@ -1538,22 +1548,22 @@ void SpeculativeJIT::compilePeepHoleObjectToObjectOrOtherEquality( jump(notTaken); } -void SpeculativeJIT::compileIntegerCompare(Node& node, MacroAssembler::RelationalCondition condition) +void SpeculativeJIT::compileIntegerCompare(Node* node, MacroAssembler::RelationalCondition condition) { - SpeculateIntegerOperand op1(this, node.child1()); - SpeculateIntegerOperand op2(this, node.child2()); + SpeculateIntegerOperand op1(this, node->child1()); + SpeculateIntegerOperand op2(this, node->child2()); GPRTemporary resultPayload(this); m_jit.compare32(condition, op1.gpr(), op2.gpr(), resultPayload.gpr()); // If we add a DataFormatBool, we should use it here. - booleanResult(resultPayload.gpr(), m_compileIndex); + booleanResult(resultPayload.gpr(), node); } -void SpeculativeJIT::compileDoubleCompare(Node& node, MacroAssembler::DoubleCondition condition) +void SpeculativeJIT::compileDoubleCompare(Node* node, MacroAssembler::DoubleCondition condition) { - SpeculateDoubleOperand op1(this, node.child1()); - SpeculateDoubleOperand op2(this, node.child2()); + SpeculateDoubleOperand op1(this, node->child1()); + SpeculateDoubleOperand op2(this, node->child2()); GPRTemporary resultPayload(this); m_jit.move(TrustedImm32(1), resultPayload.gpr()); @@ -1561,13 +1571,13 @@ void SpeculativeJIT::compileDoubleCompare(Node& node, MacroAssembler::DoubleCond m_jit.move(TrustedImm32(0), resultPayload.gpr()); trueCase.link(&m_jit); - booleanResult(resultPayload.gpr(), m_compileIndex); + booleanResult(resultPayload.gpr(), node); } -void SpeculativeJIT::compileValueAdd(Node& node) +void SpeculativeJIT::compileValueAdd(Node* node) { - JSValueOperand op1(this, node.child1()); - JSValueOperand op2(this, node.child2()); + JSValueOperand op1(this, node->child1()); + JSValueOperand op2(this, node->child2()); GPRReg op1TagGPR = op1.tagGPR(); GPRReg op1PayloadGPR = op1.payloadGPR(); @@ -1578,139 +1588,223 @@ void SpeculativeJIT::compileValueAdd(Node& node) GPRResult2 resultTag(this); GPRResult resultPayload(this); - if (isKnownNotNumber(node.child1().index()) || isKnownNotNumber(node.child2().index())) + if (isKnownNotNumber(node->child1().node()) || isKnownNotNumber(node->child2().node())) callOperation(operationValueAddNotNumber, resultTag.gpr(), resultPayload.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR); else callOperation(operationValueAdd, resultTag.gpr(), resultPayload.gpr(), op1TagGPR, op1PayloadGPR, op2TagGPR, op2PayloadGPR); - jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex); + jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); } -void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse, const ClassInfo* classInfo, bool needSpeculationCheck) +void SpeculativeJIT::compileObjectOrOtherLogicalNot(Edge nodeUse) { - JSValueOperand value(this, nodeUse); + JSValueOperand value(this, nodeUse, ManualOperandSpeculation); 
GPRTemporary resultPayload(this); GPRReg valueTagGPR = value.tagGPR(); GPRReg valuePayloadGPR = value.payloadGPR(); GPRReg resultPayloadGPR = resultPayload.gpr(); - + GPRTemporary structure; + GPRReg structureGPR = InvalidGPRReg; + + bool masqueradesAsUndefinedWatchpointValid = m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid(); + + if (!masqueradesAsUndefinedWatchpointValid) { + // The masquerades as undefined case will use the structure register, so allocate it here. + // Do this at the top of the function to avoid branching around a register allocation. + GPRTemporary realStructure(this); + structure.adopt(realStructure); + structureGPR = structure.gpr(); + } + MacroAssembler::Jump notCell = m_jit.branch32(MacroAssembler::NotEqual, valueTagGPR, TrustedImm32(JSValue::CellTag)); - if (needSpeculationCheck) - speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(valuePayloadGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo))); + if (masqueradesAsUndefinedWatchpointValid) { + m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint()); + + DFG_TYPE_CHECK( + JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject, + m_jit.branchPtr( + MacroAssembler::Equal, + MacroAssembler::Address(valuePayloadGPR, JSCell::structureOffset()), + MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + } else { + m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureOffset()), structureGPR); + + DFG_TYPE_CHECK( + JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject, + m_jit.branchPtr( + MacroAssembler::Equal, + structureGPR, + MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + + MacroAssembler::Jump isNotMasqueradesAsUndefined = + m_jit.branchTest8( + MacroAssembler::Zero, + MacroAssembler::Address(structureGPR, Structure::typeInfoFlagsOffset()), + MacroAssembler::TrustedImm32(MasqueradesAsUndefined)); + + speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, + m_jit.branchPtr( + MacroAssembler::Equal, + MacroAssembler::Address(structureGPR, Structure::globalObjectOffset()), + MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)))); + + isNotMasqueradesAsUndefined.link(&m_jit); + } m_jit.move(TrustedImm32(0), resultPayloadGPR); MacroAssembler::Jump done = m_jit.jump(); notCell.link(&m_jit); COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag); - if (needSpeculationCheck) { + if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) { m_jit.move(valueTagGPR, resultPayloadGPR); m_jit.or32(TrustedImm32(1), resultPayloadGPR); - speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, m_jit.branch32(MacroAssembler::NotEqual, resultPayloadGPR, TrustedImm32(JSValue::NullTag))); + typeCheck( + JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, SpecCell | SpecOther, + m_jit.branch32( + MacroAssembler::NotEqual, + resultPayloadGPR, + TrustedImm32(JSValue::NullTag))); } m_jit.move(TrustedImm32(1), resultPayloadGPR); done.link(&m_jit); - booleanResult(resultPayloadGPR, m_compileIndex); + booleanResult(resultPayloadGPR, m_currentNode); } -void SpeculativeJIT::compileLogicalNot(Node& node) +void SpeculativeJIT::compileLogicalNot(Node* node) { - if 
(at(node.child1()).shouldSpeculateBoolean()) { - SpeculateBooleanOperand value(this, node.child1()); + switch (node->child1().useKind()) { + case BooleanUse: { + SpeculateBooleanOperand value(this, node->child1()); GPRTemporary result(this, value); m_jit.xor32(TrustedImm32(1), value.gpr(), result.gpr()); - booleanResult(result.gpr(), m_compileIndex); - return; - } - if (at(node.child1()).shouldSpeculateFinalObjectOrOther()) { - compileObjectOrOtherLogicalNot(node.child1(), &JSFinalObject::s_info, !isFinalObjectOrOtherPrediction(m_state.forNode(node.child1()).m_type)); + booleanResult(result.gpr(), node); return; } - if (at(node.child1()).shouldSpeculateArrayOrOther()) { - compileObjectOrOtherLogicalNot(node.child1(), &JSArray::s_info, !isArrayOrOtherPrediction(m_state.forNode(node.child1()).m_type)); + + case ObjectOrOtherUse: { + compileObjectOrOtherLogicalNot(node->child1()); return; } - if (at(node.child1()).shouldSpeculateInteger()) { - SpeculateIntegerOperand value(this, node.child1()); + + case Int32Use: { + SpeculateIntegerOperand value(this, node->child1()); GPRTemporary resultPayload(this, value); m_jit.compare32(MacroAssembler::Equal, value.gpr(), MacroAssembler::TrustedImm32(0), resultPayload.gpr()); - booleanResult(resultPayload.gpr(), m_compileIndex); + booleanResult(resultPayload.gpr(), node); return; } - if (at(node.child1()).shouldSpeculateNumber()) { - SpeculateDoubleOperand value(this, node.child1()); + + case NumberUse: { + SpeculateDoubleOperand value(this, node->child1()); FPRTemporary scratch(this); GPRTemporary resultPayload(this); m_jit.move(TrustedImm32(0), resultPayload.gpr()); MacroAssembler::Jump nonZero = m_jit.branchDoubleNonZero(value.fpr(), scratch.fpr()); m_jit.move(TrustedImm32(1), resultPayload.gpr()); nonZero.link(&m_jit); - booleanResult(resultPayload.gpr(), m_compileIndex); + booleanResult(resultPayload.gpr(), node); return; } - JSValueOperand arg1(this, node.child1()); - GPRTemporary resultPayload(this, arg1, false); - GPRReg arg1TagGPR = arg1.tagGPR(); - GPRReg arg1PayloadGPR = arg1.payloadGPR(); - GPRReg resultPayloadGPR = resultPayload.gpr(); + case UntypedUse: { + JSValueOperand arg1(this, node->child1()); + GPRTemporary resultPayload(this, arg1, false); + GPRReg arg1TagGPR = arg1.tagGPR(); + GPRReg arg1PayloadGPR = arg1.payloadGPR(); + GPRReg resultPayloadGPR = resultPayload.gpr(); - arg1.use(); + arg1.use(); - JITCompiler::Jump fastCase = m_jit.branch32(JITCompiler::Equal, arg1TagGPR, TrustedImm32(JSValue::BooleanTag)); - - silentSpillAllRegisters(resultPayloadGPR); - callOperation(dfgConvertJSValueToBoolean, resultPayloadGPR, arg1TagGPR, arg1PayloadGPR); - silentFillAllRegisters(resultPayloadGPR); - JITCompiler::Jump doNot = m_jit.jump(); - - fastCase.link(&m_jit); - m_jit.move(arg1PayloadGPR, resultPayloadGPR); + JITCompiler::Jump slowCase = m_jit.branch32(JITCompiler::NotEqual, arg1TagGPR, TrustedImm32(JSValue::BooleanTag)); + + m_jit.move(arg1PayloadGPR, resultPayloadGPR); - doNot.link(&m_jit); - m_jit.xor32(TrustedImm32(1), resultPayloadGPR); - booleanResult(resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly); + addSlowPathGenerator( + slowPathCall( + slowCase, this, dfgConvertJSValueToBoolean, resultPayloadGPR, arg1TagGPR, + arg1PayloadGPR)); + + m_jit.xor32(TrustedImm32(1), resultPayloadGPR); + booleanResult(resultPayloadGPR, node, UseChildrenCalledExplicitly); + return; + } + + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } } -void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BlockIndex taken, BlockIndex 
notTaken, const ClassInfo* classInfo, bool needSpeculationCheck) +void SpeculativeJIT::emitObjectOrOtherBranch(Edge nodeUse, BlockIndex taken, BlockIndex notTaken) { - JSValueOperand value(this, nodeUse); + JSValueOperand value(this, nodeUse, ManualOperandSpeculation); GPRTemporary scratch(this); GPRReg valueTagGPR = value.tagGPR(); GPRReg valuePayloadGPR = value.payloadGPR(); GPRReg scratchGPR = scratch.gpr(); MacroAssembler::Jump notCell = m_jit.branch32(MacroAssembler::NotEqual, valueTagGPR, TrustedImm32(JSValue::CellTag)); - if (needSpeculationCheck) - speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(valuePayloadGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(classInfo))); + if (m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) { + m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint()); + + DFG_TYPE_CHECK( + JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject, + m_jit.branchPtr( + MacroAssembler::Equal, + MacroAssembler::Address(valuePayloadGPR, JSCell::structureOffset()), + MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + } else { + m_jit.loadPtr(MacroAssembler::Address(valuePayloadGPR, JSCell::structureOffset()), scratchGPR); + + DFG_TYPE_CHECK( + JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, (~SpecCell) | SpecObject, + m_jit.branchPtr( + MacroAssembler::Equal, + scratchGPR, + MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get()))); + + JITCompiler::Jump isNotMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::Zero, MacroAssembler::Address(scratchGPR, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + + speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, + m_jit.branchPtr( + MacroAssembler::Equal, + MacroAssembler::Address(scratchGPR, Structure::globalObjectOffset()), + MacroAssembler::TrustedImmPtr(m_jit.graph().globalObjectFor(m_currentNode->codeOrigin)))); + + isNotMasqueradesAsUndefined.link(&m_jit); + } jump(taken, ForceJump); notCell.link(&m_jit); COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag); - if (needSpeculationCheck) { + if (needsTypeCheck(nodeUse, SpecCell | SpecOther)) { m_jit.move(valueTagGPR, scratchGPR); m_jit.or32(TrustedImm32(1), scratchGPR); - speculationCheck(BadType, JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, m_jit.branch32(MacroAssembler::NotEqual, scratchGPR, TrustedImm32(JSValue::NullTag))); + typeCheck( + JSValueRegs(valueTagGPR, valuePayloadGPR), nodeUse, SpecCell | SpecOther, + m_jit.branch32(MacroAssembler::NotEqual, scratchGPR, TrustedImm32(JSValue::NullTag))); } jump(notTaken); - noResult(m_compileIndex); + noResult(m_currentNode); } -void SpeculativeJIT::emitBranch(Node& node) +void SpeculativeJIT::emitBranch(Node* node) { - BlockIndex taken = node.takenBlockIndex(); - BlockIndex notTaken = node.notTakenBlockIndex(); + BlockIndex taken = node->takenBlockIndex(); + BlockIndex notTaken = node->notTakenBlockIndex(); - if (at(node.child1()).shouldSpeculateBoolean()) { - SpeculateBooleanOperand value(this, node.child1()); + switch (node->child1().useKind()) { + case BooleanUse: { + SpeculateBooleanOperand value(this, node->child1()); MacroAssembler::ResultCondition condition = MacroAssembler::NonZero; - if (taken == (m_block + 1)) { + if (taken == 
nextBlock()) { condition = MacroAssembler::Zero; BlockIndex tmp = taken; taken = notTaken; @@ -1720,35 +1814,43 @@ void SpeculativeJIT::emitBranch(Node& node) branchTest32(condition, value.gpr(), TrustedImm32(1), taken); jump(notTaken); - noResult(m_compileIndex); - } else if (at(node.child1()).shouldSpeculateFinalObjectOrOther()) { - emitObjectOrOtherBranch(node.child1(), taken, notTaken, &JSFinalObject::s_info, !isFinalObjectOrOtherPrediction(m_state.forNode(node.child1()).m_type)); - } else if (at(node.child1()).shouldSpeculateArrayOrOther()) { - emitObjectOrOtherBranch(node.child1(), taken, notTaken, &JSArray::s_info, !isArrayOrOtherPrediction(m_state.forNode(node.child1()).m_type)); - } else if (at(node.child1()).shouldSpeculateNumber()) { - if (at(node.child1()).shouldSpeculateInteger()) { + noResult(node); + return; + } + + case ObjectOrOtherUse: { + emitObjectOrOtherBranch(node->child1(), taken, notTaken); + return; + } + + case NumberUse: + case Int32Use: { + if (node->child1().useKind() == Int32Use) { bool invert = false; - if (taken == (m_block + 1)) { + if (taken == nextBlock()) { invert = true; BlockIndex tmp = taken; taken = notTaken; notTaken = tmp; } - SpeculateIntegerOperand value(this, node.child1()); + SpeculateIntegerOperand value(this, node->child1()); branchTest32(invert ? MacroAssembler::Zero : MacroAssembler::NonZero, value.gpr(), taken); } else { - SpeculateDoubleOperand value(this, node.child1()); + SpeculateDoubleOperand value(this, node->child1()); FPRTemporary scratch(this); branchDoubleNonZero(value.fpr(), scratch.fpr(), taken); } jump(notTaken); - noResult(m_compileIndex); - } else { - JSValueOperand value(this, node.child1()); + noResult(node); + return; + } + + case UntypedUse: { + JSValueOperand value(this, node->child1()); value.fill(); GPRReg valueTagGPR = value.tagGPR(); GPRReg valuePayloadGPR = value.payloadGPR(); @@ -1756,7 +1858,7 @@ void SpeculativeJIT::emitBranch(Node& node) GPRTemporary result(this); GPRReg resultGPR = result.gpr(); - use(node.child1()); + use(node->child1()); JITCompiler::Jump fastPath = m_jit.branch32(JITCompiler::Equal, valueTagGPR, JITCompiler::TrustedImm32(JSValue::Int32Tag)); JITCompiler::Jump slowPath = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, JITCompiler::TrustedImm32(JSValue::BooleanTag)); @@ -1773,99 +1875,207 @@ void SpeculativeJIT::emitBranch(Node& node) branchTest32(JITCompiler::NonZero, resultGPR, taken); jump(notTaken); - noResult(m_compileIndex, UseChildrenCalledExplicitly); + noResult(node, UseChildrenCalledExplicitly); + return; + } + + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } +} + +template<typename BaseOperandType, typename PropertyOperandType, typename ValueOperandType, typename TagType> +void SpeculativeJIT::compileContiguousPutByVal(Node* node, BaseOperandType& base, PropertyOperandType& property, ValueOperandType& value, GPRReg valuePayloadReg, TagType valueTag) +{ + Edge child4 = m_jit.graph().varArgChild(node, 3); + + ArrayMode arrayMode = node->arrayMode(); + + GPRReg baseReg = base.gpr(); + GPRReg propertyReg = property.gpr(); + + StorageOperand storage(this, child4); + GPRReg storageReg = storage.gpr(); + + if (node->op() == PutByValAlias) { + // Store the value to the array. 
+ GPRReg propertyReg = property.gpr(); + m_jit.store32(valueTag, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); + + noResult(node); + return; + } + + MacroAssembler::Jump slowCase; + + if (arrayMode.isInBounds()) { + speculationCheck( + StoreToHoleOrOutOfBounds, JSValueRegs(), 0, + m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); + } else { + MacroAssembler::Jump inBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())); + + slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfVectorLength())); + + if (!arrayMode.isOutOfBounds()) + speculationCheck(OutOfBounds, JSValueRegs(), 0, slowCase); + + m_jit.add32(TrustedImm32(1), propertyReg); + m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength())); + m_jit.sub32(TrustedImm32(1), propertyReg); + + inBounds.link(&m_jit); + } + + m_jit.store32(valueTag, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); + + base.use(); + property.use(); + value.use(); + storage.use(); + + if (arrayMode.isOutOfBounds()) { + addSlowPathGenerator( + slowPathCall( + slowCase, this, + m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict, + NoResult, baseReg, propertyReg, valueTag, valuePayloadReg)); } + + noResult(node, UseChildrenCalledExplicitly); } -void SpeculativeJIT::compile(Node& node) +void SpeculativeJIT::compile(Node* node) { - NodeType op = node.op(); + NodeType op = node->op(); + +#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION) + m_jit.clearRegisterAllocationOffsets(); +#endif switch (op) { case JSConstant: - initConstantInfo(m_compileIndex); + initConstantInfo(node); + break; + + case PhantomArguments: + initConstantInfo(node); break; case WeakJSConstant: - m_jit.addWeakReference(node.weakConstant()); - initConstantInfo(m_compileIndex); + m_jit.addWeakReference(node->weakConstant()); + initConstantInfo(node); break; + case Identity: { + RELEASE_ASSERT_NOT_REACHED(); + break; + } + case GetLocal: { - PredictedType prediction = node.variableAccessData()->prediction(); - AbstractValue& value = block()->valuesAtHead.operand(node.local()); + SpeculatedType prediction = node->variableAccessData()->prediction(); + AbstractValue& value = m_state.variables().operand(node->local()); // If we have no prediction for this local, then don't attempt to compile. 
- if (prediction == PredictNone || value.isClear()) { - terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode); + if (prediction == SpecNone) { + terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); break; } - if (!m_jit.graph().isCaptured(node.local())) { - if (node.variableAccessData()->shouldUseDoubleFormat()) { - FPRTemporary result(this); - m_jit.loadDouble(JITCompiler::addressFor(node.local()), result.fpr()); - VirtualRegister virtualRegister = node.virtualRegister(); - m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble); - m_generationInfo[virtualRegister].initDouble(m_compileIndex, node.refCount(), result.fpr()); - break; - } + // If the CFA is tracking this variable and it found that the variable + // cannot have been assigned, then don't attempt to proceed. + if (value.isClear()) { + // FIXME: We should trap instead. + // https://bugs.webkit.org/show_bug.cgi?id=110383 + terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); + break; + } - if (isInt32Prediction(prediction)) { - GPRTemporary result(this); - m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr()); - - // Like integerResult, but don't useChildren - our children are phi nodes, - // and don't represent values within this dataflow with virtual registers. - VirtualRegister virtualRegister = node.virtualRegister(); - m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger); - m_generationInfo[virtualRegister].initInteger(m_compileIndex, node.refCount(), result.gpr()); - break; - } - - if (isArrayPrediction(prediction)) { - GPRTemporary result(this); - m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr()); - - // Like cellResult, but don't useChildren - our children are phi nodes, - // and don't represent values within this dataflow with virtual registers. - VirtualRegister virtualRegister = node.virtualRegister(); - m_gprs.retain(result.gpr(), virtualRegister, SpillOrderCell); - m_generationInfo[virtualRegister].initCell(m_compileIndex, node.refCount(), result.gpr()); - break; - } - - if (isBooleanPrediction(prediction)) { - GPRTemporary result(this); - m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr()); - - // Like booleanResult, but don't useChildren - our children are phi nodes, - // and don't represent values within this dataflow with virtual registers. - VirtualRegister virtualRegister = node.virtualRegister(); - m_gprs.retain(result.gpr(), virtualRegister, SpillOrderBoolean); - m_generationInfo[virtualRegister].initBoolean(m_compileIndex, node.refCount(), result.gpr()); - break; - } + if (node->variableAccessData()->shouldUseDoubleFormat()) { + FPRTemporary result(this); + m_jit.loadDouble(JITCompiler::addressFor(node->local()), result.fpr()); + VirtualRegister virtualRegister = node->virtualRegister(); + m_fprs.retain(result.fpr(), virtualRegister, SpillOrderDouble); + m_generationInfo[virtualRegister].initDouble(node, node->refCount(), result.fpr()); + break; + } + + if (isInt32Speculation(value.m_type)) { + GPRTemporary result(this); + m_jit.load32(JITCompiler::payloadFor(node->local()), result.gpr()); + + // Like integerResult, but don't useChildren - our children are phi nodes, + // and don't represent values within this dataflow with virtual registers. 
+ VirtualRegister virtualRegister = node->virtualRegister(); + m_gprs.retain(result.gpr(), virtualRegister, SpillOrderInteger); + m_generationInfo[virtualRegister].initInteger(node, node->refCount(), result.gpr()); + break; + } + + if (isCellSpeculation(value.m_type)) { + GPRTemporary result(this); + m_jit.load32(JITCompiler::payloadFor(node->local()), result.gpr()); + + // Like cellResult, but don't useChildren - our children are phi nodes, + // and don't represent values within this dataflow with virtual registers. + VirtualRegister virtualRegister = node->virtualRegister(); + m_gprs.retain(result.gpr(), virtualRegister, SpillOrderCell); + m_generationInfo[virtualRegister].initCell(node, node->refCount(), result.gpr()); + break; + } + + if (isBooleanSpeculation(value.m_type)) { + GPRTemporary result(this); + m_jit.load32(JITCompiler::payloadFor(node->local()), result.gpr()); + + // Like booleanResult, but don't useChildren - our children are phi nodes, + // and don't represent values within this dataflow with virtual registers. + VirtualRegister virtualRegister = node->virtualRegister(); + m_gprs.retain(result.gpr(), virtualRegister, SpillOrderBoolean); + m_generationInfo[virtualRegister].initBoolean(node, node->refCount(), result.gpr()); + break; } GPRTemporary result(this); GPRTemporary tag(this); - m_jit.load32(JITCompiler::payloadFor(node.local()), result.gpr()); - m_jit.load32(JITCompiler::tagFor(node.local()), tag.gpr()); + m_jit.load32(JITCompiler::payloadFor(node->local()), result.gpr()); + m_jit.load32(JITCompiler::tagFor(node->local()), tag.gpr()); // Like jsValueResult, but don't useChildren - our children are phi nodes, // and don't represent values within this dataflow with virtual registers. - VirtualRegister virtualRegister = node.virtualRegister(); + VirtualRegister virtualRegister = node->virtualRegister(); m_gprs.retain(result.gpr(), virtualRegister, SpillOrderJS); m_gprs.retain(tag.gpr(), virtualRegister, SpillOrderJS); - DataFormat format; - if (isCellPrediction(value.m_type) - && !m_jit.graph().isCaptured(node.local())) - format = DataFormatJSCell; - else - format = DataFormatJS; - m_generationInfo[virtualRegister].initJSValue(m_compileIndex, node.refCount(), tag.gpr(), result.gpr(), format); + m_generationInfo[virtualRegister].initJSValue(node, node->refCount(), tag.gpr(), result.gpr(), DataFormatJS); + break; + } + + case GetLocalUnlinked: { + GPRTemporary payload(this); + GPRTemporary tag(this); + m_jit.load32(JITCompiler::payloadFor(node->unlinkedLocal()), payload.gpr()); + m_jit.load32(JITCompiler::tagFor(node->unlinkedLocal()), tag.gpr()); + jsValueResult(tag.gpr(), payload.gpr(), node); + break; + } + + case MovHintAndCheck: { + compileMovHintAndCheck(node); + break; + } + + case InlineStart: { + compileInlineStart(node); + break; + } + + case MovHint: + case ZombieHint: { + RELEASE_ASSERT_NOT_REACHED(); break; } @@ -1873,88 +2083,65 @@ void SpeculativeJIT::compile(Node& node) // SetLocal doubles as a hint as to where a node will be stored and // as a speculation point. So before we speculate make sure that we // know where the child of this node needs to go in the virtual - // register file. + // stack. compileMovHint(node); - // As far as OSR is concerned, we're on the bytecode index corresponding - // to the *next* instruction, since we've already "executed" the - // SetLocal and whatever other DFG Nodes are associated with the same - // bytecode index as the SetLocal. 
- ASSERT(m_codeOriginForOSR == node.codeOrigin); - Node* nextNode = &at(block()->at(m_indexInBlock + 1)); - - // But even more oddly, we need to be super careful about the following - // sequence: - // - // a: Foo() - // b: SetLocal(@a) - // c: Flush(@b) - // - // This next piece of crazy takes care of this. - if (nextNode->op() == Flush && nextNode->child1() == m_compileIndex) - nextNode = &at(block()->at(m_indexInBlock + 2)); - - // Oddly, it's possible for the bytecode index for the next node to be - // equal to ours. This will happen for op_post_inc. And, even more oddly, - // this is just fine. Ordinarily, this wouldn't be fine, since if the - // next node failed OSR then we'd be OSR-ing with this SetLocal's local - // variable already set even though from the standpoint of the old JIT, - // this SetLocal should not have executed. But for op_post_inc, it's just - // fine, because this SetLocal's local (i.e. the LHS in a x = y++ - // statement) would be dead anyway - so the fact that DFG would have - // already made the assignment, and baked it into the register file during - // OSR exit, would not be visible to the old JIT in any way. - m_codeOriginForOSR = nextNode->codeOrigin; - - if (!m_jit.graph().isCaptured(node.local())) { - if (node.variableAccessData()->shouldUseDoubleFormat()) { - SpeculateDoubleOperand value(this, node.child1()); - m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node.local())); - noResult(m_compileIndex); + if (node->variableAccessData()->shouldUnboxIfPossible()) { + if (node->variableAccessData()->shouldUseDoubleFormat()) { + SpeculateDoubleOperand value(this, node->child1()); + m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node->local())); + noResult(node); // Indicate that it's no longer necessary to retrieve the value of - // this bytecode variable from registers or other locations in the register file, + // this bytecode variable from registers or other locations in the stack, // but that it is stored as a double. 
- valueSourceReferenceForOperand(node.local()) = ValueSource(DoubleInRegisterFile); + recordSetLocal(node->local(), ValueSource(DoubleInJSStack)); break; } - PredictedType predictedType = node.variableAccessData()->argumentAwarePrediction(); - if (m_generationInfo[at(node.child1()).virtualRegister()].registerFormat() == DataFormatDouble) { - DoubleOperand value(this, node.child1()); - m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node.local())); - noResult(m_compileIndex); - valueSourceReferenceForOperand(node.local()) = ValueSource(DoubleInRegisterFile); + SpeculatedType predictedType = node->variableAccessData()->argumentAwarePrediction(); + if (m_generationInfo[node->child1()->virtualRegister()].registerFormat() == DataFormatDouble) { + SpeculateDoubleOperand value(this, node->child1(), ManualOperandSpeculation); + m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node->local())); + noResult(node); + recordSetLocal(node->local(), ValueSource(DoubleInJSStack)); break; } - if (isInt32Prediction(predictedType)) { - SpeculateIntegerOperand value(this, node.child1()); - m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local())); - noResult(m_compileIndex); - valueSourceReferenceForOperand(node.local()) = ValueSource(Int32InRegisterFile); + if (isInt32Speculation(predictedType)) { + SpeculateIntegerOperand value(this, node->child1()); + m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->local())); + noResult(node); + recordSetLocal(node->local(), ValueSource(Int32InJSStack)); break; } - if (isArrayPrediction(predictedType)) { - SpeculateCellOperand cell(this, node.child1()); + if (isCellSpeculation(predictedType)) { + SpeculateCellOperand cell(this, node->child1()); GPRReg cellGPR = cell.gpr(); - if (!isArrayPrediction(m_state.forNode(node.child1()).m_type)) - speculationCheck(BadType, JSValueSource::unboxedCell(cellGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(cellGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info))); - m_jit.storePtr(cellGPR, JITCompiler::payloadFor(node.local())); - noResult(m_compileIndex); - valueSourceReferenceForOperand(node.local()) = ValueSource(CellInRegisterFile); + m_jit.storePtr(cellGPR, JITCompiler::payloadFor(node->local())); + noResult(node); + recordSetLocal(node->local(), ValueSource(CellInJSStack)); break; } - if (isBooleanPrediction(predictedType)) { - SpeculateBooleanOperand value(this, node.child1()); - m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local())); - noResult(m_compileIndex); - valueSourceReferenceForOperand(node.local()) = ValueSource(BooleanInRegisterFile); + if (isBooleanSpeculation(predictedType)) { + SpeculateBooleanOperand value(this, node->child1()); + m_jit.store32(value.gpr(), JITCompiler::payloadFor(node->local())); + noResult(node); + recordSetLocal(node->local(), ValueSource(BooleanInJSStack)); break; } } - JSValueOperand value(this, node.child1()); - m_jit.store32(value.payloadGPR(), JITCompiler::payloadFor(node.local())); - m_jit.store32(value.tagGPR(), JITCompiler::tagFor(node.local())); - noResult(m_compileIndex); - valueSourceReferenceForOperand(node.local()) = ValueSource(ValueInRegisterFile); + JSValueOperand value(this, node->child1()); + m_jit.store32(value.payloadGPR(), JITCompiler::payloadFor(node->local())); + m_jit.store32(value.tagGPR(), JITCompiler::tagFor(node->local())); + noResult(node); + recordSetLocal(node->local(), ValueSource(ValueInJSStack)); + + // If we're storing an arguments object that has been 
optimized away, + // our variable event stream for OSR exit now reflects the optimized + // value (JSValue()). On the slow path, we want an arguments object + // instead. We add an additional move hint to show OSR exit that it + // needs to reconstruct the arguments object. + if (node->child1()->op() == PhantomArguments) + compileMovHint(node); + break; } @@ -1968,54 +2155,54 @@ void SpeculativeJIT::compile(Node& node) case BitAnd: case BitOr: case BitXor: - if (isInt32Constant(node.child1().index())) { - SpeculateIntegerOperand op2(this, node.child2()); + if (isInt32Constant(node->child1().node())) { + SpeculateIntegerOperand op2(this, node->child2()); GPRTemporary result(this, op2); - bitOp(op, valueOfInt32Constant(node.child1().index()), op2.gpr(), result.gpr()); + bitOp(op, valueOfInt32Constant(node->child1().node()), op2.gpr(), result.gpr()); - integerResult(result.gpr(), m_compileIndex); - } else if (isInt32Constant(node.child2().index())) { - SpeculateIntegerOperand op1(this, node.child1()); + integerResult(result.gpr(), node); + } else if (isInt32Constant(node->child2().node())) { + SpeculateIntegerOperand op1(this, node->child1()); GPRTemporary result(this, op1); - bitOp(op, valueOfInt32Constant(node.child2().index()), op1.gpr(), result.gpr()); + bitOp(op, valueOfInt32Constant(node->child2().node()), op1.gpr(), result.gpr()); - integerResult(result.gpr(), m_compileIndex); + integerResult(result.gpr(), node); } else { - SpeculateIntegerOperand op1(this, node.child1()); - SpeculateIntegerOperand op2(this, node.child2()); + SpeculateIntegerOperand op1(this, node->child1()); + SpeculateIntegerOperand op2(this, node->child2()); GPRTemporary result(this, op1, op2); GPRReg reg1 = op1.gpr(); GPRReg reg2 = op2.gpr(); bitOp(op, reg1, reg2, result.gpr()); - integerResult(result.gpr(), m_compileIndex); + integerResult(result.gpr(), node); } break; case BitRShift: case BitLShift: case BitURShift: - if (isInt32Constant(node.child2().index())) { - SpeculateIntegerOperand op1(this, node.child1()); + if (isInt32Constant(node->child2().node())) { + SpeculateIntegerOperand op1(this, node->child1()); GPRTemporary result(this, op1); - shiftOp(op, op1.gpr(), valueOfInt32Constant(node.child2().index()) & 0x1f, result.gpr()); + shiftOp(op, op1.gpr(), valueOfInt32Constant(node->child2().node()) & 0x1f, result.gpr()); - integerResult(result.gpr(), m_compileIndex); + integerResult(result.gpr(), node); } else { // Do not allow shift amount to be used as the result, MacroAssembler does not permit this. 
- SpeculateIntegerOperand op1(this, node.child1()); - SpeculateIntegerOperand op2(this, node.child2()); + SpeculateIntegerOperand op1(this, node->child1()); + SpeculateIntegerOperand op2(this, node->child2()); GPRTemporary result(this, op1); GPRReg reg1 = op1.gpr(); GPRReg reg2 = op2.gpr(); shiftOp(op, reg1, reg2, result.gpr()); - integerResult(result.gpr(), m_compileIndex); + integerResult(result.gpr(), node); } break; @@ -2034,29 +2221,21 @@ void SpeculativeJIT::compile(Node& node) break; } - case Int32ToDouble: { + case Int32ToDouble: + case ForwardInt32ToDouble: { compileInt32ToDouble(node); break; } - case CheckNumber: { - if (!isNumberPrediction(m_state.forNode(node.child1()).m_type)) { - JSValueOperand op1(this, node.child1()); - JITCompiler::Jump isInteger = m_jit.branch32(MacroAssembler::Equal, op1.tagGPR(), TrustedImm32(JSValue::Int32Tag)); - speculationCheck( - BadType, JSValueRegs(op1.tagGPR(), op1.payloadGPR()), node.child1().index(), - m_jit.branch32(MacroAssembler::AboveOrEqual, op1.tagGPR(), TrustedImm32(JSValue::LowestTag))); - isInteger.link(&m_jit); - } - noResult(m_compileIndex); - break; - } - case ValueAdd: case ArithAdd: compileAdd(node); break; + case MakeRope: + compileMakeRope(node); + break; + case ArithSub: compileArithSub(node); break; @@ -2069,25 +2248,42 @@ void SpeculativeJIT::compile(Node& node) compileArithMul(node); break; + case ArithIMul: + compileArithIMul(node); + break; + case ArithDiv: { - if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) { + switch (node->binaryUseKind()) { + case Int32Use: { #if CPU(X86) compileIntegerArithDivForX86(node); -#else // CPU(X86) -> so non-X86 code follows - ASSERT_NOT_REACHED(); // should have been coverted into a double divide. -#endif // CPU(X86) +#elif CPU(ARM64) + compileIntegerArithDivForARM64(node); +#elif CPU(APPLE_ARMV7S) + compileIntegerArithDivForARMv7s(node); +#else // CPU type without integer divide + RELEASE_ASSERT_NOT_REACHED(); // should have been converted into a double divide. 
+#endif + break; + } + + case NumberUse: { + SpeculateDoubleOperand op1(this, node->child1()); + SpeculateDoubleOperand op2(this, node->child2()); + FPRTemporary result(this, op1); + + FPRReg reg1 = op1.fpr(); + FPRReg reg2 = op2.fpr(); + m_jit.divDouble(reg1, reg2, result.fpr()); + + doubleResult(result.fpr(), node); + break; + } + + default: + RELEASE_ASSERT_NOT_REACHED(); break; } - - SpeculateDoubleOperand op1(this, node.child1()); - SpeculateDoubleOperand op2(this, node.child2()); - FPRTemporary result(this, op1); - - FPRReg reg1 = op1.fpr(); - FPRReg reg2 = op2.fpr(); - m_jit.divDouble(reg1, reg2, result.fpr()); - - doubleResult(result.fpr(), m_compileIndex); break; } @@ -2097,8 +2293,9 @@ void SpeculativeJIT::compile(Node& node) } case ArithAbs: { - if (at(node.child1()).shouldSpeculateInteger() && node.canSpeculateInteger()) { - SpeculateIntegerOperand op1(this, node.child1()); + switch (node->child1().useKind()) { + case Int32Use: { + SpeculateIntegerOperand op1(this, node->child1()); GPRTemporary result(this, op1); GPRTemporary scratch(this); @@ -2106,80 +2303,106 @@ void SpeculativeJIT::compile(Node& node) m_jit.rshift32(result.gpr(), MacroAssembler::TrustedImm32(31), scratch.gpr()); m_jit.add32(scratch.gpr(), result.gpr()); m_jit.xor32(scratch.gpr(), result.gpr()); - speculationCheck(Overflow, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Equal, result.gpr(), MacroAssembler::TrustedImm32(1 << 31))); - integerResult(result.gpr(), m_compileIndex); + speculationCheck(Overflow, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, result.gpr(), MacroAssembler::TrustedImm32(1 << 31))); + integerResult(result.gpr(), node); break; } - SpeculateDoubleOperand op1(this, node.child1()); - FPRTemporary result(this); - - m_jit.absDouble(op1.fpr(), result.fpr()); - doubleResult(result.fpr(), m_compileIndex); + + case NumberUse: { + SpeculateDoubleOperand op1(this, node->child1()); + FPRTemporary result(this); + + m_jit.absDouble(op1.fpr(), result.fpr()); + doubleResult(result.fpr(), node); + break; + } + + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } break; } case ArithMin: case ArithMax: { - if (Node::shouldSpeculateInteger(at(node.child1()), at(node.child2())) && node.canSpeculateInteger()) { - SpeculateStrictInt32Operand op1(this, node.child1()); - SpeculateStrictInt32Operand op2(this, node.child2()); + switch (node->binaryUseKind()) { + case Int32Use: { + SpeculateStrictInt32Operand op1(this, node->child1()); + SpeculateStrictInt32Operand op2(this, node->child2()); GPRTemporary result(this, op1); - - MacroAssembler::Jump op1Less = m_jit.branch32(op == ArithMin ? MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1.gpr(), op2.gpr()); - m_jit.move(op2.gpr(), result.gpr()); - if (op1.gpr() != result.gpr()) { + + GPRReg op1GPR = op1.gpr(); + GPRReg op2GPR = op2.gpr(); + GPRReg resultGPR = result.gpr(); + + MacroAssembler::Jump op1Less = m_jit.branch32(op == ArithMin ? 
MacroAssembler::LessThan : MacroAssembler::GreaterThan, op1GPR, op2GPR); + m_jit.move(op2GPR, resultGPR); + if (op1GPR != resultGPR) { MacroAssembler::Jump done = m_jit.jump(); op1Less.link(&m_jit); - m_jit.move(op1.gpr(), result.gpr()); + m_jit.move(op1GPR, resultGPR); done.link(&m_jit); } else op1Less.link(&m_jit); - integerResult(result.gpr(), m_compileIndex); + integerResult(resultGPR, node); break; } - SpeculateDoubleOperand op1(this, node.child1()); - SpeculateDoubleOperand op2(this, node.child2()); - FPRTemporary result(this, op1); - - MacroAssembler::JumpList done; + case NumberUse: { + SpeculateDoubleOperand op1(this, node->child1()); + SpeculateDoubleOperand op2(this, node->child2()); + FPRTemporary result(this, op1); + + FPRReg op1FPR = op1.fpr(); + FPRReg op2FPR = op2.fpr(); + FPRReg resultFPR = result.fpr(); + + MacroAssembler::JumpList done; - MacroAssembler::Jump op1Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleLessThan : MacroAssembler::DoubleGreaterThan, op1.fpr(), op2.fpr()); + MacroAssembler::Jump op1Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleLessThan : MacroAssembler::DoubleGreaterThan, op1FPR, op2FPR); - // op2 is eather the lesser one or one of then is NaN - MacroAssembler::Jump op2Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleGreaterThanOrEqual : MacroAssembler::DoubleLessThanOrEqual, op1.fpr(), op2.fpr()); + // op2 is eather the lesser one or one of then is NaN + MacroAssembler::Jump op2Less = m_jit.branchDouble(op == ArithMin ? MacroAssembler::DoubleGreaterThanOrEqual : MacroAssembler::DoubleLessThanOrEqual, op1FPR, op2FPR); - // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding - // op1 + op2 and putting it into result. - m_jit.addDouble(op1.fpr(), op2.fpr(), result.fpr()); - done.append(m_jit.jump()); + // Unordered case. We don't know which of op1, op2 is NaN. Manufacture NaN by adding + // op1 + op2 and putting it into result. 
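(A hedged, standalone C++ restatement of the value the double ArithMin path in this hunk computes; names are illustrative only, and ArithMax simply flips the two comparisons. It shows why adding the operands is a correct way to manufacture the NaN result in the unordered case.)

    // Mirrors the emitted branch structure: a if a < b, b if a >= b,
    // otherwise (unordered, i.e. at least one operand is NaN) return a + b, which is NaN.
    static double arithMinLikeThisHunk(double a, double b)
    {
        if (a < b)      // op1Less branch
            return a;
        if (a >= b)     // op2Less branch
            return b;
        return a + b;   // unordered case: the sum is guaranteed to be NaN
    }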
+ m_jit.addDouble(op1FPR, op2FPR, resultFPR); + done.append(m_jit.jump()); - op2Less.link(&m_jit); - m_jit.moveDouble(op2.fpr(), result.fpr()); + op2Less.link(&m_jit); + m_jit.moveDouble(op2FPR, resultFPR); - if (op1.fpr() != result.fpr()) { - done.append(m_jit.jump()); + if (op1FPR != resultFPR) { + done.append(m_jit.jump()); - op1Less.link(&m_jit); - m_jit.moveDouble(op1.fpr(), result.fpr()); - } else - op1Less.link(&m_jit); + op1Less.link(&m_jit); + m_jit.moveDouble(op1FPR, resultFPR); + } else + op1Less.link(&m_jit); - done.link(&m_jit); + done.link(&m_jit); - doubleResult(result.fpr(), m_compileIndex); + doubleResult(resultFPR, node); + break; + } + + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } break; } case ArithSqrt: { - SpeculateDoubleOperand op1(this, node.child1()); + SpeculateDoubleOperand op1(this, node->child1()); FPRTemporary result(this, op1); m_jit.sqrtDouble(op1.fpr(), result.fpr()); - doubleResult(result.fpr(), m_compileIndex); + doubleResult(result.fpr(), node); break; } @@ -2206,22 +2429,23 @@ void SpeculativeJIT::compile(Node& node) if (compare(node, JITCompiler::GreaterThanOrEqual, JITCompiler::DoubleGreaterThanOrEqual, operationCompareGreaterEq)) return; break; + + case CompareEqConstant: + ASSERT(isNullConstant(node->child2().node())); + if (nonSpeculativeCompareNull(node, node->child1())) + return; + break; case CompareEq: - if (isNullConstant(node.child1().index())) { - if (nonSpeculativeCompareNull(node, node.child2())) - return; - break; - } - if (isNullConstant(node.child2().index())) { - if (nonSpeculativeCompareNull(node, node.child1())) - return; - break; - } if (compare(node, JITCompiler::Equal, JITCompiler::DoubleEqual, operationCompareEq)) return; break; + case CompareStrictEqConstant: + if (compileStrictEqForConstant(node, node->child1(), valueOfJSConstant(node->child2().node()))) + return; + break; + case CompareStrictEq: if (compileStrictEq(node)) return; @@ -2238,15 +2462,32 @@ void SpeculativeJIT::compile(Node& node) break; } + case StringFromCharCode: { + compileFromCharCode(node); + break; + } + + case CheckArray: { + checkArray(node); + break; + } + + case Arrayify: + case ArrayifyToStructure: { + arrayify(node); + break; + } + case GetByVal: { - if (!node.prediction() || !at(node.child1()).prediction() || !at(node.child2()).prediction()) { - terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode); + switch (node->arrayMode().type()) { + case Array::SelectUsingPredictions: + case Array::ForceExit: + RELEASE_ASSERT_NOT_REACHED(); + terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); break; - } - - if (!at(node.child2()).shouldSpeculateInteger() || !isActionableArrayPrediction(at(node.child1()).prediction())) { - SpeculateCellOperand base(this, node.child1()); // Save a register, speculate cell. We'll probably be right. - JSValueOperand property(this, node.child2()); + case Array::Generic: { + SpeculateCellOperand base(this, node->child1()); // Save a register, speculate cell. We'll probably be right. 
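(The Generic GetByVal path that follows returns its result as a tag register plus a payload register. A minimal sketch of the 32_64 value layout that makes this necessary; the tag constants are made up, and only the split into two 32-bit halves reflects the real encoding.)

    #include <cstdint>

    union Boxed {
        double asDouble;                                       // full 64 bits hold an unboxed double
        struct Bits { int32_t payload; int32_t tag; } asBits;  // otherwise: 32-bit tag + 32-bit payload
    };

    enum IllustrativeTag : int32_t { TagInt32 = -1, TagCell = -2, TagEmpty = -3 };  // placeholders

    static Boxed boxInt32(int32_t value)
    {
        Boxed b;
        b.asBits.payload = value;   // the integer itself
        b.asBits.tag = TagInt32;    // marks the payload as an int32
        return b;
    }

A value already proven to be a cell only needs its payload (the object pointer) in one GPR, which is why the base operand here is a SpeculateCellOperand while the property and result travel as tag/payload pairs.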
+ JSValueOperand property(this, node->child2()); GPRReg baseGPR = base.gpr(); GPRReg propertyTagGPR = property.tagGPR(); GPRReg propertyPayloadGPR = property.payloadGPR(); @@ -2256,124 +2497,261 @@ void SpeculativeJIT::compile(Node& node) GPRResult resultPayload(this); callOperation(operationGetByValCell, resultTag.gpr(), resultPayload.gpr(), baseGPR, propertyTagGPR, propertyPayloadGPR); - jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex); + jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); break; } + case Array::Int32: + case Array::Contiguous: { + if (node->arrayMode().isInBounds()) { + SpeculateStrictInt32Operand property(this, node->child2()); + StorageOperand storage(this, node->child3()); + + GPRReg propertyReg = property.gpr(); + GPRReg storageReg = storage.gpr(); + + if (!m_compileOkay) + return; + + speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); + + GPRTemporary resultPayload(this); + if (node->arrayMode().type() == Array::Int32) { + speculationCheck( + OutOfBounds, JSValueRegs(), 0, + m_jit.branch32( + MacroAssembler::Equal, + MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), + TrustedImm32(JSValue::EmptyValueTag))); + m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr()); + integerResult(resultPayload.gpr(), node); + break; + } + + GPRTemporary resultTag(this); + m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr()); + speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag))); + m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr()); + jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); + break; + } - if (at(node.child1()).prediction() == PredictString) { - compileGetByValOnString(node); + SpeculateCellOperand base(this, node->child1()); + SpeculateStrictInt32Operand property(this, node->child2()); + StorageOperand storage(this, node->child3()); + + GPRReg baseReg = base.gpr(); + GPRReg propertyReg = property.gpr(); + GPRReg storageReg = storage.gpr(); + if (!m_compileOkay) return; - break; - } - - if (at(node.child1()).shouldSpeculateInt8Array()) { - compileGetByValOnIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), node, sizeof(int8_t), isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray); - if (!m_compileOkay) - return; - break; - } - - if (at(node.child1()).shouldSpeculateInt16Array()) { - compileGetByValOnIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), node, sizeof(int16_t), isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray); - if (!m_compileOkay) - return; - break; - } - - if (at(node.child1()).shouldSpeculateInt32Array()) { - compileGetByValOnIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), node, sizeof(int32_t), isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? 
NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray); - if (!m_compileOkay) - return; - break; - } - - if (at(node.child1()).shouldSpeculateUint8Array()) { - compileGetByValOnIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), node, sizeof(uint8_t), isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray); - if (!m_compileOkay) - return; - break; - } + + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + GPRReg resultTagReg = resultTag.gpr(); + GPRReg resultPayloadReg = resultPayload.gpr(); + + MacroAssembler::JumpList slowCases; - if (at(node.child1()).shouldSpeculateUint8ClampedArray()) { - compileGetByValOnIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), node, sizeof(uint8_t), isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray); - if (!m_compileOkay) - return; + slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); + + m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagReg); + m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg); + slowCases.append(m_jit.branch32(MacroAssembler::Equal, resultTagReg, TrustedImm32(JSValue::EmptyValueTag))); + + addSlowPathGenerator( + slowPathCall( + slowCases, this, operationGetByValArrayInt, + JSValueRegs(resultTagReg, resultPayloadReg), baseReg, propertyReg)); + + jsValueResult(resultTagReg, resultPayloadReg, node); break; } + case Array::Double: { + if (node->arrayMode().isInBounds()) { + if (node->arrayMode().isSaneChain()) { + JSGlobalObject* globalObject = m_jit.globalObjectFor(node->codeOrigin); + ASSERT(globalObject->arrayPrototypeChainIsSane()); + globalObject->arrayPrototype()->structure()->addTransitionWatchpoint(speculationWatchpoint()); + globalObject->objectPrototype()->structure()->addTransitionWatchpoint(speculationWatchpoint()); + } + + SpeculateStrictInt32Operand property(this, node->child2()); + StorageOperand storage(this, node->child3()); + + GPRReg propertyReg = property.gpr(); + GPRReg storageReg = storage.gpr(); + + if (!m_compileOkay) + return; + + speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); + + FPRTemporary result(this); + m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), result.fpr()); + if (!node->arrayMode().isSaneChain()) + speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, result.fpr(), result.fpr())); + doubleResult(result.fpr(), node); + break; + } - if (at(node.child1()).shouldSpeculateUint16Array()) { - compileGetByValOnIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), node, sizeof(uint16_t), isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? 
NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray); - if (!m_compileOkay) - return; - break; - } - - if (at(node.child1()).shouldSpeculateUint32Array()) { - compileGetByValOnIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), node, sizeof(uint32_t), isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray); - if (!m_compileOkay) - return; - break; - } - - if (at(node.child1()).shouldSpeculateFloat32Array()) { - compileGetByValOnFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), node, sizeof(float), isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks); - if (!m_compileOkay) - return; - break; - } - - if (at(node.child1()).shouldSpeculateFloat64Array()) { - compileGetByValOnFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), node, sizeof(double), isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks); + SpeculateCellOperand base(this, node->child1()); + SpeculateStrictInt32Operand property(this, node->child2()); + StorageOperand storage(this, node->child3()); + + GPRReg baseReg = base.gpr(); + GPRReg propertyReg = property.gpr(); + GPRReg storageReg = storage.gpr(); + if (!m_compileOkay) return; - break; + + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + FPRTemporary temp(this); + GPRReg resultTagReg = resultTag.gpr(); + GPRReg resultPayloadReg = resultPayload.gpr(); + FPRReg tempReg = temp.fpr(); + + MacroAssembler::JumpList slowCases; + + slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, Butterfly::offsetOfPublicLength()))); + + m_jit.loadDouble(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight), tempReg); + slowCases.append(m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempReg, tempReg)); + boxDouble(tempReg, resultTagReg, resultPayloadReg); + + addSlowPathGenerator( + slowPathCall( + slowCases, this, operationGetByValArrayInt, + JSValueRegs(resultTagReg, resultPayloadReg), baseReg, propertyReg)); + + jsValueResult(resultTagReg, resultPayloadReg, node); + break; } + case Array::ArrayStorage: + case Array::SlowPutArrayStorage: { + if (node->arrayMode().isInBounds()) { + SpeculateStrictInt32Operand property(this, node->child2()); + StorageOperand storage(this, node->child3()); + GPRReg propertyReg = property.gpr(); + GPRReg storageReg = storage.gpr(); - ASSERT(at(node.child1()).shouldSpeculateArray()); + if (!m_compileOkay) + return; - SpeculateStrictInt32Operand property(this, node.child2()); - StorageOperand storage(this, node.child3()); - GPRReg propertyReg = property.gpr(); - GPRReg storageReg = storage.gpr(); - - if (!m_compileOkay) - return; + speculationCheck(OutOfBounds, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset()))); + + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + + m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr()); + speculationCheck(LoadFromHole, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag))); + m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, 
MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr()); + + jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); + break; + } - // Check that base is an array, and that property is contained within m_vector (< m_vectorLength). - // If we have predicted the base to be type array, we can skip the check. - { - SpeculateCellOperand base(this, node.child1()); + SpeculateCellOperand base(this, node->child1()); + SpeculateStrictInt32Operand property(this, node->child2()); + StorageOperand storage(this, node->child3()); + GPRReg propertyReg = property.gpr(); + GPRReg storageReg = storage.gpr(); GPRReg baseReg = base.gpr(); - if (!isArrayPrediction(m_state.forNode(node.child1()).m_type)) - speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info))); - speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset()))); - } - GPRTemporary resultTag(this); - GPRTemporary resultPayload(this); + if (!m_compileOkay) + return; + + GPRTemporary resultTag(this); + GPRTemporary resultPayload(this); + GPRReg resultTagReg = resultTag.gpr(); + GPRReg resultPayloadReg = resultPayload.gpr(); - // FIXME: In cases where there are subsequent by_val accesses to the same base it might help to cache - // the storage pointer - especially if there happens to be another register free right now. If we do so, - // then we'll need to allocate a new temporary for result. - m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag.gpr()); - speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag))); - m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload.gpr()); + JITCompiler::Jump outOfBounds = m_jit.branch32( + MacroAssembler::AboveOrEqual, propertyReg, + MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())); - jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex); + m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTagReg); + JITCompiler::Jump hole = m_jit.branch32( + MacroAssembler::Equal, resultTag.gpr(), TrustedImm32(JSValue::EmptyValueTag)); + m_jit.load32(MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayloadReg); + + JITCompiler::JumpList slowCases; + slowCases.append(outOfBounds); + slowCases.append(hole); + addSlowPathGenerator( + slowPathCall( + slowCases, this, operationGetByValArrayInt, + JSValueRegs(resultTagReg, resultPayloadReg), + baseReg, propertyReg)); + + jsValueResult(resultTagReg, resultPayloadReg, node); + break; + } + case Array::String: + compileGetByValOnString(node); + break; + case Array::Arguments: + compileGetByValOnArguments(node); + break; + case Array::Int8Array: + 
compileGetByValOnIntTypedArray(m_jit.vm()->int8ArrayDescriptor(), node, sizeof(int8_t), SignedTypedArray); + break; + case Array::Int16Array: + compileGetByValOnIntTypedArray(m_jit.vm()->int16ArrayDescriptor(), node, sizeof(int16_t), SignedTypedArray); + break; + case Array::Int32Array: + compileGetByValOnIntTypedArray(m_jit.vm()->int32ArrayDescriptor(), node, sizeof(int32_t), SignedTypedArray); + break; + case Array::Uint8Array: + compileGetByValOnIntTypedArray(m_jit.vm()->uint8ArrayDescriptor(), node, sizeof(uint8_t), UnsignedTypedArray); + break; + case Array::Uint8ClampedArray: + compileGetByValOnIntTypedArray(m_jit.vm()->uint8ClampedArrayDescriptor(), node, sizeof(uint8_t), UnsignedTypedArray); + break; + case Array::Uint16Array: + compileGetByValOnIntTypedArray(m_jit.vm()->uint16ArrayDescriptor(), node, sizeof(uint16_t), UnsignedTypedArray); + break; + case Array::Uint32Array: + compileGetByValOnIntTypedArray(m_jit.vm()->uint32ArrayDescriptor(), node, sizeof(uint32_t), UnsignedTypedArray); + break; + case Array::Float32Array: + compileGetByValOnFloatTypedArray(m_jit.vm()->float32ArrayDescriptor(), node, sizeof(float)); + break; + case Array::Float64Array: + compileGetByValOnFloatTypedArray(m_jit.vm()->float64ArrayDescriptor(), node, sizeof(double)); + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } break; } - case PutByVal: { - if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) { - terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode); + case PutByVal: + case PutByValAlias: { + Edge child1 = m_jit.graph().varArgChild(node, 0); + Edge child2 = m_jit.graph().varArgChild(node, 1); + Edge child3 = m_jit.graph().varArgChild(node, 2); + Edge child4 = m_jit.graph().varArgChild(node, 3); + + ArrayMode arrayMode = node->arrayMode().modeForPut(); + bool alreadyHandled = false; + + switch (arrayMode.type()) { + case Array::SelectUsingPredictions: + case Array::ForceExit: + RELEASE_ASSERT_NOT_REACHED(); + terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); + alreadyHandled = true; break; - } - - if (!at(node.child2()).shouldSpeculateInteger() || !isActionableMutableArrayPrediction(at(node.child1()).prediction())) { - SpeculateCellOperand base(this, node.child1()); // Save a register, speculate cell. We'll probably be right. - JSValueOperand property(this, node.child2()); - JSValueOperand value(this, node.child3()); + case Array::Generic: { + ASSERT(node->op() == PutByVal); + + SpeculateCellOperand base(this, child1); // Save a register, speculate cell. We'll probably be right. + JSValueOperand property(this, child2); + JSValueOperand value(this, child3); GPRReg baseGPR = base.gpr(); GPRReg propertyTagGPR = property.tagGPR(); GPRReg propertyPayloadGPR = property.payloadGPR(); @@ -2383,233 +2761,186 @@ void SpeculativeJIT::compile(Node& node) flushRegisters(); callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValCellStrict : operationPutByValCellNonStrict, baseGPR, propertyTagGPR, propertyPayloadGPR, valueTagGPR, valuePayloadGPR); - noResult(m_compileIndex); + noResult(node); + alreadyHandled = true; break; } - - SpeculateCellOperand base(this, node.child1()); - SpeculateStrictInt32Operand property(this, node.child2()); - if (at(node.child1()).shouldSpeculateInt8Array()) { - compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? 
NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray); - if (!m_compileOkay) - return; - break; - } - - if (at(node.child1()).shouldSpeculateInt16Array()) { - compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray); - if (!m_compileOkay) - return; - break; - } - - if (at(node.child1()).shouldSpeculateInt32Array()) { - compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray); - if (!m_compileOkay) - return; - break; - } - - if (at(node.child1()).shouldSpeculateUint8Array()) { - compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray); - if (!m_compileOkay) - return; - break; - } - - if (at(node.child1()).shouldSpeculateUint8ClampedArray()) { - compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray, ClampRounding); - if (!m_compileOkay) - return; + default: break; } - - if (at(node.child1()).shouldSpeculateUint16Array()) { - compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray); - if (!m_compileOkay) - return; - break; - } - if (at(node.child1()).shouldSpeculateUint32Array()) { - compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray); - if (!m_compileOkay) - return; - break; - } + if (alreadyHandled) + break; - if (at(node.child1()).shouldSpeculateFloat32Array()) { - compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks); - if (!m_compileOkay) - return; - break; - } + SpeculateCellOperand base(this, child1); + SpeculateStrictInt32Operand property(this, child2); - if (at(node.child1()).shouldSpeculateFloat64Array()) { - compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks); - if (!m_compileOkay) - return; - break; - } - - ASSERT(at(node.child1()).shouldSpeculateArray()); - - JSValueOperand value(this, node.child3()); - GPRTemporary scratch(this); - - // Map base, property & value into registers, allocate a scratch register. 
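(The removed code just below, like its replacements later in this hunk, calls writeBarrier(...) before storing a value that may be a cell into the array. A purely conceptual sketch of what a store barrier is for, not JSC's actual implementation; the remembered-set container is a stand-in.)

    #include <unordered_set>

    struct FakeCell {};
    static std::unordered_set<FakeCell*> rememberedSet;   // stand-in for collector bookkeeping

    // Record the mutated owner before the raw store so an incremental or generational
    // collector knows it must revisit this object when tracing.
    static void storeWithBarrier(FakeCell* owner, FakeCell** slot, FakeCell* newValue)
    {
        rememberedSet.insert(owner);
        *slot = newValue;
    }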
GPRReg baseReg = base.gpr(); GPRReg propertyReg = property.gpr(); - GPRReg valueTagReg = value.tagGPR(); - GPRReg valuePayloadReg = value.payloadGPR(); - GPRReg scratchReg = scratch.gpr(); - - if (!m_compileOkay) - return; - - writeBarrier(baseReg, valueTagReg, node.child3(), WriteBarrierForPropertyAccess, scratchReg); - - // Check that base is an array, and that property is contained within m_vector (< m_vectorLength). - // If we have predicted the base to be type array, we can skip the check. - if (!isArrayPrediction(m_state.forNode(node.child1()).m_type)) - speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info))); - - base.use(); - property.use(); - value.use(); - - MacroAssembler::Jump withinArrayBounds = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(baseReg, JSArray::vectorLengthOffset())); - - // Code to handle put beyond array bounds. - silentSpillAllRegisters(scratchReg); - callOperation(m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict, baseReg, propertyReg, valueTagReg, valuePayloadReg); - silentFillAllRegisters(scratchReg); - JITCompiler::Jump wasBeyondArrayBounds = m_jit.jump(); - - withinArrayBounds.link(&m_jit); - - // Get the array storage. - GPRReg storageReg = scratchReg; - m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg); - - // Check if we're writing to a hole; if so increment m_numValuesInVector. - MacroAssembler::Jump notHoleValue = m_jit.branch32(MacroAssembler::NotEqual, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag)); - m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); - // If we're writing to a hole we might be growing the array; - MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_length))); - m_jit.add32(TrustedImm32(1), propertyReg); - m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, OBJECT_OFFSETOF(ArrayStorage, m_length))); - m_jit.sub32(TrustedImm32(1), propertyReg); - - lengthDoesNotNeedUpdate.link(&m_jit); - notHoleValue.link(&m_jit); - - // Store the value to the array. 
- m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); - m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); - - wasBeyondArrayBounds.link(&m_jit); - - noResult(m_compileIndex, UseChildrenCalledExplicitly); - break; - } + switch (arrayMode.type()) { + case Array::Int32: { + SpeculateIntegerOperand value(this, child3); - case PutByValAlias: { - if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) { - terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode); - break; - } + GPRReg valuePayloadReg = value.gpr(); - ASSERT(isActionableMutableArrayPrediction(at(node.child1()).prediction())); - ASSERT(at(node.child2()).shouldSpeculateInteger()); - - SpeculateCellOperand base(this, node.child1()); - SpeculateStrictInt32Operand property(this, node.child2()); - - if (at(node.child1()).shouldSpeculateInt8Array()) { - compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), NoTypedArraySpecCheck, SignedTypedArray); if (!m_compileOkay) return; - break; - } - - if (at(node.child1()).shouldSpeculateInt16Array()) { - compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), NoTypedArraySpecCheck, SignedTypedArray); - if (!m_compileOkay) - return; - break; + + compileContiguousPutByVal(node, base, property, value, valuePayloadReg, TrustedImm32(JSValue::Int32Tag)); + break; } + case Array::Contiguous: { + JSValueOperand value(this, child3); + + GPRReg valueTagReg = value.tagGPR(); + GPRReg valuePayloadReg = value.payloadGPR(); - if (at(node.child1()).shouldSpeculateInt32Array()) { - compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), NoTypedArraySpecCheck, SignedTypedArray); if (!m_compileOkay) return; - break; - } - if (at(node.child1()).shouldSpeculateUint8Array()) { - compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray); - if (!m_compileOkay) - return; - break; + if (Heap::isWriteBarrierEnabled()) { + GPRTemporary scratch(this); + writeBarrier(baseReg, valueTagReg, child3, WriteBarrierForPropertyAccess, scratch.gpr()); + } + + compileContiguousPutByVal(node, base, property, value, valuePayloadReg, valueTagReg); + break; } - - if (at(node.child1()).shouldSpeculateUint8ClampedArray()) { - compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray, ClampRounding); - if (!m_compileOkay) - return; + case Array::Double: { + compileDoublePutByVal(node, base, property); break; } + case Array::ArrayStorage: + case Array::SlowPutArrayStorage: { + JSValueOperand value(this, child3); - if (at(node.child1()).shouldSpeculateUint16Array()) { - compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), NoTypedArraySpecCheck, UnsignedTypedArray); - if (!m_compileOkay) - return; - break; - } - - if (at(node.child1()).shouldSpeculateUint32Array()) { - 
compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), NoTypedArraySpecCheck, UnsignedTypedArray); - if (!m_compileOkay) - return; - break; - } - - if (at(node.child1()).shouldSpeculateFloat32Array()) { - compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), NoTypedArraySpecCheck); - if (!m_compileOkay) - return; - break; - } - - if (at(node.child1()).shouldSpeculateFloat64Array()) { - compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), NoTypedArraySpecCheck); + GPRReg valueTagReg = value.tagGPR(); + GPRReg valuePayloadReg = value.payloadGPR(); + if (!m_compileOkay) return; - break; - } - - ASSERT(at(node.child1()).shouldSpeculateArray()); + + { + GPRTemporary scratch(this); + GPRReg scratchReg = scratch.gpr(); + writeBarrier(baseReg, valueTagReg, child3, WriteBarrierForPropertyAccess, scratchReg); + } + + StorageOperand storage(this, child4); + GPRReg storageReg = storage.gpr(); + + if (node->op() == PutByValAlias) { + // Store the value to the array. + GPRReg propertyReg = property.gpr(); + m_jit.store32(value.tagGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + m_jit.store32(value.payloadGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); + + noResult(node); + break; + } - JSValueOperand value(this, node.child3()); - GPRTemporary scratch(this, base); - - GPRReg baseReg = base.gpr(); - GPRReg scratchReg = scratch.gpr(); + MacroAssembler::JumpList slowCases; - writeBarrier(baseReg, value.tagGPR(), node.child3(), WriteBarrierForPropertyAccess, scratchReg); + MacroAssembler::Jump beyondArrayBounds = m_jit.branch32(MacroAssembler::AboveOrEqual, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::vectorLengthOffset())); + if (!arrayMode.isOutOfBounds()) + speculationCheck(OutOfBounds, JSValueRegs(), 0, beyondArrayBounds); + else + slowCases.append(beyondArrayBounds); - // Get the array storage. - GPRReg storageReg = scratchReg; - m_jit.loadPtr(MacroAssembler::Address(baseReg, JSArray::storageOffset()), storageReg); + // Check if we're writing to a hole; if so increment m_numValuesInVector. + if (arrayMode.isInBounds()) { + speculationCheck( + StoreToHole, JSValueRegs(), 0, + m_jit.branch32(MacroAssembler::Equal, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag))); + } else { + MacroAssembler::Jump notHoleValue = m_jit.branch32(MacroAssembler::NotEqual, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag)); + if (arrayMode.isSlowPut()) { + // This is sort of strange. If we wanted to optimize this code path, we would invert + // the above branch. But it's simply not worth it since this only happens if we're + // already having a bad time. 
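(The non-slow-put branch just below bumps m_numValuesInVector when a store fills a hole and grows the public length when the index is at or past it. A plain-C++ restatement with simplified stand-in fields:)

    #include <cstdint>
    #include <vector>

    struct SimpleArrayStorage {
        uint32_t length = 0;                 // public length
        uint32_t numValuesInVector = 0;      // occupied (non-hole) slots
        std::vector<bool> isHole;            // true where a vector slot is empty; assumed sized to the vector length
    };

    static void noteVectorStore(SimpleArrayStorage& storage, uint32_t index)
    {
        if (!storage.isHole[index])
            return;                          // overwriting an existing value: nothing to update
        storage.numValuesInVector++;         // a hole is becoming a real value
        if (index >= storage.length)
            storage.length = index + 1;      // the store may also be growing the array
        storage.isHole[index] = false;
    }

After this bookkeeping the tag and payload words are stored into the slot itself, as the two store32 calls below do.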
+ slowCases.append(m_jit.jump()); + } else { + m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageReg, ArrayStorage::numValuesInVectorOffset())); + + // If we're writing to a hole we might be growing the array; + MacroAssembler::Jump lengthDoesNotNeedUpdate = m_jit.branch32(MacroAssembler::Below, propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); + m_jit.add32(TrustedImm32(1), propertyReg); + m_jit.store32(propertyReg, MacroAssembler::Address(storageReg, ArrayStorage::lengthOffset())); + m_jit.sub32(TrustedImm32(1), propertyReg); + + lengthDoesNotNeedUpdate.link(&m_jit); + } + notHoleValue.link(&m_jit); + } + + // Store the value to the array. + m_jit.store32(valueTagReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + m_jit.store32(valuePayloadReg, MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); - // Store the value to the array. - GPRReg propertyReg = property.gpr(); - m_jit.store32(value.tagGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); - m_jit.store32(value.payloadGPR(), MacroAssembler::BaseIndex(storageReg, propertyReg, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); + base.use(); + property.use(); + value.use(); + storage.use(); + + if (!slowCases.empty()) { + addSlowPathGenerator( + slowPathCall( + slowCases, this, + m_jit.codeBlock()->isStrictMode() ? operationPutByValBeyondArrayBoundsStrict : operationPutByValBeyondArrayBoundsNonStrict, + NoResult, baseReg, propertyReg, valueTagReg, valuePayloadReg)); + } - noResult(m_compileIndex); + noResult(node, UseChildrenCalledExplicitly); + break; + } + + case Array::Arguments: + // FIXME: we could at some point make this work. Right now we're assuming that the register + // pressure would be too great. 
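(Among the typed-array cases below, only Uint8ClampedArray passes ClampRounding. Roughly, the conversion it requests clamps to [0, 255] and rounds to the nearest integer, with NaN going to 0; a hedged standalone approximation, relying on rint()'s default round-to-nearest-even mode:)

    #include <cmath>
    #include <cstdint>

    static uint8_t clampToUint8(double value)
    {
        if (!(value > 0))                                 // also catches NaN
            return 0;
        if (value > 255)
            return 255;
        return static_cast<uint8_t>(std::rint(value));    // ties go to the even neighbour
    }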
+ RELEASE_ASSERT_NOT_REACHED(); + break; + + case Array::Int8Array: + compilePutByValForIntTypedArray(m_jit.vm()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), SignedTypedArray); + break; + + case Array::Int16Array: + compilePutByValForIntTypedArray(m_jit.vm()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), SignedTypedArray); + break; + + case Array::Int32Array: + compilePutByValForIntTypedArray(m_jit.vm()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), SignedTypedArray); + break; + + case Array::Uint8Array: + compilePutByValForIntTypedArray(m_jit.vm()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), UnsignedTypedArray); + break; + + case Array::Uint8ClampedArray: + compilePutByValForIntTypedArray(m_jit.vm()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), UnsignedTypedArray, ClampRounding); + break; + + case Array::Uint16Array: + compilePutByValForIntTypedArray(m_jit.vm()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), UnsignedTypedArray); + break; + + case Array::Uint32Array: + compilePutByValForIntTypedArray(m_jit.vm()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), UnsignedTypedArray); + break; + + case Array::Float32Array: + compilePutByValForFloatTypedArray(m_jit.vm()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float)); + break; + + case Array::Float64Array: + compilePutByValForFloatTypedArray(m_jit.vm()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double)); + break; + + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } break; } @@ -2617,9 +2948,9 @@ void SpeculativeJIT::compile(Node& node) if (compileRegExpExec(node)) return; - if (!node.adjustedRefCount()) { - SpeculateCellOperand base(this, node.child1()); - SpeculateCellOperand argument(this, node.child2()); + if (!node->adjustedRefCount()) { + SpeculateCellOperand base(this, node->child1()); + SpeculateCellOperand argument(this, node->child2()); GPRReg baseGPR = base.gpr(); GPRReg argumentGPR = argument.gpr(); @@ -2629,12 +2960,12 @@ void SpeculativeJIT::compile(Node& node) // Must use jsValueResult because otherwise we screw up register // allocation, which thinks that this node has a result. - booleanResult(result.gpr(), m_compileIndex); + booleanResult(result.gpr(), node); break; } - SpeculateCellOperand base(this, node.child1()); - SpeculateCellOperand argument(this, node.child2()); + SpeculateCellOperand base(this, node->child1()); + SpeculateCellOperand argument(this, node->child2()); GPRReg baseGPR = base.gpr(); GPRReg argumentGPR = argument.gpr(); @@ -2643,13 +2974,13 @@ void SpeculativeJIT::compile(Node& node) GPRResult resultPayload(this); callOperation(operationRegExpExec, resultTag.gpr(), resultPayload.gpr(), baseGPR, argumentGPR); - jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex); + jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); break; } case RegExpTest: { - SpeculateCellOperand base(this, node.child1()); - SpeculateCellOperand argument(this, node.child2()); + SpeculateCellOperand base(this, node->child1()); + SpeculateCellOperand argument(this, node->child2()); GPRReg baseGPR = base.gpr(); GPRReg argumentGPR = argument.gpr(); @@ -2658,144 +2989,276 @@ void SpeculativeJIT::compile(Node& node) callOperation(operationRegExpTest, result.gpr(), baseGPR, argumentGPR); // If we add a DataFormatBool, we should use it here. 
- booleanResult(result.gpr(), m_compileIndex); + booleanResult(result.gpr(), node); break; } case ArrayPush: { - SpeculateCellOperand base(this, node.child1()); - JSValueOperand value(this, node.child2()); - GPRTemporary storage(this); + ASSERT(node->arrayMode().isJSArray()); + + SpeculateCellOperand base(this, node->child1()); GPRTemporary storageLength(this); GPRReg baseGPR = base.gpr(); - GPRReg valueTagGPR = value.tagGPR(); - GPRReg valuePayloadGPR = value.payloadGPR(); - GPRReg storageGPR = storage.gpr(); GPRReg storageLengthGPR = storageLength.gpr(); - writeBarrier(baseGPR, valueTagGPR, node.child2(), WriteBarrierForPropertyAccess, storageGPR, storageLengthGPR); - - if (!isArrayPrediction(m_state.forNode(node.child1()).m_type)) - speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info))); + StorageOperand storage(this, node->child3()); + GPRReg storageGPR = storage.gpr(); - m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), storageGPR); - m_jit.load32(MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), storageLengthGPR); + switch (node->arrayMode().type()) { + case Array::Int32: { + SpeculateIntegerOperand value(this, node->child2()); + GPRReg valuePayloadGPR = value.gpr(); + + m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR); + MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength())); + m_jit.store32(TrustedImm32(JSValue::Int32Tag), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); + m_jit.add32(TrustedImm32(1), storageLengthGPR); + m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); + m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR); + + addSlowPathGenerator( + slowPathCall( + slowPath, this, operationArrayPush, + JSValueRegs(storageGPR, storageLengthGPR), + TrustedImm32(JSValue::Int32Tag), valuePayloadGPR, baseGPR)); - // Refuse to handle bizarre lengths. 
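(Both the removed code below and the new ArrayStorage push path keep the "bizarre lengths" guard at 0x7ffffffe, which keeps the post-push length representable as the int32-tagged value the fast path returns. A scalar restatement of the fast-path decision, with an illustrative helper name:)

    #include <cstdint>

    static bool canPushOnFastPath(uint32_t length, uint32_t vectorLength)
    {
        if (length > 0x7ffffffeu)
            return false;          // length + 1 would no longer fit a plain int32 result
        if (length >= vectorLength)
            return false;          // no spare capacity: defer to operationArrayPush
        return true;               // store at [length], then publish length + 1
    }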
- speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe))); + jsValueResult(storageGPR, storageLengthGPR, node); + break; + } + + case Array::Contiguous: { + JSValueOperand value(this, node->child2()); + GPRReg valueTagGPR = value.tagGPR(); + GPRReg valuePayloadGPR = value.payloadGPR(); + + if (Heap::isWriteBarrierEnabled()) { + GPRTemporary scratch(this); + writeBarrier(baseGPR, valueTagGPR, node->child2(), WriteBarrierForPropertyAccess, scratch.gpr(), storageLengthGPR); + } + + m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR); + MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength())); + m_jit.store32(valueTagGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); + m_jit.add32(TrustedImm32(1), storageLengthGPR); + m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); + m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR); + + addSlowPathGenerator( + slowPathCall( + slowPath, this, operationArrayPush, + JSValueRegs(storageGPR, storageLengthGPR), + valueTagGPR, valuePayloadGPR, baseGPR)); - MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(baseGPR, JSArray::vectorLengthOffset())); + jsValueResult(storageGPR, storageLengthGPR, node); + break; + } + + case Array::Double: { + SpeculateDoubleOperand value(this, node->child2()); + FPRReg valueFPR = value.fpr(); + + DFG_TYPE_CHECK( + JSValueRegs(), node->child2(), SpecRealNumber, + m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, valueFPR, valueFPR)); + + m_jit.load32(MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), storageLengthGPR); + MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength())); + m_jit.storeDouble(valueFPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight)); + m_jit.add32(TrustedImm32(1), storageLengthGPR); + m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); + m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR); + + addSlowPathGenerator( + slowPathCall( + slowPath, this, operationArrayPushDouble, + JSValueRegs(storageGPR, storageLengthGPR), + valueFPR, baseGPR)); - m_jit.store32(valueTagGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); - m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); + jsValueResult(storageGPR, storageLengthGPR, node); + break; + } + + case Array::ArrayStorage: { + JSValueOperand value(this, node->child2()); + GPRReg valueTagGPR = value.tagGPR(); + GPRReg valuePayloadGPR = value.payloadGPR(); + + if (Heap::isWriteBarrierEnabled()) { + GPRTemporary scratch(this); + writeBarrier(baseGPR, valueTagGPR, 
node->child2(), WriteBarrierForPropertyAccess, scratch.gpr(), storageLengthGPR); + } + + m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR); - m_jit.add32(TrustedImm32(1), storageLengthGPR); - m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length))); - m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); - m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR); + // Refuse to handle bizarre lengths. + speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branch32(MacroAssembler::Above, storageLengthGPR, TrustedImm32(0x7ffffffe))); - MacroAssembler::Jump done = m_jit.jump(); + MacroAssembler::Jump slowPath = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())); - slowPath.link(&m_jit); + m_jit.store32(valueTagGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + m_jit.store32(valuePayloadGPR, MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); - silentSpillAllRegisters(storageGPR, storageLengthGPR); - callOperation(operationArrayPush, storageGPR, storageLengthGPR, valueTagGPR, valuePayloadGPR, baseGPR); - silentFillAllRegisters(storageGPR, storageLengthGPR); + m_jit.add32(TrustedImm32(1), storageLengthGPR); + m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset())); + m_jit.add32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); + m_jit.move(TrustedImm32(JSValue::Int32Tag), storageGPR); - done.link(&m_jit); + addSlowPathGenerator(slowPathCall(slowPath, this, operationArrayPush, JSValueRegs(storageGPR, storageLengthGPR), valueTagGPR, valuePayloadGPR, baseGPR)); - jsValueResult(storageGPR, storageLengthGPR, m_compileIndex); + jsValueResult(storageGPR, storageLengthGPR, node); + break; + } + + default: + CRASH(); + break; + } break; } case ArrayPop: { - SpeculateCellOperand base(this, node.child1()); + ASSERT(node->arrayMode().isJSArray()); + + SpeculateCellOperand base(this, node->child1()); + StorageOperand storage(this, node->child2()); GPRTemporary valueTag(this); GPRTemporary valuePayload(this); - GPRTemporary storage(this); - GPRTemporary storageLength(this); GPRReg baseGPR = base.gpr(); GPRReg valueTagGPR = valueTag.gpr(); GPRReg valuePayloadGPR = valuePayload.gpr(); GPRReg storageGPR = storage.gpr(); - GPRReg storageLengthGPR = storageLength.gpr(); - - if (!isArrayPrediction(m_state.forNode(node.child1()).m_type)) - speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info))); - m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), storageGPR); - m_jit.load32(MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), storageLengthGPR); + switch (node->arrayMode().type()) { + case Array::Int32: + case Array::Contiguous: { + m_jit.load32( + MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), valuePayloadGPR); + MacroAssembler::Jump undefinedCase = + 
m_jit.branchTest32(MacroAssembler::Zero, valuePayloadGPR); + m_jit.sub32(TrustedImm32(1), valuePayloadGPR); + m_jit.store32( + valuePayloadGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); + m_jit.load32( + MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), + valueTagGPR); + MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag)); + m_jit.store32( + MacroAssembler::TrustedImm32(JSValue::EmptyValueTag), + MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + m_jit.load32( + MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), + valuePayloadGPR); + + addSlowPathGenerator( + slowPathMove( + undefinedCase, this, + MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR, + MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR)); + addSlowPathGenerator( + slowPathCall( + slowCase, this, operationArrayPopAndRecoverLength, + JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR)); + + jsValueResult(valueTagGPR, valuePayloadGPR, node); + break; + } + + case Array::Double: { + FPRTemporary temp(this); + FPRReg tempFPR = temp.fpr(); + + m_jit.load32( + MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength()), valuePayloadGPR); + MacroAssembler::Jump undefinedCase = + m_jit.branchTest32(MacroAssembler::Zero, valuePayloadGPR); + m_jit.sub32(TrustedImm32(1), valuePayloadGPR); + m_jit.store32( + valuePayloadGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); + m_jit.loadDouble( + MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight), + tempFPR); + MacroAssembler::Jump slowCase = m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, tempFPR, tempFPR); + JSValue nan = JSValue(JSValue::EncodeAsDouble, QNaN); + m_jit.store32( + MacroAssembler::TrustedImm32(nan.u.asBits.tag), + MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + m_jit.store32( + MacroAssembler::TrustedImm32(nan.u.asBits.payload), + MacroAssembler::BaseIndex(storageGPR, valuePayloadGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); + boxDouble(tempFPR, valueTagGPR, valuePayloadGPR); + + addSlowPathGenerator( + slowPathMove( + undefinedCase, this, + MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR, + MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR)); + addSlowPathGenerator( + slowPathCall( + slowCase, this, operationArrayPopAndRecoverLength, + JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR)); + + jsValueResult(valueTagGPR, valuePayloadGPR, node); + break; + } + + case Array::ArrayStorage: { + GPRTemporary storageLength(this); + GPRReg storageLengthGPR = storageLength.gpr(); + + m_jit.load32(MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset()), storageLengthGPR); - MacroAssembler::Jump emptyArrayCase = m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR); + JITCompiler::JumpList setUndefinedCases; + setUndefinedCases.append(m_jit.branchTest32(MacroAssembler::Zero, storageLengthGPR)); - m_jit.sub32(TrustedImm32(1), storageLengthGPR); + m_jit.sub32(TrustedImm32(1), storageLengthGPR); - MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, 
MacroAssembler::Address(baseGPR, JSArray::vectorLengthOffset())); + MacroAssembler::Jump slowCase = m_jit.branch32(MacroAssembler::AboveOrEqual, storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::vectorLengthOffset())); - m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), valueTagGPR); - m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), valuePayloadGPR); + m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), valueTagGPR); + m_jit.load32(MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), valuePayloadGPR); - m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_length))); + m_jit.store32(storageLengthGPR, MacroAssembler::Address(storageGPR, ArrayStorage::lengthOffset())); - MacroAssembler::Jump holeCase = m_jit.branch32(MacroAssembler::Equal, TrustedImm32(JSValue::EmptyValueTag), valueTagGPR); + setUndefinedCases.append(m_jit.branch32(MacroAssembler::Equal, TrustedImm32(JSValue::EmptyValueTag), valueTagGPR)); - m_jit.store32(TrustedImm32(JSValue::EmptyValueTag), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + m_jit.store32(TrustedImm32(JSValue::EmptyValueTag), MacroAssembler::BaseIndex(storageGPR, storageLengthGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); - m_jit.sub32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); + m_jit.sub32(TrustedImm32(1), MacroAssembler::Address(storageGPR, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector))); - MacroAssembler::JumpList done; + addSlowPathGenerator( + slowPathMove( + setUndefinedCases, this, + MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR, + MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR)); - done.append(m_jit.jump()); - - holeCase.link(&m_jit); - emptyArrayCase.link(&m_jit); - m_jit.move(MacroAssembler::TrustedImm32(jsUndefined().tag()), valueTagGPR); - m_jit.move(MacroAssembler::TrustedImm32(jsUndefined().payload()), valuePayloadGPR); - done.append(m_jit.jump()); - - slowCase.link(&m_jit); - - silentSpillAllRegisters(valueTagGPR, valuePayloadGPR); - callOperation(operationArrayPop, valueTagGPR, valuePayloadGPR, baseGPR); - silentFillAllRegisters(valueTagGPR, valuePayloadGPR); - - done.link(&m_jit); - - jsValueResult(valueTagGPR, valuePayloadGPR, m_compileIndex); + addSlowPathGenerator( + slowPathCall( + slowCase, this, operationArrayPop, + JSValueRegs(valueTagGPR, valuePayloadGPR), baseGPR)); + + jsValueResult(valueTagGPR, valuePayloadGPR, node); + break; + } + + default: + CRASH(); + break; + } break; } case DFG::Jump: { - BlockIndex taken = node.takenBlockIndex(); + BlockIndex taken = node->takenBlockIndex(); jump(taken); - noResult(m_compileIndex); + noResult(node); break; } case Branch: - if (isStrictInt32(node.child1().index()) || at(node.child1()).shouldSpeculateInteger()) { 
-            SpeculateIntegerOperand op(this, node.child1());
-
-            BlockIndex taken = node.takenBlockIndex();
-            BlockIndex notTaken = node.notTakenBlockIndex();
-
-            MacroAssembler::ResultCondition condition = MacroAssembler::NonZero;
-
-            if (taken == (m_block + 1)) {
-                condition = MacroAssembler::Zero;
-                BlockIndex tmp = taken;
-                taken = notTaken;
-                notTaken = tmp;
-            }
-
-            branchTest32(condition, op.gpr(), taken);
-            jump(notTaken);
-
-            noResult(m_compileIndex);
-            break;
-        }
         emitBranch(node);
         break;
@@ -2810,7 +3273,7 @@ void SpeculativeJIT::compile(Node& node)
 #endif
         // Return the result in returnValueGPR.
-        JSValueOperand op1(this, node.child1());
+        JSValueOperand op1(this, node->child1());
         op1.fill();
         if (op1.isDouble())
             boxDouble(op1.fpr(), GPRInfo::returnValueGPR2, GPRInfo::returnValueGPR);
@@ -2827,14 +3290,14 @@ void SpeculativeJIT::compile(Node& node)
         }
         // Grab the return address.
-        m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::ReturnPC, GPRInfo::regT2);
+        m_jit.emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, GPRInfo::regT2);
         // Restore our caller's "r".
-        m_jit.emitGetFromCallFrameHeaderPtr(RegisterFile::CallerFrame, GPRInfo::callFrameRegister);
+        m_jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, GPRInfo::callFrameRegister);
         // Return.
         m_jit.restoreReturnAddressBeforeReturn(GPRInfo::regT2);
         m_jit.ret();
-        noResult(m_compileIndex);
+        noResult(node);
         break;
     }
@@ -2842,28 +3305,13 @@ void SpeculativeJIT::compile(Node& node)
     case ThrowReferenceError: {
         // We expect that throw statements are rare and are intended to exit the code block
         // anyway, so we just OSR back to the old JIT for now.
-        terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode);
+        terminateSpeculativeExecution(Uncountable, JSValueRegs(), 0);
         break;
     }
     case ToPrimitive: {
-        if (at(node.child1()).shouldSpeculateInteger()) {
-            // It's really profitable to speculate integer, since it's really cheap,
-            // it means we don't have to do any real work, and we emit a lot less code.
-
-            SpeculateIntegerOperand op1(this, node.child1());
-            GPRTemporary result(this, op1);
-
-            ASSERT(op1.format() == DataFormatInteger);
-            m_jit.move(op1.gpr(), result.gpr());
-
-            integerResult(result.gpr(), m_compileIndex);
-            break;
-        }
-
-        // FIXME: Add string speculation here.
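// Illustrative sketch (standalone, not JSC code; names are assumed) of the value model the
// ToPrimitive path below relies on: non-cell values and string cells are already primitive
// and can pass through unchanged, so only non-string cells need the out-of-line helper.
#include <cstdint>

enum class Tag : int32_t { Int32, Boolean, Undefined, Null, Cell };

struct Value {
    Tag tag;
    intptr_t payload;  // for Tag::Cell this is the object pointer
    bool cellIsString; // stands in for comparing the cell's structure against stringStructure
};

// True when the emitted fast path can simply forward tag/payload to the result registers.
inline bool isAlreadyPrimitive(const Value& v)
{
    if (v.tag != Tag::Cell)
        return true;        // numbers, booleans, undefined and null are primitive
    return v.cellIsString;  // string cells are primitive; other cells go to the slow call
}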
- - JSValueOperand op1(this, node.child1()); + RELEASE_ASSERT(node->child1().useKind() == UntypedUse); + JSValueOperand op1(this, node->child1()); GPRTemporary resultTag(this, op1); GPRTemporary resultPayload(this, op1, false); @@ -2874,63 +3322,208 @@ void SpeculativeJIT::compile(Node& node) op1.use(); - if (!(m_state.forNode(node.child1()).m_type & ~(PredictNumber | PredictBoolean))) { + if (!(m_state.forNode(node->child1()).m_type & ~(SpecNumber | SpecBoolean))) { m_jit.move(op1TagGPR, resultTagGPR); m_jit.move(op1PayloadGPR, resultPayloadGPR); } else { - MacroAssembler::JumpList alreadyPrimitive; - - alreadyPrimitive.append(m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, TrustedImm32(JSValue::CellTag))); - alreadyPrimitive.append(m_jit.branchPtr(MacroAssembler::Equal, MacroAssembler::Address(op1PayloadGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info))); - - silentSpillAllRegisters(resultTagGPR, resultPayloadGPR); - callOperation(operationToPrimitive, resultTagGPR, resultPayloadGPR, op1TagGPR, op1PayloadGPR); - silentFillAllRegisters(resultTagGPR, resultPayloadGPR); - - MacroAssembler::Jump done = m_jit.jump(); + MacroAssembler::Jump alreadyPrimitive = m_jit.branch32(MacroAssembler::NotEqual, op1TagGPR, TrustedImm32(JSValue::CellTag)); + MacroAssembler::Jump notPrimitive = m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(op1PayloadGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(m_jit.vm()->stringStructure.get())); alreadyPrimitive.link(&m_jit); m_jit.move(op1TagGPR, resultTagGPR); m_jit.move(op1PayloadGPR, resultPayloadGPR); - done.link(&m_jit); + addSlowPathGenerator( + slowPathCall( + notPrimitive, this, operationToPrimitive, + JSValueRegs(resultTagGPR, resultPayloadGPR), op1TagGPR, op1PayloadGPR)); } - jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly); + jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly); + break; + } + + case ToString: { + if (node->child1().useKind() == UntypedUse) { + JSValueOperand op1(this, node->child1()); + GPRReg op1PayloadGPR = op1.payloadGPR(); + GPRReg op1TagGPR = op1.tagGPR(); + + GPRResult result(this); + GPRReg resultGPR = result.gpr(); + + flushRegisters(); + + JITCompiler::Jump done; + if (node->child1()->prediction() & SpecString) { + JITCompiler::Jump slowPath1 = m_jit.branch32( + JITCompiler::NotEqual, op1TagGPR, TrustedImm32(JSValue::CellTag)); + JITCompiler::Jump slowPath2 = m_jit.branchPtr( + JITCompiler::NotEqual, + JITCompiler::Address(op1PayloadGPR, JSCell::structureOffset()), + TrustedImmPtr(m_jit.vm()->stringStructure.get())); + m_jit.move(op1PayloadGPR, resultGPR); + done = m_jit.jump(); + slowPath1.link(&m_jit); + slowPath2.link(&m_jit); + } + callOperation(operationToString, resultGPR, op1TagGPR, op1PayloadGPR); + if (done.isSet()) + done.link(&m_jit); + cellResult(resultGPR, node); + break; + } + + compileToStringOnCell(node); + break; + } + + case NewStringObject: { + compileNewStringObject(node); break; } - case StrCat: case NewArray: { - // We really don't want to grow the register file just to do a StrCat or NewArray. - // Say we have 50 functions on the stack that all have a StrCat in them that has - // upwards of 10 operands. 
In the DFG this would mean that each one gets - // some random virtual register, and then to do the StrCat we'd need a second - // span of 10 operands just to have somewhere to copy the 10 operands to, where - // they'd be contiguous and we could easily tell the C code how to find them. - // Ugly! So instead we use the scratchBuffer infrastructure in JSGlobalData. That - // way, those 50 functions will share the same scratchBuffer for offloading their - // StrCat operands. It's about as good as we can do, unless we start doing - // virtual register coalescing to ensure that operands to StrCat get spilled - // in exactly the place where StrCat wants them, or else have the StrCat - // refer to those operands' SetLocal instructions to force them to spill in - // the right place. Basically, any way you cut it, the current approach - // probably has the best balance of performance and sensibility in the sense - // that it does not increase the complexity of the DFG JIT just to make StrCat - // fast and pretty. - - size_t scratchSize = sizeof(EncodedJSValue) * node.numChildren(); - ScratchBuffer* scratchBuffer = m_jit.globalData()->scratchBufferForSize(scratchSize); + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin); + if (!globalObject->isHavingABadTime() && !hasArrayStorage(node->indexingType())) { + globalObject->havingABadTimeWatchpoint()->add(speculationWatchpoint()); + + Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()); + ASSERT(structure->indexingType() == node->indexingType()); + ASSERT( + hasUndecided(structure->indexingType()) + || hasInt32(structure->indexingType()) + || hasDouble(structure->indexingType()) + || hasContiguous(structure->indexingType())); + + unsigned numElements = node->numChildren(); + + GPRTemporary result(this); + GPRTemporary storage(this); + + GPRReg resultGPR = result.gpr(); + GPRReg storageGPR = storage.gpr(); + + emitAllocateJSArray(resultGPR, structure, storageGPR, numElements); + + // At this point, one way or another, resultGPR and storageGPR have pointers to + // the JSArray and the Butterfly, respectively. 
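// Illustrative sketch (standalone, not JSC code) of why the element writes in this case are
// emitted as pairs of 32-bit stores with a TimesEight stride: under the 32-bit value
// representation each JSValue slot is 8 bytes, split into a payload word and a tag word
// (payload first on little-endian), so element i lives at byte offset 8 * i in the butterfly.
#include <cstddef>
#include <cstdint>

struct Slot {
    int32_t payload; // integer value, boolean, or cell pointer
    int32_t tag;     // Int32Tag, CellTag, ... (layout shown for little-endian)
};

constexpr size_t payloadOffset(size_t index) { return sizeof(Slot) * index + offsetof(Slot, payload); }
constexpr size_t tagOffset(size_t index)     { return sizeof(Slot) * index + offsetof(Slot, tag); }

static_assert(sizeof(Slot) == 8, "each element spans eight bytes, hence the TimesEight indexing");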
+ + ASSERT(!hasUndecided(structure->indexingType()) || !node->numChildren()); + + for (unsigned operandIdx = 0; operandIdx < node->numChildren(); ++operandIdx) { + Edge use = m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx]; + switch (node->indexingType()) { + case ALL_BLANK_INDEXING_TYPES: + case ALL_UNDECIDED_INDEXING_TYPES: + CRASH(); + break; + case ALL_DOUBLE_INDEXING_TYPES: { + SpeculateDoubleOperand operand(this, use); + FPRReg opFPR = operand.fpr(); + DFG_TYPE_CHECK( + JSValueRegs(), use, SpecRealNumber, + m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR)); + + m_jit.storeDouble(opFPR, MacroAssembler::Address(storageGPR, sizeof(double) * operandIdx)); + break; + } + case ALL_INT32_INDEXING_TYPES: { + SpeculateIntegerOperand operand(this, use); + m_jit.store32(TrustedImm32(JSValue::Int32Tag), MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + m_jit.store32(operand.gpr(), MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); + break; + } + case ALL_CONTIGUOUS_INDEXING_TYPES: { + JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx]); + GPRReg opTagGPR = operand.tagGPR(); + GPRReg opPayloadGPR = operand.payloadGPR(); + m_jit.store32(opTagGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + m_jit.store32(opPayloadGPR, MacroAssembler::Address(storageGPR, sizeof(JSValue) * operandIdx + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); + break; + } + default: + CRASH(); + break; + } + } + + // Yuck, we should *really* have a way of also returning the storageGPR. But + // that's the least of what's wrong with this code. We really shouldn't be + // allocating the array after having computed - and probably spilled to the + // stack - all of the things that will go into the array. The solution to that + // bigger problem will also likely fix the redundancy in reloading the storage + // pointer that we currently have. + + cellResult(resultGPR, node); + break; + } + + if (!node->numChildren()) { + flushRegisters(); + GPRResult result(this); + callOperation( + operationNewEmptyArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())); + cellResult(result.gpr(), node); + break; + } + + size_t scratchSize = sizeof(EncodedJSValue) * node->numChildren(); + ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(scratchSize); EncodedJSValue* buffer = scratchBuffer ? static_cast(scratchBuffer->dataBuffer()) : 0; - for (unsigned operandIdx = 0; operandIdx < node.numChildren(); ++operandIdx) { - JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node.firstChild() + operandIdx]); - GPRReg opTagGPR = operand.tagGPR(); - GPRReg opPayloadGPR = operand.payloadGPR(); - operand.use(); - - m_jit.store32(opTagGPR, reinterpret_cast(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); - m_jit.store32(opPayloadGPR, reinterpret_cast(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); + for (unsigned operandIdx = 0; operandIdx < node->numChildren(); ++operandIdx) { + // Need to perform the speculations that this node promises to perform. 
If we're + // emitting code here and the indexing type is not array storage then there is + // probably something hilarious going on and we're already failing at all the + // things, but at least we're going to be sound. + Edge use = m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx]; + switch (node->indexingType()) { + case ALL_BLANK_INDEXING_TYPES: + case ALL_UNDECIDED_INDEXING_TYPES: + CRASH(); + break; + case ALL_DOUBLE_INDEXING_TYPES: { + SpeculateDoubleOperand operand(this, use); + FPRReg opFPR = operand.fpr(); + DFG_TYPE_CHECK( + JSValueRegs(), use, SpecRealNumber, + m_jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, opFPR, opFPR)); + + m_jit.storeDouble(opFPR, reinterpret_cast(buffer + operandIdx)); + break; + } + case ALL_INT32_INDEXING_TYPES: { + SpeculateIntegerOperand operand(this, use); + GPRReg opGPR = operand.gpr(); + m_jit.store32(TrustedImm32(JSValue::Int32Tag), reinterpret_cast(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); + m_jit.store32(opGPR, reinterpret_cast(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); + break; + } + case ALL_CONTIGUOUS_INDEXING_TYPES: + case ALL_ARRAY_STORAGE_INDEXING_TYPES: { + JSValueOperand operand(this, m_jit.graph().m_varArgChildren[node->firstChild() + operandIdx]); + GPRReg opTagGPR = operand.tagGPR(); + GPRReg opPayloadGPR = operand.payloadGPR(); + + m_jit.store32(opTagGPR, reinterpret_cast(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); + m_jit.store32(opPayloadGPR, reinterpret_cast(buffer + operandIdx) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); + operand.use(); + break; + } + default: + CRASH(); + break; + } + } + + switch (node->indexingType()) { + case ALL_DOUBLE_INDEXING_TYPES: + case ALL_INT32_INDEXING_TYPES: + useChildren(node); + break; + default: + break; } flushRegisters(); @@ -2943,10 +3536,11 @@ void SpeculativeJIT::compile(Node& node) m_jit.storePtr(TrustedImmPtr(scratchSize), scratch.gpr()); } - GPRResult resultPayload(this); - GPRResult2 resultTag(this); + GPRResult result(this); - callOperation(op == StrCat ? operationStrCat : operationNewArray, resultTag.gpr(), resultPayload.gpr(), static_cast(buffer), node.numChildren()); + callOperation( + operationNewArray, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), + static_cast(buffer), node->numChildren()); if (scratchSize) { GPRTemporary scratch(this); @@ -2955,20 +3549,130 @@ void SpeculativeJIT::compile(Node& node) m_jit.storePtr(TrustedImmPtr(0), scratch.gpr()); } - // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag. 
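// Simplified standalone sketch (names assumed, not the real runtime signature) of the
// scratch-buffer hand-off used above: every operand is boxed into one shared per-VM buffer,
// and a single C call then receives just (buffer, count) instead of N separate arguments.
#include <cstdint>
#include <vector>

struct Boxed { int32_t payload; int32_t tag; };

// Stand-in for the operationNewArray-style callee: it consumes what the JIT spilled.
std::vector<Boxed> arrayFromBuffer(const Boxed* buffer, unsigned count)
{
    return std::vector<Boxed>(buffer, buffer + count);
}

// What the emitted code does in spirit: box each operand into the buffer, then make one call.
std::vector<Boxed> buildArray(const std::vector<Boxed>& operands, Boxed* scratch)
{
    for (unsigned i = 0; i < operands.size(); ++i)
        scratch[i] = operands[i]; // two 32-bit stores per element in the JIT
    return arrayFromBuffer(scratch, static_cast<unsigned>(operands.size()));
}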
- cellResult(resultPayload.gpr(), m_compileIndex, UseChildrenCalledExplicitly); + cellResult(result.gpr(), node, UseChildrenCalledExplicitly); break; } + case NewArrayWithSize: { + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin); + if (!globalObject->isHavingABadTime() && !hasArrayStorage(node->indexingType())) { + globalObject->havingABadTimeWatchpoint()->add(speculationWatchpoint()); + + SpeculateStrictInt32Operand size(this, node->child1()); + GPRTemporary result(this); + GPRTemporary storage(this); + GPRTemporary scratch(this); + GPRTemporary scratch2(this); + + GPRReg sizeGPR = size.gpr(); + GPRReg resultGPR = result.gpr(); + GPRReg storageGPR = storage.gpr(); + GPRReg scratchGPR = scratch.gpr(); + GPRReg scratch2GPR = scratch2.gpr(); + + MacroAssembler::JumpList slowCases; + slowCases.append(m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_SPARSE_ARRAY_INDEX))); + + ASSERT((1 << 3) == sizeof(JSValue)); + m_jit.move(sizeGPR, scratchGPR); + m_jit.lshift32(TrustedImm32(3), scratchGPR); + m_jit.add32(TrustedImm32(sizeof(IndexingHeader)), scratchGPR, resultGPR); + slowCases.append( + emitAllocateBasicStorage(resultGPR, storageGPR)); + m_jit.subPtr(scratchGPR, storageGPR); + Structure* structure = globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()); + emitAllocateJSObject(resultGPR, TrustedImmPtr(structure), storageGPR, scratchGPR, scratch2GPR, slowCases); + + m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfPublicLength())); + m_jit.store32(sizeGPR, MacroAssembler::Address(storageGPR, Butterfly::offsetOfVectorLength())); + + if (hasDouble(node->indexingType())) { + JSValue nan = JSValue(JSValue::EncodeAsDouble, QNaN); + + m_jit.move(sizeGPR, scratchGPR); + MacroAssembler::Jump done = m_jit.branchTest32(MacroAssembler::Zero, scratchGPR); + MacroAssembler::Label loop = m_jit.label(); + m_jit.sub32(TrustedImm32(1), scratchGPR); + m_jit.store32(TrustedImm32(nan.u.asBits.tag), MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag))); + m_jit.store32(TrustedImm32(nan.u.asBits.payload), MacroAssembler::BaseIndex(storageGPR, scratchGPR, MacroAssembler::TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload))); + m_jit.branchTest32(MacroAssembler::NonZero, scratchGPR).linkTo(loop, &m_jit); + done.link(&m_jit); + } + + addSlowPathGenerator(adoptPtr( + new CallArrayAllocatorWithVariableSizeSlowPathGenerator( + slowCases, this, operationNewArrayWithSize, resultGPR, + globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), + globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage), + sizeGPR))); + + cellResult(resultGPR, node); + break; + } + + SpeculateStrictInt32Operand size(this, node->child1()); + GPRReg sizeGPR = size.gpr(); + flushRegisters(); + GPRResult result(this); + GPRReg resultGPR = result.gpr(); + GPRReg structureGPR = selectScratchGPR(sizeGPR); + MacroAssembler::Jump bigLength = m_jit.branch32(MacroAssembler::AboveOrEqual, sizeGPR, TrustedImm32(MIN_SPARSE_ARRAY_INDEX)); + m_jit.move(TrustedImmPtr(globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType())), structureGPR); + MacroAssembler::Jump done = m_jit.jump(); + bigLength.link(&m_jit); + m_jit.move(TrustedImmPtr(globalObject->arrayStructureForIndexingTypeDuringAllocation(ArrayWithArrayStorage)), structureGPR); + done.link(&m_jit); + callOperation( + operationNewArrayWithSize, resultGPR, 
structureGPR, sizeGPR); + cellResult(resultGPR, node); + break; + } + case NewArrayBuffer: { + JSGlobalObject* globalObject = m_jit.graph().globalObjectFor(node->codeOrigin); + IndexingType indexingType = node->indexingType(); + if (!globalObject->isHavingABadTime() && !hasArrayStorage(indexingType)) { + globalObject->havingABadTimeWatchpoint()->add(speculationWatchpoint()); + + unsigned numElements = node->numConstants(); + + GPRTemporary result(this); + GPRTemporary storage(this); + + GPRReg resultGPR = result.gpr(); + GPRReg storageGPR = storage.gpr(); + + emitAllocateJSArray(resultGPR, globalObject->arrayStructureForIndexingTypeDuringAllocation(indexingType), storageGPR, numElements); + + if (node->indexingType() == ArrayWithDouble) { + JSValue* data = m_jit.codeBlock()->constantBuffer(node->startConstant()); + for (unsigned index = 0; index < node->numConstants(); ++index) { + union { + int32_t halves[2]; + double value; + } u; + u.value = data[index].asNumber(); + m_jit.store32(Imm32(u.halves[0]), MacroAssembler::Address(storageGPR, sizeof(double) * index)); + m_jit.store32(Imm32(u.halves[1]), MacroAssembler::Address(storageGPR, sizeof(double) * index + sizeof(int32_t))); + } + } else { + int32_t* data = bitwise_cast(m_jit.codeBlock()->constantBuffer(node->startConstant())); + for (unsigned index = 0; index < node->numConstants() * 2; ++index) { + m_jit.store32( + Imm32(data[index]), MacroAssembler::Address(storageGPR, sizeof(int32_t) * index)); + } + } + + cellResult(resultGPR, node); + break; + } + flushRegisters(); - GPRResult resultPayload(this); - GPRResult2 resultTag(this); + GPRResult result(this); - callOperation(operationNewArrayBuffer, resultTag.gpr(), resultPayload.gpr(), node.startConstant(), node.numConstants()); + callOperation(operationNewArrayBuffer, result.gpr(), globalObject->arrayStructureForIndexingTypeDuringAllocation(node->indexingType()), node->startConstant(), node->numConstants()); - // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag. - cellResult(resultPayload.gpr(), m_compileIndex); + cellResult(result.gpr(), node); break; } @@ -2977,56 +3681,17 @@ void SpeculativeJIT::compile(Node& node) GPRResult resultPayload(this); GPRResult2 resultTag(this); - callOperation(operationNewRegexp, resultTag.gpr(), resultPayload.gpr(), m_jit.codeBlock()->regexp(node.regexpIndex())); + callOperation(operationNewRegexp, resultTag.gpr(), resultPayload.gpr(), m_jit.codeBlock()->regexp(node->regexpIndex())); // FIXME: make the callOperation above explicitly return a cell result, or jitAssert the tag is a cell tag. - cellResult(resultPayload.gpr(), m_compileIndex); + cellResult(resultPayload.gpr(), node); break; } case ConvertThis: { - if (isObjectPrediction(m_state.forNode(node.child1()).m_type)) { - SpeculateCellOperand thisValue(this, node.child1()); - GPRTemporary result(this, thisValue); - m_jit.move(thisValue.gpr(), result.gpr()); - cellResult(result.gpr(), m_compileIndex); - break; - } - - if (isOtherPrediction(at(node.child1()).prediction())) { - JSValueOperand thisValue(this, node.child1()); - GPRTemporary scratch(this); - - GPRReg thisValueTagGPR = thisValue.tagGPR(); - GPRReg scratchGPR = scratch.gpr(); - - COMPILE_ASSERT((JSValue::UndefinedTag | 1) == JSValue::NullTag, UndefinedTag_OR_1_EQUALS_NullTag); - m_jit.move(thisValueTagGPR, scratchGPR); - m_jit.or32(TrustedImm32(1), scratchGPR); - // This is hard. 
It would be better to save the value, but we can't quite do it, - // since this operation does not otherwise get the payload. - speculationCheck(BadType, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::NotEqual, scratchGPR, TrustedImm32(JSValue::NullTag))); - - m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.globalThisObjectFor(node.codeOrigin)), scratchGPR); - cellResult(scratchGPR, m_compileIndex); - break; - } - - if (isObjectPrediction(at(node.child1()).prediction())) { - SpeculateCellOperand thisValue(this, node.child1()); - GPRReg thisValueGPR = thisValue.gpr(); - - if (!isObjectPrediction(m_state.forNode(node.child1()).m_type)) - speculationCheck(BadType, JSValueSource::unboxedCell(thisValueGPR), node.child1(), m_jit.branchPtr(JITCompiler::Equal, JITCompiler::Address(thisValueGPR, JSCell::classInfoOffset()), JITCompiler::TrustedImmPtr(&JSString::s_info))); - - GPRTemporary result(this, thisValue); - GPRReg resultGPR = result.gpr(); - m_jit.move(thisValueGPR, resultGPR); - cellResult(resultGPR, m_compileIndex); - break; - } - - JSValueOperand thisValue(this, node.child1()); + ASSERT(node->child1().useKind() == UntypedUse); + + JSValueOperand thisValue(this, node->child1()); GPRReg thisValueTagGPR = thisValue.tagGPR(); GPRReg thisValuePayloadGPR = thisValue.payloadGPR(); @@ -3036,7 +3701,7 @@ void SpeculativeJIT::compile(Node& node) GPRResult resultPayload(this); callOperation(operationConvertThis, resultTag.gpr(), resultPayload.gpr(), thisValueTagGPR, thisValuePayloadGPR); - cellResult(resultPayload.gpr(), m_compileIndex); + cellResult(resultPayload.gpr(), node); break; } @@ -3047,195 +3712,215 @@ void SpeculativeJIT::compile(Node& node) // then we speculate because we want to get recompiled if it isn't (since // otherwise we'd start taking slow path a lot). - SpeculateCellOperand proto(this, node.child1()); + SpeculateCellOperand callee(this, node->child1()); GPRTemporary result(this); + GPRTemporary allocator(this); + GPRTemporary structure(this); GPRTemporary scratch(this); - GPRReg protoGPR = proto.gpr(); + GPRReg calleeGPR = callee.gpr(); GPRReg resultGPR = result.gpr(); + GPRReg allocatorGPR = allocator.gpr(); + GPRReg structureGPR = structure.gpr(); GPRReg scratchGPR = scratch.gpr(); - proto.use(); - MacroAssembler::JumpList slowPath; + + m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorGPR); + m_jit.loadPtr(JITCompiler::Address(calleeGPR, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureGPR); + slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, allocatorGPR)); + emitAllocateJSObject(resultGPR, allocatorGPR, structureGPR, TrustedImmPtr(0), scratchGPR, slowPath); + + addSlowPathGenerator(slowPathCall(slowPath, this, operationCreateThis, resultGPR, calleeGPR, node->inlineCapacity())); - // Need to verify that the prototype is an object. If we have reason to believe - // that it's a FinalObject then we speculate on that directly. Otherwise we - // do the slow (structure-based) check. 
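// Hedged standalone sketch (illustrative names, not JSC's) of the inline allocation fast
// path that the CreateThis and NewObject cases below rely on: pop a cell off a per-size-class
// free list and fall back to a slow path when the list is empty.
struct FreeCell { FreeCell* next; };

struct Allocator {
    FreeCell* freeListHead = nullptr;

    // Returns nullptr where the generated code would branch to its slow-path generator,
    // which can refill the free list or run a collection before retrying.
    void* tryAllocate()
    {
        FreeCell* cell = freeListHead;
        if (!cell)
            return nullptr;
        freeListHead = cell->next; // pop the head cell
        return cell;               // the caller then stores the Structure* into the new cell
    }
};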
- if (at(node.child1()).shouldSpeculateFinalObject()) { - if (!isFinalObjectPrediction(m_state.forNode(node.child1()).m_type)) - speculationCheck(BadType, JSValueSource::unboxedCell(protoGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(protoGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSFinalObject::s_info))); - } else { - m_jit.loadPtr(MacroAssembler::Address(protoGPR, JSCell::structureOffset()), scratchGPR); - slowPath.append(m_jit.branch8(MacroAssembler::Below, MacroAssembler::Address(scratchGPR, Structure::typeInfoTypeOffset()), MacroAssembler::TrustedImm32(ObjectType))); - } - - // Load the inheritorID (the Structure that objects who have protoGPR as the prototype - // use to refer to that prototype). If the inheritorID is not set, go to slow path. - m_jit.loadPtr(MacroAssembler::Address(protoGPR, JSObject::offsetOfInheritorID()), scratchGPR); - slowPath.append(m_jit.branchTestPtr(MacroAssembler::Zero, scratchGPR)); - - emitAllocateJSFinalObject(scratchGPR, resultGPR, scratchGPR, slowPath); - - MacroAssembler::Jump done = m_jit.jump(); - - slowPath.link(&m_jit); - - silentSpillAllRegisters(resultGPR); - if (node.codeOrigin.inlineCallFrame) - callOperation(operationCreateThisInlined, resultGPR, protoGPR, node.codeOrigin.inlineCallFrame->callee.get()); - else - callOperation(operationCreateThis, resultGPR, protoGPR); - silentFillAllRegisters(resultGPR); - - done.link(&m_jit); - - cellResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly); + cellResult(resultGPR, node); + break; + } + + case AllocationProfileWatchpoint: { + jsCast(node->function())->addAllocationProfileWatchpoint(speculationWatchpoint()); + noResult(node); break; } case NewObject: { GPRTemporary result(this); + GPRTemporary allocator(this); GPRTemporary scratch(this); GPRReg resultGPR = result.gpr(); + GPRReg allocatorGPR = allocator.gpr(); GPRReg scratchGPR = scratch.gpr(); MacroAssembler::JumpList slowPath; - emitAllocateJSFinalObject(MacroAssembler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)->emptyObjectStructure()), resultGPR, scratchGPR, slowPath); - - MacroAssembler::Jump done = m_jit.jump(); - - slowPath.link(&m_jit); - - silentSpillAllRegisters(resultGPR); - callOperation(operationNewObject, resultGPR); - silentFillAllRegisters(resultGPR); - - done.link(&m_jit); + Structure* structure = node->structure(); + size_t allocationSize = JSObject::allocationSize(structure->inlineCapacity()); + MarkedAllocator* allocatorPtr = &m_jit.vm()->heap.allocatorForObjectWithoutDestructor(allocationSize); + + m_jit.move(TrustedImmPtr(allocatorPtr), allocatorGPR); + emitAllocateJSObject(resultGPR, allocatorGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR, slowPath); + + addSlowPathGenerator(slowPathCall(slowPath, this, operationNewObject, resultGPR, structure)); - cellResult(resultGPR, m_compileIndex); + cellResult(resultGPR, node); break; } case GetCallee: { GPRTemporary result(this); - m_jit.loadPtr(JITCompiler::addressFor(static_cast(RegisterFile::Callee)), result.gpr()); - cellResult(result.gpr(), m_compileIndex); + m_jit.loadPtr(JITCompiler::payloadFor(static_cast(node->codeOrigin.stackOffset() + static_cast(JSStack::Callee))), result.gpr()); + cellResult(result.gpr(), node); break; } - - case GetScopeChain: { + + case SetCallee: { + SpeculateCellOperand callee(this, node->child1()); + m_jit.storePtr(callee.gpr(), JITCompiler::payloadFor(static_cast(node->codeOrigin.stackOffset() + static_cast(JSStack::Callee)))); + 
m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag), JITCompiler::tagFor(static_cast(node->codeOrigin.stackOffset() + static_cast(JSStack::Callee)))); + noResult(node); + break; + } + + case GetScope: { + SpeculateCellOperand function(this, node->child1()); + GPRTemporary result(this, function); + m_jit.loadPtr(JITCompiler::Address(function.gpr(), JSFunction::offsetOfScopeChain()), result.gpr()); + cellResult(result.gpr(), node); + break; + } + + case GetMyScope: { GPRTemporary result(this); GPRReg resultGPR = result.gpr(); - m_jit.loadPtr(JITCompiler::addressFor(static_cast(RegisterFile::ScopeChain)), resultGPR); - bool checkTopLevel = m_jit.codeBlock()->codeType() == FunctionCode && m_jit.codeBlock()->needsFullScopeChain(); - int skip = node.scopeChainDepth(); - ASSERT(skip || !checkTopLevel); - if (checkTopLevel && skip--) { - JITCompiler::Jump activationNotCreated; - if (checkTopLevel) - activationNotCreated = m_jit.branchTestPtr(JITCompiler::Zero, JITCompiler::addressFor(static_cast(m_jit.codeBlock()->activationRegister()))); - m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, next)), resultGPR); - activationNotCreated.link(&m_jit); - } - while (skip--) - m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, next)), resultGPR); + m_jit.loadPtr(JITCompiler::payloadFor(static_cast(node->codeOrigin.stackOffset() + static_cast(JSStack::ScopeChain))), resultGPR); + cellResult(resultGPR, node); + break; + } + + case SetMyScope: { + SpeculateCellOperand callee(this, node->child1()); + m_jit.storePtr(callee.gpr(), JITCompiler::payloadFor(static_cast(node->codeOrigin.stackOffset() + static_cast(JSStack::ScopeChain)))); + noResult(node); + break; + } + + case SkipTopScope: { + SpeculateCellOperand scope(this, node->child1()); + GPRTemporary result(this, scope); + GPRReg resultGPR = result.gpr(); + m_jit.move(scope.gpr(), resultGPR); + JITCompiler::Jump activationNotCreated = + m_jit.branchTestPtr( + JITCompiler::Zero, + JITCompiler::payloadFor( + static_cast(m_jit.codeBlock()->activationRegister()))); + m_jit.loadPtr(JITCompiler::Address(resultGPR, JSScope::offsetOfNext()), resultGPR); + activationNotCreated.link(&m_jit); + cellResult(resultGPR, node); + break; + } - m_jit.loadPtr(JITCompiler::Address(resultGPR, OBJECT_OFFSETOF(ScopeChainNode, object)), resultGPR); + case SkipScope: { + SpeculateCellOperand scope(this, node->child1()); + GPRTemporary result(this, scope); + m_jit.loadPtr(JITCompiler::Address(scope.gpr(), JSScope::offsetOfNext()), result.gpr()); + cellResult(result.gpr(), node); + break; + } + + case GetScopeRegisters: { + SpeculateCellOperand scope(this, node->child1()); + GPRTemporary result(this); + GPRReg scopeGPR = scope.gpr(); + GPRReg resultGPR = result.gpr(); - cellResult(resultGPR, m_compileIndex); + m_jit.loadPtr(JITCompiler::Address(scopeGPR, JSVariableObject::offsetOfRegisters()), resultGPR); + storageResult(resultGPR, node); break; } case GetScopedVar: { - SpeculateCellOperand scopeChain(this, node.child1()); + StorageOperand registers(this, node->child1()); GPRTemporary resultTag(this); GPRTemporary resultPayload(this); + GPRReg registersGPR = registers.gpr(); GPRReg resultTagGPR = resultTag.gpr(); GPRReg resultPayloadGPR = resultPayload.gpr(); - m_jit.loadPtr(JITCompiler::Address(scopeChain.gpr(), JSVariableObject::offsetOfRegisters()), resultPayloadGPR); - m_jit.load32(JITCompiler::Address(resultPayloadGPR, node.varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), 
resultTagGPR); - m_jit.load32(JITCompiler::Address(resultPayloadGPR, node.varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR); - jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex); + m_jit.load32(JITCompiler::Address(registersGPR, node->varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); + m_jit.load32(JITCompiler::Address(registersGPR, node->varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR); + jsValueResult(resultTagGPR, resultPayloadGPR, node); break; } case PutScopedVar: { - SpeculateCellOperand scopeChain(this, node.child1()); + SpeculateCellOperand scope(this, node->child1()); + StorageOperand registers(this, node->child2()); + JSValueOperand value(this, node->child3()); GPRTemporary scratchRegister(this); + GPRReg scopeGPR = scope.gpr(); + GPRReg registersGPR = registers.gpr(); + GPRReg valueTagGPR = value.tagGPR(); + GPRReg valuePayloadGPR = value.payloadGPR(); GPRReg scratchGPR = scratchRegister.gpr(); - m_jit.loadPtr(JITCompiler::Address(scopeChain.gpr(), JSVariableObject::offsetOfRegisters()), scratchGPR); - JSValueOperand value(this, node.child2()); - m_jit.store32(value.tagGPR(), JITCompiler::Address(scratchGPR, node.varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); - m_jit.store32(value.payloadGPR(), JITCompiler::Address(scratchGPR, node.varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); - writeBarrier(scopeChain.gpr(), value.tagGPR(), node.child2(), WriteBarrierForVariableAccess, scratchGPR); - noResult(m_compileIndex); + + m_jit.store32(valueTagGPR, JITCompiler::Address(registersGPR, node->varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); + m_jit.store32(valuePayloadGPR, JITCompiler::Address(registersGPR, node->varNumber() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); + writeBarrier(scopeGPR, valueTagGPR, node->child2(), WriteBarrierForVariableAccess, scratchGPR); + noResult(node); break; } case GetById: { - if (!node.prediction()) { - terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode); + if (!node->prediction()) { + terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); break; } - if (isCellPrediction(at(node.child1()).prediction())) { - SpeculateCellOperand base(this, node.child1()); + if (isCellSpeculation(node->child1()->prediction())) { + SpeculateCellOperand base(this, node->child1()); GPRTemporary resultTag(this, base); GPRTemporary resultPayload(this); GPRReg baseGPR = base.gpr(); GPRReg resultTagGPR = resultTag.gpr(); GPRReg resultPayloadGPR = resultPayload.gpr(); - GPRReg scratchGPR; - - if (resultTagGPR == baseGPR) - scratchGPR = resultPayloadGPR; - else - scratchGPR = resultTagGPR; - + base.use(); - cachedGetById(node.codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber()); + cachedGetById(node->codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber()); - jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly); + jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly); break; } - JSValueOperand base(this, node.child1()); + JSValueOperand base(this, node->child1()); GPRTemporary resultTag(this, base); GPRTemporary resultPayload(this); GPRReg baseTagGPR = 
base.tagGPR(); GPRReg basePayloadGPR = base.payloadGPR(); GPRReg resultTagGPR = resultTag.gpr(); - GPRReg resultPayloadGPR = resultPayload.gpr(); - GPRReg scratchGPR; - - if (resultTagGPR == basePayloadGPR) - scratchGPR = resultPayloadGPR; - else - scratchGPR = resultTagGPR; + GPRReg resultPayloadGPR = resultPayload.gpr(); base.use(); JITCompiler::Jump notCell = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag)); - cachedGetById(node.codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), notCell); + cachedGetById(node->codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), notCell); - jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly); + jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly); break; } case GetByIdFlush: { - if (!node.prediction()) { - terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode); + if (!node->prediction()) { + terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0); break; } - if (isCellPrediction(at(node.child1()).prediction())) { - SpeculateCellOperand base(this, node.child1()); + switch (node->child1().useKind()) { + case CellUse: { + SpeculateCellOperand base(this, node->child1()); GPRReg baseGPR = base.gpr(); @@ -3244,181 +3929,176 @@ void SpeculativeJIT::compile(Node& node) GPRReg resultTagGPR = resultTag.gpr(); GPRReg resultPayloadGPR = resultPayload.gpr(); - GPRReg scratchGPR = selectScratchGPR(baseGPR, resultTagGPR, resultPayloadGPR); - base.use(); flushRegisters(); - cachedGetById(node.codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), JITCompiler::Jump(), DontSpill); + cachedGetById(node->codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), JITCompiler::Jump(), DontSpill); - jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly); + jsValueResult(resultTagGPR, resultPayloadGPR, node, UseChildrenCalledExplicitly); break; } - JSValueOperand base(this, node.child1()); - GPRReg baseTagGPR = base.tagGPR(); - GPRReg basePayloadGPR = base.payloadGPR(); + case UntypedUse: { + JSValueOperand base(this, node->child1()); + GPRReg baseTagGPR = base.tagGPR(); + GPRReg basePayloadGPR = base.payloadGPR(); - GPRResult resultTag(this); - GPRResult2 resultPayload(this); - GPRReg resultTagGPR = resultTag.gpr(); - GPRReg resultPayloadGPR = resultPayload.gpr(); + GPRResult resultTag(this); + GPRResult2 resultPayload(this); + GPRReg resultTagGPR = resultTag.gpr(); + GPRReg resultPayloadGPR = resultPayload.gpr(); - GPRReg scratchGPR = selectScratchGPR(baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR); - - base.use(); + base.use(); - flushRegisters(); + flushRegisters(); - JITCompiler::Jump notCell = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag)); + JITCompiler::Jump notCell = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag)); - cachedGetById(node.codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), notCell, DontSpill); + cachedGetById(node->codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node->identifierNumber(), notCell, DontSpill); - jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly); + jsValueResult(resultTagGPR, resultPayloadGPR, 
node, UseChildrenCalledExplicitly); + break; + } + + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } break; } - case GetArrayLength: { - SpeculateCellOperand base(this, node.child1()); - GPRReg baseGPR = base.gpr(); - - if (!isArrayPrediction(m_state.forNode(node.child1()).m_type)) - speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info))); - - GPRTemporary result(this); - GPRReg resultGPR = result.gpr(); - - m_jit.loadPtr(MacroAssembler::Address(baseGPR, JSArray::storageOffset()), resultGPR); - m_jit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(ArrayStorage, m_length)), resultGPR); - - speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branch32(MacroAssembler::LessThan, resultGPR, MacroAssembler::TrustedImm32(0))); - - integerResult(resultGPR, m_compileIndex); + case GetArrayLength: + compileGetArrayLength(node); break; - } - - case GetStringLength: { - SpeculateCellOperand base(this, node.child1()); - GPRTemporary result(this); - - GPRReg baseGPR = base.gpr(); - GPRReg resultGPR = result.gpr(); - if (!isStringPrediction(m_state.forNode(node.child1()).m_type)) - speculationCheck(BadType, JSValueSource::unboxedCell(baseGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSString::s_info))); - - m_jit.load32(MacroAssembler::Address(baseGPR, JSString::offsetOfLength()), resultGPR); - - integerResult(resultGPR, m_compileIndex); - break; - } - - case GetInt8ArrayLength: { - compileGetTypedArrayLength(m_jit.globalData()->int8ArrayDescriptor(), node, !isInt8ArrayPrediction(m_state.forNode(node.child1()).m_type)); - break; - } - case GetInt16ArrayLength: { - compileGetTypedArrayLength(m_jit.globalData()->int16ArrayDescriptor(), node, !isInt16ArrayPrediction(m_state.forNode(node.child1()).m_type)); - break; - } - case GetInt32ArrayLength: { - compileGetTypedArrayLength(m_jit.globalData()->int32ArrayDescriptor(), node, !isInt32ArrayPrediction(m_state.forNode(node.child1()).m_type)); - break; - } - case GetUint8ArrayLength: { - compileGetTypedArrayLength(m_jit.globalData()->uint8ArrayDescriptor(), node, !isUint8ArrayPrediction(m_state.forNode(node.child1()).m_type)); - break; - } - case GetUint8ClampedArrayLength: { - compileGetTypedArrayLength(m_jit.globalData()->uint8ClampedArrayDescriptor(), node, !isUint8ClampedArrayPrediction(m_state.forNode(node.child1()).m_type)); - break; - } - case GetUint16ArrayLength: { - compileGetTypedArrayLength(m_jit.globalData()->uint16ArrayDescriptor(), node, !isUint16ArrayPrediction(m_state.forNode(node.child1()).m_type)); - break; - } - case GetUint32ArrayLength: { - compileGetTypedArrayLength(m_jit.globalData()->uint32ArrayDescriptor(), node, !isUint32ArrayPrediction(m_state.forNode(node.child1()).m_type)); - break; - } - case GetFloat32ArrayLength: { - compileGetTypedArrayLength(m_jit.globalData()->float32ArrayDescriptor(), node, !isFloat32ArrayPrediction(m_state.forNode(node.child1()).m_type)); - break; - } - case GetFloat64ArrayLength: { - compileGetTypedArrayLength(m_jit.globalData()->float64ArrayDescriptor(), node, !isFloat64ArrayPrediction(m_state.forNode(node.child1()).m_type)); + case CheckFunction: { + SpeculateCellOperand function(this, node->child1()); + speculationCheck(BadFunction, JSValueSource::unboxedCell(function.gpr()), node->child1(), 
m_jit.branchWeakPtr(JITCompiler::NotEqual, function.gpr(), node->function())); + noResult(node); break; } - case CheckFunction: { - SpeculateCellOperand function(this, node.child1()); - speculationCheck(BadCache, JSValueRegs(), NoNode, m_jit.branchWeakPtr(JITCompiler::NotEqual, function.gpr(), node.function())); - noResult(m_compileIndex); + case CheckExecutable: { + SpeculateCellOperand function(this, node->child1()); + speculationCheck(BadExecutable, JSValueSource::unboxedCell(function.gpr()), node->child1(), m_jit.branchWeakPtr(JITCompiler::NotEqual, JITCompiler::Address(function.gpr(), JSFunction::offsetOfExecutable()), node->executable())); + noResult(node); break; } - - case CheckStructure: { - if (m_state.forNode(node.child1()).m_structure.isSubsetOf(node.structureSet())) { - noResult(m_compileIndex); - break; - } - SpeculateCellOperand base(this, node.child1()); + case CheckStructure: + case ForwardCheckStructure: { + SpeculateCellOperand base(this, node->child1()); - ASSERT(node.structureSet().size()); + ASSERT(node->structureSet().size()); - if (node.structureSet().size() == 1) - speculationCheck(BadCache, JSValueRegs(), NoNode, m_jit.branchWeakPtr(JITCompiler::NotEqual, JITCompiler::Address(base.gpr(), JSCell::structureOffset()), node.structureSet()[0])); - else { + if (node->structureSet().size() == 1) { + speculationCheck( + BadCache, JSValueSource::unboxedCell(base.gpr()), 0, + m_jit.branchWeakPtr( + JITCompiler::NotEqual, + JITCompiler::Address(base.gpr(), JSCell::structureOffset()), + node->structureSet()[0])); + } else { GPRTemporary structure(this); m_jit.loadPtr(JITCompiler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr()); JITCompiler::JumpList done; - for (size_t i = 0; i < node.structureSet().size() - 1; ++i) - done.append(m_jit.branchWeakPtr(JITCompiler::Equal, structure.gpr(), node.structureSet()[i])); + for (size_t i = 0; i < node->structureSet().size() - 1; ++i) + done.append(m_jit.branchWeakPtr(JITCompiler::Equal, structure.gpr(), node->structureSet()[i])); - speculationCheck(BadCache, JSValueRegs(), NoNode, m_jit.branchWeakPtr(JITCompiler::NotEqual, structure.gpr(), node.structureSet().last())); + speculationCheck( + BadCache, JSValueSource::unboxedCell(base.gpr()), 0, + m_jit.branchWeakPtr( + JITCompiler::NotEqual, structure.gpr(), node->structureSet().last())); done.link(&m_jit); } - noResult(m_compileIndex); + noResult(node); + break; + } + + case StructureTransitionWatchpoint: + case ForwardStructureTransitionWatchpoint: { + // There is a fascinating question here of what to do about array profiling. + // We *could* try to tell the OSR exit about where the base of the access is. + // The DFG will have kept it alive, though it may not be in a register, and + // we shouldn't really load it since that could be a waste. For now though, + // we'll just rely on the fact that when a watchpoint fires then that's + // quite a hint already. + + m_jit.addWeakReference(node->structure()); + node->structure()->addTransitionWatchpoint( + speculationWatchpoint( + node->child1()->op() == WeakJSConstant ? 
BadWeakConstantCache : BadCache)); + +#if !ASSERT_DISABLED + SpeculateCellOperand op1(this, node->child1()); + JITCompiler::Jump isOK = m_jit.branchPtr(JITCompiler::Equal, JITCompiler::Address(op1.gpr(), JSCell::structureOffset()), TrustedImmPtr(node->structure())); + m_jit.breakpoint(); + isOK.link(&m_jit); +#else + speculateCell(node->child1()); +#endif + + noResult(node); break; } + case PhantomPutStructure: { + ASSERT(isKnownCell(node->child1().node())); + ASSERT(node->structureTransitionData().previousStructure->transitionWatchpointSetHasBeenInvalidated()); + m_jit.addWeakReferenceTransition( + node->codeOrigin.codeOriginOwner(), + node->structureTransitionData().previousStructure, + node->structureTransitionData().newStructure); + noResult(node); + break; + } + case PutStructure: { - SpeculateCellOperand base(this, node.child1()); + ASSERT(node->structureTransitionData().previousStructure->transitionWatchpointSetHasBeenInvalidated()); + + SpeculateCellOperand base(this, node->child1()); GPRReg baseGPR = base.gpr(); m_jit.addWeakReferenceTransition( - node.codeOrigin.codeOriginOwner(), - node.structureTransitionData().previousStructure, - node.structureTransitionData().newStructure); + node->codeOrigin.codeOriginOwner(), + node->structureTransitionData().previousStructure, + node->structureTransitionData().newStructure); -#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING) +#if ENABLE(WRITE_BARRIER_PROFILING) // Must always emit this write barrier as the structure transition itself requires it - writeBarrier(baseGPR, node.structureTransitionData().newStructure, WriteBarrierForGenericAccess); + writeBarrier(baseGPR, node->structureTransitionData().newStructure, WriteBarrierForGenericAccess); #endif - m_jit.storePtr(MacroAssembler::TrustedImmPtr(node.structureTransitionData().newStructure), MacroAssembler::Address(baseGPR, JSCell::structureOffset())); + m_jit.storePtr(MacroAssembler::TrustedImmPtr(node->structureTransitionData().newStructure), MacroAssembler::Address(baseGPR, JSCell::structureOffset())); - noResult(m_compileIndex); + noResult(node); break; } - case GetPropertyStorage: { - SpeculateCellOperand base(this, node.child1()); + case AllocatePropertyStorage: + compileAllocatePropertyStorage(node); + break; + + case ReallocatePropertyStorage: + compileReallocatePropertyStorage(node); + break; + + case GetButterfly: { + SpeculateCellOperand base(this, node->child1()); GPRTemporary result(this, base); GPRReg baseGPR = base.gpr(); GPRReg resultGPR = result.gpr(); - m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR); + m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR); - storageResult(resultGPR, m_compileIndex); + storageResult(resultGPR, node); break; } @@ -3428,7 +4108,7 @@ void SpeculativeJIT::compile(Node& node) } case GetByOffset: { - StorageOperand storage(this, node.child1()); + StorageOperand storage(this, node->child1()); GPRTemporary resultTag(this, storage); GPRTemporary resultPayload(this); @@ -3436,42 +4116,42 @@ void SpeculativeJIT::compile(Node& node) GPRReg resultTagGPR = resultTag.gpr(); GPRReg resultPayloadGPR = resultPayload.gpr(); - StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node.storageAccessDataIndex()]; + StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node->storageAccessDataIndex()]; m_jit.load32(JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, 
asBits.payload)), resultPayloadGPR); m_jit.load32(JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); - jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex); + jsValueResult(resultTagGPR, resultPayloadGPR, node); break; } case PutByOffset: { -#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING) - SpeculateCellOperand base(this, node.child1()); +#if ENABLE(WRITE_BARRIER_PROFILING) + SpeculateCellOperand base(this, node->child2()); #endif - StorageOperand storage(this, node.child2()); - JSValueOperand value(this, node.child3()); + StorageOperand storage(this, node->child1()); + JSValueOperand value(this, node->child3()); GPRReg storageGPR = storage.gpr(); GPRReg valueTagGPR = value.tagGPR(); GPRReg valuePayloadGPR = value.payloadGPR(); -#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING) - writeBarrier(base.gpr(), valueTagGPR, node.child3(), WriteBarrierForPropertyAccess); +#if ENABLE(WRITE_BARRIER_PROFILING) + writeBarrier(base.gpr(), valueTagGPR, node->child3(), WriteBarrierForPropertyAccess); #endif - StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node.storageAccessDataIndex()]; + StorageAccessData& storageAccessData = m_jit.graph().m_storageAccessData[node->storageAccessDataIndex()]; m_jit.storePtr(valueTagGPR, JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); m_jit.storePtr(valuePayloadGPR, JITCompiler::Address(storageGPR, storageAccessData.offset * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); - noResult(m_compileIndex); + noResult(node); break; } case PutById: { - SpeculateCellOperand base(this, node.child1()); - JSValueOperand value(this, node.child2()); + SpeculateCellOperand base(this, node->child1()); + JSValueOperand value(this, node->child2()); GPRTemporary scratch(this); GPRReg baseGPR = base.gpr(); @@ -3482,15 +4162,15 @@ void SpeculativeJIT::compile(Node& node) base.use(); value.use(); - cachedPutById(node.codeOrigin, baseGPR, valueTagGPR, valuePayloadGPR, node.child2(), scratchGPR, node.identifierNumber(), NotDirect); + cachedPutById(node->codeOrigin, baseGPR, valueTagGPR, valuePayloadGPR, node->child2(), scratchGPR, node->identifierNumber(), NotDirect); - noResult(m_compileIndex, UseChildrenCalledExplicitly); + noResult(node, UseChildrenCalledExplicitly); break; } case PutByIdDirect: { - SpeculateCellOperand base(this, node.child1()); - JSValueOperand value(this, node.child2()); + SpeculateCellOperand base(this, node->child1()); + JSValueOperand value(this, node->child2()); GPRTemporary scratch(this); GPRReg baseGPR = base.gpr(); @@ -3501,54 +4181,109 @@ void SpeculativeJIT::compile(Node& node) base.use(); value.use(); - cachedPutById(node.codeOrigin, baseGPR, valueTagGPR, valuePayloadGPR, node.child2(), scratchGPR, node.identifierNumber(), Direct); + cachedPutById(node->codeOrigin, baseGPR, valueTagGPR, valuePayloadGPR, node->child2(), scratchGPR, node->identifierNumber(), Direct); - noResult(m_compileIndex, UseChildrenCalledExplicitly); + noResult(node, UseChildrenCalledExplicitly); break; } case GetGlobalVar: { - GPRTemporary result(this); - GPRTemporary scratch(this); + GPRTemporary resultPayload(this); + GPRTemporary resultTag(this); - JSVariableObject* globalObject = m_jit.globalObjectFor(node.codeOrigin); - m_jit.loadPtr(const_cast**>(globalObject->addressOfRegisters()), result.gpr()); - 
m_jit.load32(JITCompiler::tagForGlobalVar(result.gpr(), node.varNumber()), scratch.gpr()); - m_jit.load32(JITCompiler::payloadForGlobalVar(result.gpr(), node.varNumber()), result.gpr()); + m_jit.move(TrustedImmPtr(node->registerPointer()), resultPayload.gpr()); + m_jit.load32(JITCompiler::Address(resultPayload.gpr(), OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTag.gpr()); + m_jit.load32(JITCompiler::Address(resultPayload.gpr(), OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayload.gpr()); - jsValueResult(scratch.gpr(), result.gpr(), m_compileIndex); + jsValueResult(resultTag.gpr(), resultPayload.gpr(), node); break; } case PutGlobalVar: { - JSValueOperand value(this, node.child1()); - GPRTemporary globalObject(this); - GPRTemporary scratch(this); - - GPRReg globalObjectReg = globalObject.gpr(); - GPRReg scratchReg = scratch.gpr(); + JSValueOperand value(this, node->child1()); + if (Heap::isWriteBarrierEnabled()) { + GPRTemporary scratch(this); + GPRReg scratchReg = scratch.gpr(); + + writeBarrier(m_jit.globalObjectFor(node->codeOrigin), value.tagGPR(), node->child1(), WriteBarrierForVariableAccess, scratchReg); + } - m_jit.move(MacroAssembler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)), globalObjectReg); + // FIXME: if we happen to have a spare register - and _ONLY_ if we happen to have + // a spare register - a good optimization would be to put the register pointer into + // a register and then do a zero offset store followed by a four-offset store (or + // vice-versa depending on endianness). + m_jit.store32(value.tagGPR(), node->registerPointer()->tagPointer()); + m_jit.store32(value.payloadGPR(), node->registerPointer()->payloadPointer()); + + noResult(node); + break; + } - writeBarrier(m_jit.globalObjectFor(node.codeOrigin), value.tagGPR(), node.child1(), WriteBarrierForVariableAccess, scratchReg); + case PutGlobalVarCheck: { + JSValueOperand value(this, node->child1()); + + WatchpointSet* watchpointSet = + m_jit.globalObjectFor(node->codeOrigin)->symbolTable()->get( + identifier(node->identifierNumberForCheck())->impl()).watchpointSet(); + addSlowPathGenerator( + slowPathCall( + m_jit.branchTest8( + JITCompiler::NonZero, + JITCompiler::AbsoluteAddress(watchpointSet->addressOfIsWatched())), + this, operationNotifyGlobalVarWrite, NoResult, watchpointSet)); + + if (Heap::isWriteBarrierEnabled()) { + GPRTemporary scratch(this); + GPRReg scratchReg = scratch.gpr(); + + writeBarrier(m_jit.globalObjectFor(node->codeOrigin), value.tagGPR(), node->child1(), WriteBarrierForVariableAccess, scratchReg); + } - m_jit.loadPtr(MacroAssembler::Address(globalObjectReg, JSVariableObject::offsetOfRegisters()), scratchReg); - m_jit.store32(value.tagGPR(), JITCompiler::tagForGlobalVar(scratchReg, node.varNumber())); - m_jit.store32(value.payloadGPR(), JITCompiler::payloadForGlobalVar(scratchReg, node.varNumber())); + // FIXME: if we happen to have a spare register - and _ONLY_ if we happen to have + // a spare register - a good optimization would be to put the register pointer into + // a register and then do a zero offset store followed by a four-offset store (or + // vice-versa depending on endianness). 
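// Illustrative sketch (assumed layout, not JSC code) of the FIXME above: today the variable
// write is two stores through absolute addresses; with a spare register the same write becomes
// one pointer move plus two small-offset stores.
#include <cstdint>

struct RegisterSlot { int32_t payload; int32_t tag; }; // little-endian split of one variable slot

// Current shape: each store must encode the full absolute address.
inline void storeAbsolute(RegisterSlot* slot, int32_t tag, int32_t payload)
{
    slot->payload = payload;
    slot->tag = tag;
}

// Suggested shape: materialize the base once, then store at offsets 0 and 4.
inline void storeViaBase(RegisterSlot* base, int32_t tag, int32_t payload)
{
    RegisterSlot* p = base; // one register move
    p->payload = payload;   // zero-offset store
    p->tag = tag;           // four-offset store
}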
+
+    case GlobalVarWatchpoint: {
+        m_jit.globalObjectFor(node->codeOrigin)->symbolTable()->get(
+            identifier(node->identifierNumberForCheck())->impl()).addWatchpoint(
+                speculationWatchpoint());
+
+#if DFG_ENABLE(JIT_ASSERT)
+        GPRTemporary scratch(this);
+        GPRReg scratchGPR = scratch.gpr();
+        m_jit.load32(node->registerPointer()->tagPointer(), scratchGPR);
+        JITCompiler::Jump notOK = m_jit.branch32(
+            JITCompiler::NotEqual, scratchGPR,
+            TrustedImm32(node->registerPointer()->get().tag()));
+        m_jit.load32(node->registerPointer()->payloadPointer(), scratchGPR);
+        JITCompiler::Jump ok = m_jit.branch32(
+            JITCompiler::Equal, scratchGPR,
+            TrustedImm32(node->registerPointer()->get().payload()));
+        notOK.link(&m_jit);
+        m_jit.breakpoint();
+        ok.link(&m_jit);
+#endif
+
+        noResult(node);
         break;
     }

     case CheckHasInstance: {
-        SpeculateCellOperand base(this, node.child1());
+        SpeculateCellOperand base(this, node->child1());
         GPRTemporary structure(this);

         // Speculate that base 'ImplementsDefaultHasInstance'.
         m_jit.loadPtr(MacroAssembler::Address(base.gpr(), JSCell::structureOffset()), structure.gpr());
-        speculationCheck(Uncountable, JSValueRegs(), NoNode, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(structure.gpr(), Structure::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance)));
+        speculationCheck(Uncountable, JSValueRegs(), 0, m_jit.branchTest8(MacroAssembler::Zero, MacroAssembler::Address(structure.gpr(), Structure::typeInfoFlagsOffset()), MacroAssembler::TrustedImm32(ImplementsDefaultHasInstance)));

-        noResult(m_compileIndex);
+        noResult(node);
         break;
     }

@@ -3558,44 +4293,63 @@ void SpeculativeJIT::compile(Node& node)
     }

     case IsUndefined: {
-        JSValueOperand value(this, node.child1());
+        JSValueOperand value(this, node->child1());
         GPRTemporary result(this);
-
+        GPRTemporary localGlobalObject(this);
+        GPRTemporary remoteGlobalObject(this);
+
         JITCompiler::Jump isCell = m_jit.branch32(JITCompiler::Equal, value.tagGPR(), JITCompiler::TrustedImm32(JSValue::CellTag));

         m_jit.compare32(JITCompiler::Equal, value.tagGPR(), TrustedImm32(JSValue::UndefinedTag), result.gpr());
         JITCompiler::Jump done = m_jit.jump();

         isCell.link(&m_jit);
-        m_jit.loadPtr(JITCompiler::Address(value.payloadGPR(), JSCell::structureOffset()), result.gpr());
-        m_jit.test8(JITCompiler::NonZero, JITCompiler::Address(result.gpr(), Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined), result.gpr());
-
+        JITCompiler::Jump notMasqueradesAsUndefined;
+        if (m_jit.graph().globalObjectFor(node->codeOrigin)->masqueradesAsUndefinedWatchpoint()->isStillValid()) {
+            m_jit.graph().globalObjectFor(node->codeOrigin)->masqueradesAsUndefinedWatchpoint()->add(speculationWatchpoint());
+            m_jit.move(TrustedImm32(0), result.gpr());
+            notMasqueradesAsUndefined = m_jit.jump();
+        } else {
+            m_jit.loadPtr(JITCompiler::Address(value.payloadGPR(), JSCell::structureOffset()), result.gpr());
+            JITCompiler::Jump isMasqueradesAsUndefined = m_jit.branchTest8(JITCompiler::NonZero, JITCompiler::Address(result.gpr(), Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+            m_jit.move(TrustedImm32(0), result.gpr());
+            notMasqueradesAsUndefined = m_jit.jump();
+
+            isMasqueradesAsUndefined.link(&m_jit);
+            GPRReg localGlobalObjectGPR = localGlobalObject.gpr();
+            GPRReg remoteGlobalObjectGPR = remoteGlobalObject.gpr();
+            m_jit.move(TrustedImmPtr(m_jit.globalObjectFor(node->codeOrigin)), localGlobalObjectGPR);
+            m_jit.loadPtr(JITCompiler::Address(result.gpr(), Structure::globalObjectOffset()), remoteGlobalObjectGPR);
+            m_jit.compare32(JITCompiler::Equal, localGlobalObjectGPR, remoteGlobalObjectGPR, result.gpr());
+        }
+
+        notMasqueradesAsUndefined.link(&m_jit);
         done.link(&m_jit);
-        booleanResult(result.gpr(), m_compileIndex);
+        booleanResult(result.gpr(), node);
         break;
     }

     case IsBoolean: {
-        JSValueOperand value(this, node.child1());
+        JSValueOperand value(this, node->child1());
         GPRTemporary result(this, value);

         m_jit.compare32(JITCompiler::Equal, value.tagGPR(), JITCompiler::TrustedImm32(JSValue::BooleanTag), result.gpr());
-        booleanResult(result.gpr(), m_compileIndex);
+        booleanResult(result.gpr(), node);
         break;
     }

     case IsNumber: {
-        JSValueOperand value(this, node.child1());
+        JSValueOperand value(this, node->child1());
         GPRTemporary result(this, value);

         m_jit.add32(TrustedImm32(1), value.tagGPR(), result.gpr());
         m_jit.compare32(JITCompiler::Below, result.gpr(), JITCompiler::TrustedImm32(JSValue::LowestTag + 1), result.gpr());
-        booleanResult(result.gpr(), m_compileIndex);
+        booleanResult(result.gpr(), node);
         break;
     }

     case IsString: {
-        JSValueOperand value(this, node.child1());
+        JSValueOperand value(this, node->child1());
         GPRTemporary result(this, value);

         JITCompiler::Jump isNotCell = m_jit.branch32(JITCompiler::NotEqual, value.tagGPR(), JITCompiler::TrustedImm32(JSValue::CellTag));
@@ -3608,31 +4362,92 @@ void SpeculativeJIT::compile(Node& node)
         m_jit.move(TrustedImm32(0), result.gpr());

         done.link(&m_jit);
-        booleanResult(result.gpr(), m_compileIndex);
+        booleanResult(result.gpr(), node);
         break;
     }

     case IsObject: {
-        JSValueOperand value(this, node.child1());
+        JSValueOperand value(this, node->child1());
         GPRReg valueTagGPR = value.tagGPR();
         GPRReg valuePayloadGPR = value.payloadGPR();
         GPRResult result(this);
         GPRReg resultGPR = result.gpr();
         flushRegisters();
         callOperation(operationIsObject, resultGPR, valueTagGPR, valuePayloadGPR);
-        booleanResult(result.gpr(), m_compileIndex);
+        booleanResult(result.gpr(), node);
         break;
     }

     case IsFunction: {
-        JSValueOperand value(this, node.child1());
+        JSValueOperand value(this, node->child1());
         GPRReg valueTagGPR = value.tagGPR();
         GPRReg valuePayloadGPR = value.payloadGPR();
         GPRResult result(this);
         GPRReg resultGPR = result.gpr();
         flushRegisters();
         callOperation(operationIsFunction, resultGPR, valueTagGPR, valuePayloadGPR);
-        booleanResult(result.gpr(), m_compileIndex);
+        booleanResult(result.gpr(), node);
+        break;
+    }
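IsNumber above, and the number check inside the TypeOf case below, both use the same JSVALUE32_64 tag trick: add one to the tag word and do an unsigned compare against LowestTag + 1. With this era's tag encoding (Int32Tag is 0xFFFFFFFF, and a double is stored with a tag numerically below LowestTag, while every other tag sits between LowestTag and Int32Tag), the arithmetic classifies a value as a number in two instructions:

    Int32Tag + 1  wraps to 0            -> Below LowestTag + 1, so a number
    doubleTag + 1 <= LowestTag           -> Below LowestTag + 1, so a number
    any other tag + 1 >= LowestTag + 1   -> not a number

IsNumber materializes the comparison result directly into a boolean; TypeOf uses the same add-and-branch to select the "number" small string.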
+    case TypeOf: {
+        JSValueOperand value(this, node->child1(), ManualOperandSpeculation);
+        GPRReg tagGPR = value.tagGPR();
+        GPRReg payloadGPR = value.payloadGPR();
+        GPRTemporary temp(this);
+        GPRReg tempGPR = temp.gpr();
+        GPRResult result(this);
+        GPRReg resultGPR = result.gpr();
+        JITCompiler::JumpList doneJumps;
+
+        flushRegisters();
+
+        ASSERT(node->child1().useKind() == UntypedUse || node->child1().useKind() == CellUse || node->child1().useKind() == StringUse);
+
+        JITCompiler::Jump isNotCell = m_jit.branch32(JITCompiler::NotEqual, tagGPR, JITCompiler::TrustedImm32(JSValue::CellTag));
+        if (node->child1().useKind() != UntypedUse)
+            DFG_TYPE_CHECK(JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecCell, isNotCell);
+
+        if (!node->child1()->shouldSpeculateObject() || node->child1().useKind() == StringUse) {
+            m_jit.loadPtr(JITCompiler::Address(payloadGPR, JSCell::structureOffset()), tempGPR);
+            JITCompiler::Jump notString = m_jit.branch8(JITCompiler::NotEqual, JITCompiler::Address(tempGPR, Structure::typeInfoTypeOffset()), TrustedImm32(StringType));
+            if (node->child1().useKind() == StringUse)
+                DFG_TYPE_CHECK(JSValueRegs(tagGPR, payloadGPR), node->child1(), SpecString, notString);
+            m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.stringString()), resultGPR);
+            doneJumps.append(m_jit.jump());
+            if (node->child1().useKind() != StringUse) {
+                notString.link(&m_jit);
+                callOperation(operationTypeOf, resultGPR, payloadGPR);
+                doneJumps.append(m_jit.jump());
+            }
+        } else {
+            callOperation(operationTypeOf, resultGPR, payloadGPR);
+            doneJumps.append(m_jit.jump());
+        }
+
+        if (node->child1().useKind() == UntypedUse) {
+            isNotCell.link(&m_jit);
+
+            m_jit.add32(TrustedImm32(1), tagGPR, tempGPR);
+            JITCompiler::Jump notNumber = m_jit.branch32(JITCompiler::AboveOrEqual, tempGPR, JITCompiler::TrustedImm32(JSValue::LowestTag + 1));
+            m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.numberString()), resultGPR);
+            doneJumps.append(m_jit.jump());
+            notNumber.link(&m_jit);
+
+            JITCompiler::Jump notUndefined = m_jit.branch32(JITCompiler::NotEqual, tagGPR, TrustedImm32(JSValue::UndefinedTag));
+            m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.undefinedString()), resultGPR);
+            doneJumps.append(m_jit.jump());
+            notUndefined.link(&m_jit);
+
+            JITCompiler::Jump notNull = m_jit.branch32(JITCompiler::NotEqual, tagGPR, TrustedImm32(JSValue::NullTag));
+            m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.objectString()), resultGPR);
+            doneJumps.append(m_jit.jump());
+            notNull.link(&m_jit);
+
+            // Only boolean left
+            m_jit.move(TrustedImmPtr(m_jit.vm()->smallStrings.booleanString()), resultGPR);
+        }
+        doneJumps.link(&m_jit);
+        cellResult(resultGPR, node);
         break;
     }

@@ -3644,7 +4459,7 @@ void SpeculativeJIT::compile(Node& node)
 #if ENABLE(DEBUG_WITH_BREAKPOINT)
         m_jit.breakpoint();
 #else
-        ASSERT_NOT_REACHED();
+        RELEASE_ASSERT_NOT_REACHED();
 #endif
         break;

@@ -3657,8 +4472,9 @@ void SpeculativeJIT::compile(Node& node)
         flushRegisters();
         GPRResult resultPayload(this);
         GPRResult2 resultTag(this);
-        callOperation(operationResolve, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
-        jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+        ResolveOperationData& data = m_jit.graph().m_resolveOperationsData[node->resolveOperationsDataIndex()];
+        callOperation(operationResolve, resultTag.gpr(), resultPayload.gpr(), identifier(data.identifierNumber), data.resolveOperations);
+        jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
         break;
     }

@@ -3666,8 +4482,9 @@ void SpeculativeJIT::compile(Node& node)
         flushRegisters();
         GPRResult resultPayload(this);
         GPRResult2 resultTag(this);
-        callOperation(operationResolveBase, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
-        jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+        ResolveOperationData& data = m_jit.graph().m_resolveOperationsData[node->resolveOperationsDataIndex()];
+        callOperation(operationResolveBase, resultTag.gpr(), resultPayload.gpr(), identifier(data.identifierNumber), data.resolveOperations, data.putToBaseOperation);
+        jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
         break;
     }

@@ -3675,8 +4492,9 @@ void SpeculativeJIT::compile(Node& node)
         flushRegisters();
         GPRResult resultPayload(this);
         GPRResult2 resultTag(this);
-        callOperation(operationResolveBaseStrictPut, resultTag.gpr(), resultPayload.gpr(), identifier(node.identifierNumber()));
-        jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
+        ResolveOperationData& data = m_jit.graph().m_resolveOperationsData[node->resolveOperationsDataIndex()];
+        callOperation(operationResolveBaseStrictPut, resultTag.gpr(), resultPayload.gpr(), identifier(data.identifierNumber), data.resolveOperations, data.putToBaseOperation);
+        jsValueResult(resultTag.gpr(), resultPayload.gpr(), node);
         break;
     }

@@ -3691,37 +4509,41 @@ void SpeculativeJIT::compile(Node& node)
         GPRReg resultTagGPR = resultTag.gpr();
         GPRReg resultPayloadGPR = resultPayload.gpr();

-        ResolveGlobalData& data = m_jit.graph().m_resolveGlobalData[node.resolveGlobalDataIndex()];
-        GlobalResolveInfo* resolveInfoAddress = &(m_jit.codeBlock()->globalResolveInfo(data.resolveInfoIndex));
+        ResolveGlobalData& data = m_jit.graph().m_resolveGlobalData[node->resolveGlobalDataIndex()];
+        ResolveOperation* resolveOperationAddress = &(data.resolveOperations->data()[data.resolvePropertyIndex]);

         // Check Structure of global object
-        m_jit.move(JITCompiler::TrustedImmPtr(m_jit.globalObjectFor(node.codeOrigin)), globalObjectGPR);
-        m_jit.move(JITCompiler::TrustedImmPtr(resolveInfoAddress), resolveInfoGPR);
-        m_jit.loadPtr(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), resultPayloadGPR);
+        m_jit.move(JITCompiler::TrustedImmPtr(m_jit.globalObjectFor(node->codeOrigin)), globalObjectGPR);
+        m_jit.move(JITCompiler::TrustedImmPtr(resolveOperationAddress), resolveInfoGPR);
+        m_jit.loadPtr(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(ResolveOperation, m_structure)), resultPayloadGPR);
         JITCompiler::Jump structuresNotMatch = m_jit.branchPtr(JITCompiler::NotEqual, resultPayloadGPR, JITCompiler::Address(globalObjectGPR, JSCell::structureOffset()));

         // Fast case
-        m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::offsetOfPropertyStorage()), resultPayloadGPR);
-        m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), resolveInfoGPR);
-        m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
-        m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR);
-
-        JITCompiler::Jump wasFast = m_jit.jump();
-
-        structuresNotMatch.link(&m_jit);
-        silentSpillAllRegisters(resultTagGPR, resultPayloadGPR);
-        callOperation(operationResolveGlobal, resultTagGPR, resultPayloadGPR, resolveInfoGPR, &m_jit.codeBlock()->identifier(data.identifierNumber));
-        silentFillAllRegisters(resultTagGPR, resultPayloadGPR);
-
-        wasFast.link(&m_jit);
-
-        jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex);
+        m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::butterflyOffset()), resultPayloadGPR);
+        m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(ResolveOperation, m_offset)), resolveInfoGPR);
+#if DFG_ENABLE(JIT_ASSERT)
+        JITCompiler::Jump isOutOfLine = m_jit.branch32(JITCompiler::GreaterThanOrEqual, resolveInfoGPR, TrustedImm32(firstOutOfLineOffset));
+        m_jit.breakpoint();
+        isOutOfLine.link(&m_jit);
+#endif
+        m_jit.neg32(resolveInfoGPR);
+        m_jit.signExtend32ToPtr(resolveInfoGPR, resolveInfoGPR);
+        m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) + (firstOutOfLineOffset - 2) * static_cast<ptrdiff_t>(sizeof(JSValue))), resultTagGPR);
+        m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) + (firstOutOfLineOffset - 2) * static_cast<ptrdiff_t>(sizeof(JSValue))), resultPayloadGPR);
+
+        addSlowPathGenerator(
+            slowPathCall(
+                structuresNotMatch, this, operationResolveGlobal,
+                JSValueRegs(resultTagGPR, resultPayloadGPR), resolveInfoGPR, globalObjectGPR,
+                &m_jit.codeBlock()->identifier(data.identifierNumber)));
+
+        jsValueResult(resultTagGPR, resultPayloadGPR, node);
         break;
     }
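Reading the ResolveGlobal fast path above: the property offset loaded from the ResolveOperation is asserted (under JIT_ASSERT) to be an out-of-line offset, then negated, sign-extended, and used as a scaled index with a constant displacement of (firstOutOfLineOffset - 2) * sizeof(JSValue) against the butterfly pointer loaded via JSObject::butterflyOffset() (which replaces the old offsetOfPropertyStorage() load). Working that displacement through in bytes, with sizeof(JSValue) == 8:

    byteOffset(offset) = -offset * 8 + (firstOutOfLineOffset - 2) * 8
    byteOffset(firstOutOfLineOffset)     = -16    // first out-of-line property, two slots below the butterfly
    byteOffset(firstOutOfLineOffset + 1) = -24    // each further property one slot lower

so out-of-line properties are fetched from descending addresses below the butterfly pointer, and the structure-check failure is routed to the deferred operationResolveGlobal call instead of an inline spill/call/fill sequence.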

     case CreateActivation: {
-        JSValueOperand value(this, node.child1());
+        JSValueOperand value(this, node->child1());
         GPRTemporary result(this, value, false);

         GPRReg valueTagGPR = value.tagGPR();
@@ -3730,33 +4552,348 @@ void SpeculativeJIT::compile(Node& node)

         m_jit.move(valuePayloadGPR, resultGPR);

-        JITCompiler::Jump alreadyCreated = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
-
-        silentSpillAllRegisters(resultGPR);
-        callOperation(operationCreateActivation, resultGPR);
-        silentFillAllRegisters(resultGPR);
+        JITCompiler::Jump notCreated = m_jit.branch32(JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));

-        alreadyCreated.link(&m_jit);
+        addSlowPathGenerator(
+            slowPathCall(notCreated, this, operationCreateActivation, resultGPR));

-        cellResult(resultGPR, m_compileIndex);
+        cellResult(resultGPR, node);
         break;
     }

-    case TearOffActivation: {
-        JSValueOperand value(this, node.child1());
+    case CreateArguments: {
+        JSValueOperand value(this, node->child1());
+        GPRTemporary result(this, value, false);

         GPRReg valueTagGPR = value.tagGPR();
         GPRReg valuePayloadGPR = value.payloadGPR();
+        GPRReg resultGPR = result.gpr();
+
+        m_jit.move(valuePayloadGPR, resultGPR);

         JITCompiler::Jump notCreated = m_jit.branch32(JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));

-        silentSpillAllRegisters(InvalidGPRReg);
-        callOperation(operationTearOffActivation, valuePayloadGPR);
-        silentFillAllRegisters(InvalidGPRReg);
+        if (node->codeOrigin.inlineCallFrame) {
+            addSlowPathGenerator(
+                slowPathCall(
+                    notCreated, this, operationCreateInlinedArguments, resultGPR,
+                    node->codeOrigin.inlineCallFrame));
+        } else {
+            addSlowPathGenerator(
+                slowPathCall(notCreated, this, operationCreateArguments, resultGPR));
+        }
+
+        cellResult(resultGPR, node);
+        break;
+    }
+
+    case TearOffActivation: {
+        JSValueOperand activationValue(this, node->child1());
+        GPRTemporary scratch(this);
+
+        GPRReg activationValueTagGPR = activationValue.tagGPR();
+        GPRReg activationValuePayloadGPR = activationValue.payloadGPR();
+        GPRReg scratchGPR = scratch.gpr();
+
+        JITCompiler::Jump notCreated = m_jit.branch32(JITCompiler::Equal, activationValueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
+
+        SharedSymbolTable* symbolTable = m_jit.symbolTableFor(node->codeOrigin);
+        int registersOffset = JSActivation::registersOffset(symbolTable);
+
+        int captureEnd = symbolTable->captureEnd();
+        for (int i = symbolTable->captureStart(); i < captureEnd; ++i) {
+            m_jit.loadPtr(
+                JITCompiler::Address(
+                    GPRInfo::callFrameRegister, i * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
+                scratchGPR);
+            m_jit.storePtr(
+                scratchGPR, JITCompiler::Address(
+                    activationValuePayloadGPR, registersOffset + i * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+            m_jit.loadPtr(
+                JITCompiler::Address(
+                    GPRInfo::callFrameRegister, i * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)),
+                scratchGPR);
+            m_jit.storePtr(
+                scratchGPR, JITCompiler::Address(
+                    activationValuePayloadGPR, registersOffset + i * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+        }
+        m_jit.addPtr(TrustedImm32(registersOffset), activationValuePayloadGPR, scratchGPR);
+        m_jit.storePtr(scratchGPR, JITCompiler::Address(activationValuePayloadGPR, JSActivation::offsetOfRegisters()));

         notCreated.link(&m_jit);
+        noResult(node);
+        break;
+    }
+
+    case TearOffArguments: {
+        JSValueOperand unmodifiedArgumentsValue(this, node->child1());
+        JSValueOperand activationValue(this, node->child2());
+        GPRReg unmodifiedArgumentsValuePayloadGPR = unmodifiedArgumentsValue.payloadGPR();
+        GPRReg activationValuePayloadGPR = activationValue.payloadGPR();
+
+        JITCompiler::Jump created = m_jit.branchTest32(
+            JITCompiler::NonZero, unmodifiedArgumentsValuePayloadGPR);
+
+        if (node->codeOrigin.inlineCallFrame) {
+            addSlowPathGenerator(
+                slowPathCall(
+                    created, this, operationTearOffInlinedArguments, NoResult,
+                    unmodifiedArgumentsValuePayloadGPR, activationValuePayloadGPR, node->codeOrigin.inlineCallFrame));
+        } else {
+            addSlowPathGenerator(
+                slowPathCall(
+                    created, this, operationTearOffArguments, NoResult,
+                    unmodifiedArgumentsValuePayloadGPR, activationValuePayloadGPR));
+        }
+
+        noResult(node);
+        break;
+    }
+
+    case CheckArgumentsNotCreated: {
+        ASSERT(!isEmptySpeculation(
+            m_state.variables().operand(
+                m_jit.graph().argumentsRegisterFor(node->codeOrigin)).m_type));
+        speculationCheck(
+            Uncountable, JSValueRegs(), 0,
+            m_jit.branch32(
+                JITCompiler::NotEqual,
+                JITCompiler::tagFor(m_jit.argumentsRegisterFor(node->codeOrigin)),
+                TrustedImm32(JSValue::EmptyValueTag)));
+        noResult(node);
+        break;
+    }
+
+    case GetMyArgumentsLength: {
+        GPRTemporary result(this);
+        GPRReg resultGPR = result.gpr();
+
+        if (!isEmptySpeculation(
+                m_state.variables().operand(
+                    m_jit.graph().argumentsRegisterFor(node->codeOrigin)).m_type)) {
+            speculationCheck(
+                ArgumentsEscaped, JSValueRegs(), 0,
+                m_jit.branch32(
+                    JITCompiler::NotEqual,
+                    JITCompiler::tagFor(m_jit.argumentsRegisterFor(node->codeOrigin)),
+                    TrustedImm32(JSValue::EmptyValueTag)));
+        }
+
+        ASSERT(!node->codeOrigin.inlineCallFrame);
+        m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultGPR);
+        m_jit.sub32(TrustedImm32(1), resultGPR);
+        integerResult(resultGPR, node);
+        break;
+    }
+
+    case GetMyArgumentsLengthSafe: {
+        GPRTemporary resultPayload(this);
+        GPRTemporary resultTag(this);
+        GPRReg resultPayloadGPR = resultPayload.gpr();
+        GPRReg resultTagGPR = resultTag.gpr();
+
+        JITCompiler::Jump created = m_jit.branch32(
+            JITCompiler::NotEqual,
+            JITCompiler::tagFor(m_jit.argumentsRegisterFor(node->codeOrigin)),
+            TrustedImm32(JSValue::EmptyValueTag));
-        noResult(m_compileIndex);
+        if (node->codeOrigin.inlineCallFrame) {
+            m_jit.move(
+                Imm32(node->codeOrigin.inlineCallFrame->arguments.size() - 1),
+                resultPayloadGPR);
+        } else {
+            m_jit.load32(JITCompiler::payloadFor(JSStack::ArgumentCount), resultPayloadGPR);
+            m_jit.sub32(TrustedImm32(1), resultPayloadGPR);
+        }
+        m_jit.move(TrustedImm32(JSValue::Int32Tag), resultTagGPR);
+
+        // FIXME: the slow path generator should perform a forward speculation that the
+        // result is an integer. For now we postpone the speculation by having this return
+        // a JSValue.
+
+        addSlowPathGenerator(
+            slowPathCall(
+                created, this, operationGetArgumentsLength,
+                JSValueRegs(resultTagGPR, resultPayloadGPR),
+                m_jit.argumentsRegisterFor(node->codeOrigin)));
+
+        jsValueResult(resultTagGPR, resultPayloadGPR, node);
+        break;
+    }
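GetMyArgumentsLength and GetMyArgumentsLengthSafe above both read the frame's ArgumentCount payload and subtract one, because that slot counts the this value along with the named arguments; the inlined-frame path does the same subtraction at compile time from inlineCallFrame->arguments.size(). A minimal illustration with a hypothetical helper, not part of this file:

    // For a call f(a, b) the frame records an argument count of 3 (including 'this'),
    // while the arguments object must report a length of 2.
    static int32_t argumentsLengthFromFrameCount(int32_t argumentCountIncludingThis)
    {
        return argumentCountIncludingThis - 1;
    }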
+
+    case GetMyArgumentByVal: {
+        SpeculateStrictInt32Operand index(this, node->child1());
+        GPRTemporary resultPayload(this);
+        GPRTemporary resultTag(this);
+        GPRReg indexGPR = index.gpr();
+        GPRReg resultPayloadGPR = resultPayload.gpr();
+        GPRReg resultTagGPR = resultTag.gpr();
+
+        if (!isEmptySpeculation(
+                m_state.variables().operand(
+                    m_jit.graph().argumentsRegisterFor(node->codeOrigin)).m_type)) {
+            speculationCheck(
+                ArgumentsEscaped, JSValueRegs(), 0,
+                m_jit.branch32(
+                    JITCompiler::NotEqual,
+                    JITCompiler::tagFor(m_jit.argumentsRegisterFor(node->codeOrigin)),
+                    TrustedImm32(JSValue::EmptyValueTag)));
+        }
+
+        m_jit.add32(TrustedImm32(1), indexGPR, resultPayloadGPR);
+
+        if (node->codeOrigin.inlineCallFrame) {
+            speculationCheck(
+                Uncountable, JSValueRegs(), 0,
+                m_jit.branch32(
+                    JITCompiler::AboveOrEqual,
+                    resultPayloadGPR,
+                    Imm32(node->codeOrigin.inlineCallFrame->arguments.size())));
+        } else {
+            speculationCheck(
+                Uncountable, JSValueRegs(), 0,
+                m_jit.branch32(
+                    JITCompiler::AboveOrEqual,
+                    resultPayloadGPR,
+                    JITCompiler::payloadFor(JSStack::ArgumentCount)));
+        }
+
+        JITCompiler::JumpList slowArgument;
+        JITCompiler::JumpList slowArgumentOutOfBounds;
+        if (const SlowArgument* slowArguments = m_jit.symbolTableFor(node->codeOrigin)->slowArguments()) {
+            slowArgumentOutOfBounds.append(
+                m_jit.branch32(
+                    JITCompiler::AboveOrEqual, indexGPR,
+                    Imm32(m_jit.symbolTableFor(node->codeOrigin)->parameterCount())));
+
+            COMPILE_ASSERT(sizeof(SlowArgument) == 8, SlowArgument_size_is_eight_bytes);
+            m_jit.move(ImmPtr(slowArguments), resultPayloadGPR);
+            m_jit.load32(
+                JITCompiler::BaseIndex(
+                    resultPayloadGPR, indexGPR, JITCompiler::TimesEight,
+                    OBJECT_OFFSETOF(SlowArgument, index)),
+                resultPayloadGPR);
+
+            m_jit.load32(
+                JITCompiler::BaseIndex(
+                    GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+                    m_jit.offsetOfLocals(node->codeOrigin) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
+                resultTagGPR);
+            m_jit.load32(
+                JITCompiler::BaseIndex(
+                    GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+                    m_jit.offsetOfLocals(node->codeOrigin) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)),
+                resultPayloadGPR);
+            slowArgument.append(m_jit.jump());
+        }
+        slowArgumentOutOfBounds.link(&m_jit);
+
+        m_jit.neg32(resultPayloadGPR);
+
+        m_jit.load32(
+            JITCompiler::BaseIndex(
+                GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+                m_jit.offsetOfArgumentsIncludingThis(node->codeOrigin) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
+            resultTagGPR);
+        m_jit.load32(
+            JITCompiler::BaseIndex(
+                GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+                m_jit.offsetOfArgumentsIncludingThis(node->codeOrigin) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)),
+            resultPayloadGPR);
+
+        slowArgument.link(&m_jit);
+        jsValueResult(resultTagGPR, resultPayloadGPR, node);
+        break;
+    }
+    case GetMyArgumentByValSafe: {
+        SpeculateStrictInt32Operand index(this, node->child1());
+        GPRTemporary resultPayload(this);
+        GPRTemporary resultTag(this);
+        GPRReg indexGPR = index.gpr();
+        GPRReg resultPayloadGPR = resultPayload.gpr();
+        GPRReg resultTagGPR = resultTag.gpr();
+
+        JITCompiler::JumpList slowPath;
+        slowPath.append(
+            m_jit.branch32(
+                JITCompiler::NotEqual,
+                JITCompiler::tagFor(m_jit.argumentsRegisterFor(node->codeOrigin)),
+                TrustedImm32(JSValue::EmptyValueTag)));
+
+        m_jit.add32(TrustedImm32(1), indexGPR, resultPayloadGPR);
+        if (node->codeOrigin.inlineCallFrame) {
+            slowPath.append(
+                m_jit.branch32(
+                    JITCompiler::AboveOrEqual,
+                    resultPayloadGPR,
+                    Imm32(node->codeOrigin.inlineCallFrame->arguments.size())));
+        } else {
+            slowPath.append(
+                m_jit.branch32(
+                    JITCompiler::AboveOrEqual,
+                    resultPayloadGPR,
+                    JITCompiler::payloadFor(JSStack::ArgumentCount)));
+        }
+
+        JITCompiler::JumpList slowArgument;
+        JITCompiler::JumpList slowArgumentOutOfBounds;
+        if (const SlowArgument* slowArguments = m_jit.symbolTableFor(node->codeOrigin)->slowArguments()) {
+            slowArgumentOutOfBounds.append(
+                m_jit.branch32(
+                    JITCompiler::AboveOrEqual, indexGPR,
+                    Imm32(m_jit.symbolTableFor(node->codeOrigin)->parameterCount())));
+
+            COMPILE_ASSERT(sizeof(SlowArgument) == 8, SlowArgument_size_is_eight_bytes);
+            m_jit.move(ImmPtr(slowArguments), resultPayloadGPR);
+            m_jit.load32(
+                JITCompiler::BaseIndex(
+                    resultPayloadGPR, indexGPR, JITCompiler::TimesEight,
+                    OBJECT_OFFSETOF(SlowArgument, index)),
+                resultPayloadGPR);
+            m_jit.load32(
+                JITCompiler::BaseIndex(
+                    GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+                    m_jit.offsetOfLocals(node->codeOrigin) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
+                resultTagGPR);
+            m_jit.load32(
+                JITCompiler::BaseIndex(
+                    GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+                    m_jit.offsetOfLocals(node->codeOrigin) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)),
+                resultPayloadGPR);
+            slowArgument.append(m_jit.jump());
+        }
+        slowArgumentOutOfBounds.link(&m_jit);
+
+        m_jit.neg32(resultPayloadGPR);
+
+        m_jit.load32(
+            JITCompiler::BaseIndex(
+                GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+                m_jit.offsetOfArgumentsIncludingThis(node->codeOrigin) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
+            resultTagGPR);
+        m_jit.load32(
+            JITCompiler::BaseIndex(
+                GPRInfo::callFrameRegister, resultPayloadGPR, JITCompiler::TimesEight,
+                m_jit.offsetOfArgumentsIncludingThis(node->codeOrigin) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)),
+            resultPayloadGPR);
+
+        if (node->codeOrigin.inlineCallFrame) {
+            addSlowPathGenerator(
+                slowPathCall(
+                    slowPath, this, operationGetInlinedArgumentByVal,
+                    JSValueRegs(resultTagGPR, resultPayloadGPR),
+                    m_jit.argumentsRegisterFor(node->codeOrigin),
+                    node->codeOrigin.inlineCallFrame, indexGPR));
+        } else {
+            addSlowPathGenerator(
+                slowPathCall(
+                    slowPath, this, operationGetArgumentByVal,
+                    JSValueRegs(resultTagGPR, resultPayloadGPR),
+                    m_jit.argumentsRegisterFor(node->codeOrigin), indexGPR));
+        }
+
+        slowArgument.link(&m_jit);
+        jsValueResult(resultTagGPR, resultPayloadGPR, node);
         break;
     }
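In the two GetMyArgumentByVal flavours above, the requested index is biased by one (stepping over the this slot), negated, and used as a scaled index off the call frame register, so argument i is read relative to offsetOfArgumentsIncludingThis(codeOrigin); when the symbol table carries SlowArgument entries, the index is first remapped through that table and the value comes from the locals area instead. Working the fast-path displacement through in bytes, with 8-byte slots and base = offsetOfArgumentsIncludingThis(codeOrigin):

    byteOffset(i) = base - (i + 1) * 8
    byteOffset(0) = base - 8     // first named argument
    byteOffset(1) = base - 16    // and so on toward lower addresses

The non-Safe variant turns an out-of-range index or an already-created arguments object into speculation checks, while the Safe variant routes those cases to the operationGetArgumentByVal / operationGetInlinedArgumentByVal slow paths.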

@@ -3765,25 +4902,26 @@ void SpeculativeJIT::compile(Node& node)
         break;

     case NewFunction: {
-        JSValueOperand value(this, node.child1());
-        GPRTemporary result(this, value, false);
+        JSValueOperand value(this, node->child1());
+        GPRTemporary resultTag(this, value);
+        GPRTemporary resultPayload(this, value, false);

         GPRReg valueTagGPR = value.tagGPR();
         GPRReg valuePayloadGPR = value.payloadGPR();
-        GPRReg resultGPR = result.gpr();
-
-        m_jit.move(valuePayloadGPR, resultGPR);
+        GPRReg resultTagGPR = resultTag.gpr();
+        GPRReg resultPayloadGPR = resultPayload.gpr();

-        JITCompiler::Jump alreadyCreated = m_jit.branch32(JITCompiler::NotEqual, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));
+        m_jit.move(valuePayloadGPR, resultPayloadGPR);
+        m_jit.move(valueTagGPR, resultTagGPR);

-        silentSpillAllRegisters(resultGPR);
-        callOperation(
-            operationNewFunction, resultGPR, m_jit.codeBlock()->functionDecl(node.functionDeclIndex()));
-        silentFillAllRegisters(resultGPR);
+        JITCompiler::Jump notCreated = m_jit.branch32(JITCompiler::Equal, valueTagGPR, TrustedImm32(JSValue::EmptyValueTag));

-        alreadyCreated.link(&m_jit);
+        addSlowPathGenerator(
+            slowPathCall(
+                notCreated, this, operationNewFunction, JSValueRegs(resultTagGPR, resultPayloadGPR),
+                m_jit.codeBlock()->functionDecl(node->functionDeclIndex())));

-        cellResult(resultGPR, m_compileIndex);
+        jsValueResult(resultTagGPR, resultPayloadGPR, node);
         break;
     }

@@ -3791,28 +4929,57 @@ void SpeculativeJIT::compile(Node& node)
         compileNewFunctionExpression(node);
         break;

+    case GarbageValue:
+        // We should never get to the point of code emission for a GarbageValue
+        CRASH();
+        break;
+
     case ForceOSRExit: {
-        terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
+        terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), 0);
         break;
     }

+    case CheckWatchdogTimer:
+        speculationCheck(
+            WatchdogTimerFired, JSValueRegs(), 0,
+            m_jit.branchTest8(
+                JITCompiler::NonZero,
+                JITCompiler::AbsoluteAddress(m_jit.vm()->watchdog.timerDidFireAddress())));
+        break;
+
+    case CountExecution:
+        m_jit.add64(TrustedImm32(1), MacroAssembler::AbsoluteAddress(node->executionCounter()->address()));
+        break;
+
     case Phantom:
+        DFG_NODE_DO_TO_CHILDREN(m_jit.graph(), node, speculate);
+        noResult(node);
+        break;
+
+    case PhantomLocal:
         // This is a no-op.
-        noResult(m_compileIndex);
+        noResult(node);
+        break;
+
+    case Unreachable:
+        RELEASE_ASSERT_NOT_REACHED();
         break;

-    case InlineStart:
     case Nop:
     case LastNodeType:
-        ASSERT_NOT_REACHED();
+        RELEASE_ASSERT_NOT_REACHED();
        break;
     }
-
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+    m_jit.clearRegisterAllocationOffsets();
+#endif
+
     if (!m_compileOkay)
         return;

-    if (node.hasResult() && node.mustGenerate())
-        use(m_compileIndex);
+    if (node->hasResult() && node->mustGenerate())
+        use(node);
 }

 #endif