#include "config.h"
+#if ENABLE(JIT)
#if USE(JSVALUE32_64)
-
#include "JIT.h"
-#if ENABLE(JIT)
-
#include "CodeBlock.h"
-#include "JITInlineMethods.h"
+#include "GCAwareJITStubRoutine.h"
+#include "Interpreter.h"
+#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "JSPropertyNameIterator.h"
-#include "Interpreter.h"
+#include "JSVariableObject.h"
#include "LinkBuffer.h"
#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
+#include <wtf/StringPrintStream.h>
#ifndef NDEBUG
#include <stdio.h>
JITStubCall stubCall(this, cti_op_put_by_index);
stubCall.addArgument(base);
- stubCall.addArgument(Imm32(property));
+ stubCall.addArgument(TrustedImm32(property));
stubCall.addArgument(value);
stubCall.call();
}
-void JIT::emit_op_put_getter(Instruction* currentInstruction)
-{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned function = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_getter);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
- stubCall.addArgument(function);
- stubCall.call();
-}
-
-void JIT::emit_op_put_setter(Instruction* currentInstruction)
+void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
{
unsigned base = currentInstruction[1].u.operand;
unsigned property = currentInstruction[2].u.operand;
- unsigned function = currentInstruction[3].u.operand;
+ unsigned getter = currentInstruction[3].u.operand;
+ unsigned setter = currentInstruction[4].u.operand;
- JITStubCall stubCall(this, cti_op_put_setter);
+ JITStubCall stubCall(this, cti_op_put_getter_setter);
stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
- stubCall.addArgument(function);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
+ stubCall.addArgument(getter);
+ stubCall.addArgument(setter);
stubCall.call();
}
JITStubCall stubCall(this, cti_op_del_by_id);
stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&m_codeBlock->identifier(property)));
- stubCall.call(dst);
-}
-
-
-#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-// Treat these as nops - the call will be handed as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-#error "JIT_OPTIMIZE_METHOD_CALLS requires JIT_OPTIMIZE_PROPERTY_ACCESS"
-#endif
-
-void JIT::emit_op_get_by_val(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
stubCall.call(dst);
}
-void JIT::emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
{
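+    // A VM-shared thunk: on entry regT0 holds the JSString cell and regT2 the
+    // int32 index; on success it returns the single-character string in regT0
+    // (with CellTag in regT1), and on any failure it returns 0 in regT0 so the
+    // caller can fall through to its slow path.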
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
+ JSInterfaceJIT jit;
+ JumpList failures;
+ failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(vm->stringStructure.get())));
- JITStubCall stubCall(this, cti_op_put_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- stubCall.addArgument(value);
- stubCall.call();
-}
-
-void JIT::emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- ASSERT_NOT_REACHED();
-}
-
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int ident = currentInstruction[3].u.operand;
+ // Load string length to regT1, and start the process of loading the data pointer into regT0
+ jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
+ jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
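+    // A null value pointer means the string is an unresolved rope; leave
+    // flattening it to the slow path.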
+ failures.append(jit.branchTest32(Zero, regT0));
- JITStubCall stubCall(this, cti_op_get_by_id_generic);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
- stubCall.call(dst);
+ // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
+ failures.append(jit.branch32(AboveOrEqual, regT2, regT1));
- m_propertyAccessInstructionIndex++;
-}
-
-void JIT::emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- m_propertyAccessInstructionIndex++;
- ASSERT_NOT_REACHED();
-}
+ // Load the character
+ JumpList is16Bit;
+ JumpList cont8Bit;
+ // Load the string flags
+ jit.loadPtr(Address(regT0, StringImpl::flagsOffset()), regT1);
+ jit.loadPtr(Address(regT0, StringImpl::dataOffset()), regT0);
+ is16Bit.append(jit.branchTest32(Zero, regT1, TrustedImm32(StringImpl::flagIs8Bit())));
+ jit.load8(BaseIndex(regT0, regT2, TimesOne, 0), regT0);
+ cont8Bit.append(jit.jump());
+ is16Bit.link(&jit);
+ jit.load16(BaseIndex(regT0, regT2, TimesTwo, 0), regT0);
-void JIT::emit_op_put_by_id(Instruction* currentInstruction)
-{
- int base = currentInstruction[1].u.operand;
- int ident = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_by_id_generic);
- stubCall.addArgument(base);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
- stubCall.addArgument(value);
- stubCall.call();
+ cont8Bit.link(&jit);
- m_propertyAccessInstructionIndex++;
-}
+ failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
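+    // Only Latin-1 code points (< 0x100) have preallocated single-character
+    // strings; the lookup below indexes vm->smallStrings by character code.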
+ jit.move(TrustedImmPtr(vm->smallStrings.singleCharacterStrings()), regT1);
+ jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
+ jit.move(TrustedImm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
+ jit.ret();
-void JIT::emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&)
-{
- m_propertyAccessInstructionIndex++;
- ASSERT_NOT_REACHED();
+ failures.link(&jit);
+ jit.move(TrustedImm32(0), regT0);
+ jit.ret();
+
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}
-#else // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-/* ------------------------------ BEGIN: ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#if ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-void JIT::emit_op_method_check(Instruction* currentInstruction)
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
- // Assert that the following instruction is a get_by_id.
- ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
-
- currentInstruction += OPCODE_LENGTH(op_method_check);
-
- // Do the method check - check the object & its prototype's structure inline (this is the common case).
- m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_propertyAccessInstructionIndex));
- MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
+ emitLoad2(base, regT1, regT0, property, regT3, regT2);
- emitLoad(base, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
emitJumpSlowCaseIfNotJSCell(base, regT1);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ emitArrayProfilingSite(regT1, regT3, profile);
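+    // emitArrayProfilingSite records the structure and replaces regT1 with the
+    // cell's indexing type; masking leaves just the shape bits for the dispatch below.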
+ and32(TrustedImm32(IndexingShapeMask), regT1);
+
+ PatchableJump badType;
+ JumpList slowCases;
+
+ JITArrayMode mode = chooseArrayMode(profile);
+ switch (mode) {
+ case JITInt32:
+ slowCases = emitInt32GetByVal(currentInstruction, badType);
+ break;
+ case JITDouble:
+ slowCases = emitDoubleGetByVal(currentInstruction, badType);
+ break;
+ case JITContiguous:
+ slowCases = emitContiguousGetByVal(currentInstruction, badType);
+ break;
+ case JITArrayStorage:
+ slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
+ break;
+ default:
+ CRASH();
+ }
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
-
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), info.structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
- DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(ImmPtr(0), regT2);
- Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), protoStructureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
-
- // This will be relinked to load the function without doing a load.
- DataLabelPtr putFunction = moveWithPatch(ImmPtr(0), regT0);
-
- END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
-
- move(Imm32(JSValue::CellTag), regT1);
- Jump match = jump();
-
- ASSERT(differenceBetween(info.structureToCompare, protoObj) == patchOffsetMethodCheckProtoObj);
- ASSERT(differenceBetween(info.structureToCompare, protoStructureToCompare) == patchOffsetMethodCheckProtoStruct);
- ASSERT(differenceBetween(info.structureToCompare, putFunction) == patchOffsetMethodCheckPutFunction);
-
- // Link the failure cases here.
- structureCheck.link(this);
- protoStructureCheck.link(this);
-
- // Do a regular(ish) get_by_id (the slow case will be link to
- // cti_op_get_by_id_method_check instead of cti_op_get_by_id.
- compileGetByIdHotPath();
+ addSlowCase(badType);
+ addSlowCase(slowCases);
- match.link(this);
+ Label done = label();
+
+#if !ASSERT_DISABLED
+ Jump resultOK = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
+ breakpoint();
+ resultOK.link(this);
+#endif
+
+ emitValueProfilingSite();
emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_method_check), dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
- // We've already generated the following get_by_id, so make sure it's skipped over.
- m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
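+    // Record the patchable type check, the chosen mode, and the end label so this
+    // site can be regenerated later if a different indexing shape shows up.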
+ m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}
-void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape)
{
- currentInstruction += OPCODE_LENGTH(op_method_check);
+ JumpList slowCases;
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int ident = currentInstruction[3].u.operand;
+ badType = patchableBranch32(NotEqual, regT1, TrustedImm32(expectedShape));
- compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter, true);
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
+ slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));
- // We've already generated the following get_by_id, so make sure it's skipped over.
- m_bytecodeIndex += OPCODE_LENGTH(op_get_by_id);
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
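+    // Reading EmptyValueTag means the slot is a hole, which must take the slow path.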
+ slowCases.append(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+
+ return slowCases;
}
-#else //!ENABLE(JIT_OPTIMIZE_METHOD_CALLS)
-
-// Treat these as nops - the call will be handed as a regular get_by_id/op_call pair.
-void JIT::emit_op_method_check(Instruction*) {}
-void JIT::emitSlow_op_method_check(Instruction*, Vector<SlowCaseEntry>::iterator&) { ASSERT_NOT_REACHED(); }
-
-#endif
-
-PassRefPtr<NativeExecutable> JIT::stringGetByValStubGenerator(JSGlobalData* globalData, ExecutablePool* pool)
+JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
{
- JSInterfaceJIT jit;
- JumpList failures;
- failures.append(jit.branchPtr(NotEqual, Address(regT0), ImmPtr(globalData->jsStringVPtr)));
- failures.append(jit.branchTest32(NonZero, Address(regT0, OBJECT_OFFSETOF(JSString, m_fiberCount))));
+ JumpList slowCases;
- // Load string length to regT1, and start the process of loading the data pointer into regT0
- jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
- jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
- jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
+ badType = patchableBranch32(NotEqual, regT1, TrustedImm32(DoubleShape));
- // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
- failures.append(jit.branch32(AboveOrEqual, regT2, regT1));
-
- // Load the character
- jit.load16(BaseIndex(regT0, regT2, TimesTwo, 0), regT0);
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
+ slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));
- failures.append(jit.branch32(AboveOrEqual, regT0, Imm32(0x100)));
- jit.move(ImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
- jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
- jit.move(Imm32(JSValue::CellTag), regT1); // We null check regT0 on return so this is safe
- jit.ret();
-
- failures.link(&jit);
- jit.move(Imm32(0), regT0);
- jit.ret();
+ loadDouble(BaseIndex(regT3, regT2, TimesEight), fpRegT0);
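+    // Holes in a double butterfly read back as NaN (the fast put path refuses to
+    // store NaN), so a failed self-compare identifies a hole.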
+ slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
+ moveDoubleToInts(fpRegT0, regT0, regT1);
- LinkBuffer patchBuffer(&jit, pool, 0);
- return adoptRef(new NativeExecutable(patchBuffer.finalizeCode()));
+ return slowCases;
}
-void JIT::emit_op_get_by_val(Instruction* currentInstruction)
+JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- emitLoad2(base, regT1, regT0, property, regT3, regT2);
+ JumpList slowCases;
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
- emitJumpSlowCaseIfNotJSCell(base, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
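+    // Bias by ArrayStorageShape so a single unsigned compare accepts both
+    // ArrayStorageShape and SlowPutArrayStorageShape.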
+ add32(TrustedImm32(-ArrayStorageShape), regT1, regT3);
+ badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
- addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
+ slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, ArrayStorage::vectorLengthOffset())));
- load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), regT1); // tag
- load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0); // payload
- addSlowCase(branch32(Equal, regT1, Imm32(JSValue::EmptyValueTag)));
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
+ load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
+ slowCases.append(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
- emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
+ return slowCases;
}
-
+
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
unsigned dst = currentInstruction[1].u.operand;
unsigned base = currentInstruction[2].u.operand;
unsigned property = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
Jump nonCell = jump();
linkSlowCase(iter); // base array check
- Jump notString = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsStringVPtr));
- emitNakedCall(m_globalData->getThunk(stringGetByValStubGenerator)->generatedJITCode().addressForCall());
+ Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
+ emitNakedCall(m_vm->getCTIStub(stringGetByValStubGenerator).code());
Jump failed = branchTestPtr(Zero, regT0);
emitStore(dst, regT1, regT0);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
failed.link(this);
notString.link(this);
nonCell.link(this);
+
+ Jump skipProfiling = jump();
linkSlowCase(iter); // vector length check
linkSlowCase(iter); // empty value
+ emitArrayProfileOutOfBoundsSpecialCase(profile);
+
+ skipProfiling.link(this);
+
+ Label slowPath = label();
+
JITStubCall stubCall(this, cti_op_get_by_val);
stubCall.addArgument(base);
stubCall.addArgument(property);
- stubCall.call(dst);
+ Call call = stubCall.call(dst);
+
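+    // Remember the slow-path entry and the call's return address so the by-val
+    // repatching machinery can retarget this site later.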
+ m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+ m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+ m_byValInstructionIndex++;
+
+ emitValueProfilingSite();
}
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
unsigned base = currentInstruction[1].u.operand;
unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
emitLoad2(base, regT1, regT0, property, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
emitJumpSlowCaseIfNotJSCell(base, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr)));
- addSlowCase(branch32(AboveOrEqual, regT2, Address(regT0, OBJECT_OFFSETOF(JSArray, m_vectorLength))));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ emitArrayProfilingSite(regT1, regT3, profile);
+ and32(TrustedImm32(IndexingShapeMask), regT1);
+
+ PatchableJump badType;
+ JumpList slowCases;
+
+ JITArrayMode mode = chooseArrayMode(profile);
+ switch (mode) {
+ case JITInt32:
+ slowCases = emitInt32PutByVal(currentInstruction, badType);
+ break;
+ case JITDouble:
+ slowCases = emitDoublePutByVal(currentInstruction, badType);
+ break;
+ case JITContiguous:
+ slowCases = emitContiguousPutByVal(currentInstruction, badType);
+ break;
+ case JITArrayStorage:
+ slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
+ break;
+ default:
+ CRASH();
+ break;
+ }
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT3);
+ addSlowCase(badType);
+ addSlowCase(slowCases);
- Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4), Imm32(JSValue::EmptyValueTag));
+ Label done = label();
+
+ m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
+}
+
+JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
+{
+ unsigned value = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+
+ JumpList slowCases;
+
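+    // Guard on the specific shape this stub was generated for; badType is the
+    // patchable jump that gets retargeted if a different shape is seen.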
+    badType = patchableBranch32(NotEqual, regT1, TrustedImm32(indexingShape));
+
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
+ Jump outOfBounds = branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength()));
+
+ Label storeResult = label();
+ emitLoad(value, regT1, regT0);
+ switch (indexingShape) {
+ case Int32Shape:
+ slowCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ // Fall through.
+ case ContiguousShape:
+ store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ break;
+ case DoubleShape: {
+ Jump notInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
+ convertInt32ToDouble(regT0, fpRegT0);
+ Jump ready = jump();
+ notInt.link(this);
+ moveIntsToDouble(regT0, regT1, fpRegT0, fpRegT1);
+ slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
+ ready.link(this);
+ storeDouble(fpRegT0, BaseIndex(regT3, regT2, TimesEight));
+ break;
+ }
+ default:
+ CRASH();
+ break;
+ }
+
+ Jump done = jump();
+
+ outOfBounds.link(this);
+ slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfVectorLength())));
+
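+    // In bounds of the vector but past the public length: a store to a hole. Note
+    // it in the profile, bump the public length, and retry the store.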
+ emitArrayProfileStoreToHoleSpecialCase(profile);
+
+ add32(TrustedImm32(1), regT2, regT1);
+ store32(regT1, Address(regT3, Butterfly::offsetOfPublicLength()));
+ jump().linkTo(storeResult, this);
+
+ done.link(this);
+
+ emitWriteBarrier(regT0, regT1, regT1, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
+
+ return slowCases;
+}
+
+JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType)
+{
+ unsigned value = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+
+ JumpList slowCases;
+
+ badType = patchableBranch32(NotEqual, regT1, TrustedImm32(ArrayStorageShape));
+
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
+ slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, ArrayStorage::vectorLengthOffset())));
+
+ Jump empty = branch32(Equal, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
Label storeResult(this);
emitLoad(value, regT1, regT0);
- store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]))); // payload
- store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + 4)); // tag
+ store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); // payload
+ store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); // tag
Jump end = jump();
empty.link(this);
- add32(Imm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
- branch32(Below, regT2, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
+ emitArrayProfileStoreToHoleSpecialCase(profile);
+ add32(TrustedImm32(1), Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ branch32(Below, regT2, Address(regT3, ArrayStorage::lengthOffset())).linkTo(storeResult, this);
- add32(Imm32(1), regT2, regT0);
- store32(regT0, Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ add32(TrustedImm32(1), regT2, regT0);
+ store32(regT0, Address(regT3, ArrayStorage::lengthOffset()));
jump().linkTo(storeResult, this);
end.link(this);
+
+ emitWriteBarrier(regT0, regT1, regT1, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
+
+ return slowCases;
}
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
unsigned base = currentInstruction[1].u.operand;
unsigned property = currentInstruction[2].u.operand;
unsigned value = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
linkSlowCase(iter); // base not array check
- linkSlowCase(iter); // in vector check
+
+ JITArrayMode mode = chooseArrayMode(profile);
+ switch (mode) {
+ case JITInt32:
+ case JITDouble:
+ linkSlowCase(iter); // value type check
+ break;
+ default:
+ break;
+ }
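+    // Only the Int32 and Double fast paths emitted an extra value-type slow case,
+    // which is why only those modes have an additional jump to link here.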
+
+ Jump skipProfiling = jump();
+ linkSlowCase(iter); // out of bounds
+ emitArrayProfileOutOfBoundsSpecialCase(profile);
+ skipProfiling.link(this);
+
+ Label slowPath = label();
JITStubCall stubPutByValCall(this, cti_op_put_by_val);
stubPutByValCall.addArgument(base);
stubPutByValCall.addArgument(property);
stubPutByValCall.addArgument(value);
- stubPutByValCall.call();
+ Call call = stubPutByValCall.call();
+
+ m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+ m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+ m_byValInstructionIndex++;
}
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int base = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
emitLoad(base, regT1, regT0);
emitJumpSlowCaseIfNotJSCell(base, regT1);
- compileGetByIdHotPath();
+ compileGetByIdHotPath(ident);
+ emitValueProfilingSite();
emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
}
-void JIT::compileGetByIdHotPath()
+void JIT::compileGetByIdHotPath(Identifier* ident)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
    // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
    // to array-length / prototype access trampolines), and finally we also need the property-map access offset as a label
    // to jump back to if one of these trampolines finds a match.
+ if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ emitArrayProfilingSiteForBytecodeIndex(regT2, regT3, m_bytecodeOffset);
+ }
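+    // The structure profile taken above for "length" presumably lets the
+    // optimizing JIT choose an array-length fast path for this site later.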
+
BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
Label hotPathBegin(this);
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
- m_propertyAccessInstructionIndex++;
DataLabelPtr structureToCompare;
- Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
addSlowCase(structureCheck);
- ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
- ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
- Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT2);
- Label externalLoadComplete(this);
- ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetGetByIdExternalLoad);
- ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthGetByIdExternalLoad);
-
- DataLabel32 displacementLabel1 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
- ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetGetByIdPropertyMapOffset1);
- DataLabel32 displacementLabel2 = loadPtrWithAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
- ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetGetByIdPropertyMapOffset2);
+ ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+ DataLabelCompact displacementLabel1 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
+ DataLabelCompact displacementLabel2 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
Label putResult(this);
- ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
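+    // Everything recorded here (structure compare, patchable jump, storage load,
+    // displacement labels, result label) is what the repatching code rewrites once
+    // this get_by_id cache is primed for a concrete structure.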
+ m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, propertyStorageLoad, displacementLabel1, displacementLabel2, putResult));
}
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
int ident = currentInstruction[3].u.operand;
compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
+ emitValueProfilingSite();
}
-void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
+void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter)
{
// As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
// so that we only need track one pointer into the slow case code - we track a pointer to the location
BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
-#ifndef NDEBUG
Label coldPathBegin(this);
-#endif
- JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
+ JITStubCall stubCall(this, cti_op_get_by_id);
stubCall.addArgument(regT1, regT0);
- stubCall.addArgument(ImmPtr(ident));
+ stubCall.addArgument(TrustedImmPtr(ident));
Call call = stubCall.call(dst);
- END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
-
- ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
+ END_UNINTERRUPTED_SEQUENCE_FOR_PUT(sequenceGetByIdSlowCase, dst);
// Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
- m_propertyAccessInstructionIndex++;
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubGetById, coldPathBegin, call);
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
Label hotPathBegin(this);
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
- m_propertyAccessInstructionIndex++;
// It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
DataLabelPtr structureToCompare;
- addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
- ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
+ addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
- // Plant a load from a bogus ofset in the object's property map; we will patch this later, if it is to be used.
- Label externalLoad = loadPtrWithPatchToLEA(Address(regT0, OBJECT_OFFSETOF(JSObject, m_externalStorage)), regT0);
- Label externalLoadComplete(this);
- ASSERT(differenceBetween(hotPathBegin, externalLoad) == patchOffsetPutByIdExternalLoad);
- ASSERT(differenceBetween(externalLoad, externalLoadComplete) == patchLengthPutByIdExternalLoad);
-
- DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT0, patchGetByIdDefaultOffset)); // payload
- DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT0, patchGetByIdDefaultOffset)); // tag
+ ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT1);
+ DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT1, patchPutByIdDefaultOffset)); // payload
+ DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT1, patchPutByIdDefaultOffset)); // tag
END_UNINTERRUPTED_SEQUENCE(sequencePutById);
-
- ASSERT(differenceBetween(hotPathBegin, displacementLabel1) == patchOffsetPutByIdPropertyMapOffset1);
- ASSERT(differenceBetween(hotPathBegin, displacementLabel2) == patchOffsetPutByIdPropertyMapOffset2);
+
+ emitWriteBarrier(regT0, regT2, regT1, regT2, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
+
+ m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, propertyStorageLoad, displacementLabel1, displacementLabel2));
}
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
linkSlowCase(iter);
JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
- stubCall.addArgument(regT1, regT0);
- stubCall.addArgument(ImmPtr(&(m_codeBlock->identifier(ident))));
+ stubCall.addArgument(base);
+ stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
stubCall.addArgument(regT3, regT2);
Call call = stubCall.call();
// Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex].callReturnLocation = call;
- m_propertyAccessInstructionIndex++;
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubPutById, call);
}
// Compile a store into an object's property storage. May overwrite base.
-void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, Structure* structure, size_t cachedOffset)
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, PropertyOffset cachedOffset)
{
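+    // Inline offsets store into the object cell itself; out-of-line offsets live
+    // in the butterfly, so redirect base there first (clobbering base, as noted above).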
- int offset = cachedOffset;
- if (structure->isUsingInlineStorage())
- offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
- else
- loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
- emitStore(offset, valueTag, valuePayload, base);
+ if (isOutOfLineOffset(cachedOffset))
+ loadPtr(Address(base, JSObject::butterflyOffset()), base);
+ emitStore(indexRelativeToBase(cachedOffset), valueTag, valuePayload, base);
}
// Compile a load from an object's property storage. May overwrite base.
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, Structure* structure, size_t cachedOffset)
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset)
{
- int offset = cachedOffset;
- if (structure->isUsingInlineStorage())
- offset += OBJECT_OFFSETOF(JSObject, m_inlineStorage) / sizeof(Register);
- else
- loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
- emitLoad(offset, resultTag, resultPayload, base);
-}
-
-void JIT::compileGetDirectOffset(JSObject* base, RegisterID temp, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset)
-{
- if (base->isUsingInlineStorage()) {
- load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]), resultPayload);
- load32(reinterpret_cast<char*>(&base->m_inlineStorage[cachedOffset]) + 4, resultTag);
+ if (isInlineOffset(cachedOffset)) {
+ emitLoad(indexRelativeToBase(cachedOffset), resultTag, resultPayload, base);
return;
}
- size_t offset = cachedOffset * sizeof(JSValue);
-
- PropertyStorage* protoPropertyStorage = &base->m_externalStorage;
- loadPtr(static_cast<void*>(protoPropertyStorage), temp);
- load32(Address(temp, offset), resultPayload);
- load32(Address(temp, offset + 4), resultTag);
+ RegisterID temp = resultPayload;
+ loadPtr(Address(base, JSObject::butterflyOffset()), temp);
+ emitLoad(indexRelativeToBase(cachedOffset), resultTag, resultPayload, temp);
}
-void JIT::testPrototype(Structure* structure, JumpList& failureCases)
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset)
{
- if (structure->m_prototype.isNull())
+ if (isInlineOffset(cachedOffset)) {
+ move(TrustedImmPtr(base->locationForOffset(cachedOffset)), resultTag);
+ load32(Address(resultTag, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+ load32(Address(resultTag, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
return;
+ }
- failureCases.append(branchPtr(NotEqual, AbsoluteAddress(&asCell(structure->m_prototype)->m_structure), ImmPtr(asCell(structure->m_prototype)->m_structure)));
+ loadPtr(base->butterflyAddress(), resultTag);
+ load32(Address(resultTag, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+ load32(Address(resultTag, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
}
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
- // It is assumed that regT0 contains the basePayload and regT1 contains the baseTag. The value can be found on the stack.
-
+ // The code below assumes that regT0 contains the basePayload and regT1 contains the baseTag. Restore them from the stack.
+#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
+    // On MIPS, SH4 and ARM the return address is not on the stack at this point,
+    // so we don't add sizeof(void*) to the stack offset.
+    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+#else
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+#endif
+
JumpList failureCases;
- failureCases.append(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
- failureCases.append(branchPtr(NotEqual, Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)), ImmPtr(oldStructure)));
- testPrototype(oldStructure, failureCases);
+ failureCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
+ testPrototype(oldStructure->storedPrototype(), failureCases, stubInfo);
if (!direct) {
// Verify that nothing in the prototype chain has a setter for this property.
- for (RefPtr<Structure>* it = chain->head(); *it; ++it)
- testPrototype(it->get(), failureCases);
+ for (WriteBarrier<Structure>* it = chain->head(); *it; ++it)
+ testPrototype((*it)->storedPrototype(), failureCases, stubInfo);
}
+ // If we succeed in all of our checks, and the code was optimizable, then make sure we
+ // decrement the rare case counter.
+#if ENABLE(VALUE_PROFILER)
+ if (m_codeBlock->canCompileWithDFG() >= DFG::MayInline) {
+ sub32(
+ TrustedImm32(1),
+ AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
+ }
+#endif
+
// Reallocate property storage if needed.
Call callTarget;
- bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+ bool willNeedStorageRealloc = oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity();
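+    // A capacity change means the butterfly must be reallocated via a genuine stub
+    // call; it also makes the finished stub routine GC-aware (see createJITStubRoutine).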
if (willNeedStorageRealloc) {
        // This trampoline was called like a JIT stub; before we can call again we need to
        // remove the return address from the stack, to prevent the stack from becoming misaligned.
stubCall.skipArgument(); // base
stubCall.skipArgument(); // ident
stubCall.skipArgument(); // value
- stubCall.addArgument(Imm32(oldStructure->propertyStorageCapacity()));
- stubCall.addArgument(Imm32(newStructure->propertyStorageCapacity()));
+ stubCall.addArgument(TrustedImm32(oldStructure->outOfLineCapacity()));
+ stubCall.addArgument(TrustedImmPtr(newStructure));
stubCall.call(regT0);
-
+
restoreReturnAddressBeforeReturn(regT3);
+
+#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
+    // On MIPS, SH4 and ARM the return address is not on the stack at this point,
+    // so we don't add sizeof(void*) to the stack offset.
+    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+    load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+#else
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+#endif
}
-
- sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
- add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
- storePtr(ImmPtr(newStructure), Address(regT0, OBJECT_OFFSETOF(JSCell, m_structure)));
-
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(struct JITStackFrame, args[2]) + sizeof(void*)), regT3);
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(struct JITStackFrame, args[2]) + sizeof(void*) + 4), regT2);
-
- // Write the value
- compilePutDirectOffset(regT0, regT2, regT3, newStructure, cachedOffset);
+
+ emitWriteBarrier(regT0, regT1, regT1, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
+
+ storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
+#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
+    // On MIPS, SH4 and ARM the return address is not on the stack at this point,
+    // so we don't add sizeof(void*) to the stack offset.
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
+#else
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
+ load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
+#endif
+ compilePutDirectOffset(regT0, regT2, regT3, cachedOffset);
ret();
restoreArgumentReferenceForTrampoline();
Call failureCall = tailRecursiveCall();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
}
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
+ stubInfo->stubRoutine = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline put_by_id transition stub for %s, return point %p",
+ toCString(*m_codeBlock).data(), returnAddress.value())),
+ *m_vm,
+ m_codeBlock->ownerExecutable(),
+ willNeedStorageRealloc,
+ newStructure);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relinkCallerToTrampoline(returnAddress, entryLabel);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
}
-void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
+void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
{
RepatchBuffer repatchBuffer(codeBlock);
// Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
- int offset = sizeof(JSValue) * cachedOffset;
-
- // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
- // and makes the subsequent load's offset automatically correct
- if (structure->isUsingInlineStorage())
- repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetGetByIdExternalLoad));
-
    // Patch the offset into the property map to load from, then patch the Structure to look for.
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetGetByIdStructure), structure);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset1), offset); // payload
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetGetByIdPropertyMapOffset2), offset + 4); // tag
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure);
+ repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.get.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel1), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel2), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
}
-void JIT::patchMethodCallProto(CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, JSFunction* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
-{
- RepatchBuffer repatchBuffer(codeBlock);
-
- ASSERT(!methodCallLinkInfo.cachedStructure);
- methodCallLinkInfo.cachedStructure = structure;
- structure->ref();
-
- Structure* prototypeStructure = proto->structure();
- methodCallLinkInfo.cachedPrototypeStructure = prototypeStructure;
- prototypeStructure->ref();
-
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel, structure);
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoObj), proto);
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckProtoStruct), prototypeStructure);
- repatchBuffer.repatch(methodCallLinkInfo.structureLabel.dataLabelPtrAtOffset(patchOffsetMethodCheckPutFunction), callee);
-
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id));
-}
-
-void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
RepatchBuffer repatchBuffer(codeBlock);
// Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
- int offset = sizeof(JSValue) * cachedOffset;
-
- // If we're patching to use inline storage, convert the initial load to a lea; this avoids the extra load
- // and makes the subsequent load's offset automatically correct
- if (structure->isUsingInlineStorage())
- repatchBuffer.repatchLoadPtrToLEA(stubInfo->hotPathBegin.instructionAtOffset(patchOffsetPutByIdExternalLoad));
-
    // Patch the offset into the property map to load from, then patch the Structure to look for.
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(patchOffsetPutByIdStructure), structure);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset1), offset); // payload
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(patchOffsetPutByIdPropertyMapOffset2), offset + 4); // tag
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure);
+ repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.put.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel1), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel2), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
}
void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
// regT0 holds a JSCell*
// Check for array
- Jump failureCases1 = branchPtr(NotEqual, Address(regT0), ImmPtr(m_globalData->jsArrayVPtr));
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    load8(Address(regT2, Structure::indexingTypeOffset()), regT2);
+    Jump failureCases1 = branchTest32(Zero, regT2, TrustedImm32(IsArray));
+    Jump failureCases2 = branchTest32(Zero, regT2, TrustedImm32(IndexingShapeMask));
// Checks out okay! - get the length from the storage
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSArray, m_storage)), regT2);
- load32(Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
+ loadPtr(Address(regT0, JSArray::butterflyOffset()), regT2);
+ load32(Address(regT2, ArrayStorage::lengthOffset()), regT2);
- Jump failureCases2 = branch32(Above, regT2, Imm32(INT_MAX));
+ Jump failureCases3 = branch32(Above, regT2, TrustedImm32(INT_MAX));
move(regT2, regT0);
- move(Imm32(JSValue::Int32Tag), regT1);
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
// Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
patchBuffer.link(failureCases1, slowCaseBegin);
patchBuffer.link(failureCases2, slowCaseBegin);
+ patchBuffer.link(failureCases3, slowCaseBegin);
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
// Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
+ stubInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+ patchBuffer,
+ ("Baseline get_by_id array length stub for %s, return point %p",
+ toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
+ stubInfo->patch.baseline.u.get.putResult).executableAddress()));
// Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
// regT0 holds a JSCell*
Jump failureCases1 = checkStructure(regT0, structure);
// Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(prototypeStructure), regT3);
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
-#else
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
+ Jump failureCases2 = addStructureTransitionCheck(protoObject, prototypeStructure, stubInfo, regT3);
+
bool needsStubLink = false;
// Checks out okay!
if (slot.cachedPropertyType() == PropertySlot::Getter) {
needsStubLink = true;
- compileGetDirectOffset(protoObject, regT2, regT2, regT1, cachedOffset);
+ compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
stubCall.addArgument(regT1);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else if (slot.cachedPropertyType() == PropertySlot::Custom) {
needsStubLink = true;
JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else
- compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
// Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
patchBuffer.link(failureCases1, slowCaseBegin);
- patchBuffer.link(failureCases2, slowCaseBegin);
+ if (failureCases2.isSet())
+ patchBuffer.link(failureCases2, slowCaseBegin);
// On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
}
// Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
+ stubInfo->stubRoutine = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline get_by_id proto stub for %s, return point %p",
+ toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
+ stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+ *m_vm,
+ m_codeBlock->ownerExecutable(),
+ needsStubLink);
// Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
// We don't want to patch more than once - in future go to cti_op_get_by_id_proto_list.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
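
The pair of relink calls above is the whole trick of the inline cache: the hot path's structure-check jump now lands on the freshly generated proto stub, and the slow call is redirected to cti_op_get_by_id_proto_list so the next miss grows a polymorphic list instead of recompiling a monomorphic stub. A minimal, self-contained model of that relinking (illustrative only; HotPath and the stub names below are not JSC code):

    #include <cstdio>

    using MissTarget = const char* (*)();

    static const char* slowCase() { return "original slow case routine"; }
    static const char* protoStub() { return "generated proto stub"; }

    struct HotPath {
        // Stands in for the jump at patch.baseline.u.get.structureCheck.
        MissTarget structureMiss;
    };

    int main()
    {
        HotPath hotPath = { slowCase };
        // repatchBuffer.relink(jumpLocation, ...) amounts to retargeting
        // that jump, modeled here as a pointer swap.
        hotPath.structureMiss = protoStub;
        printf("structure miss now goes to: %s\n", hotPath.structureMiss());
        return 0;
    }
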
-void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
{
// regT0 holds a JSCell*
Jump failureCase = checkStructure(regT0, structure);
bool needsStubLink = false;
+ bool isDirect = false;
if (slot.cachedPropertyType() == PropertySlot::Getter) {
needsStubLink = true;
- if (!structure->isUsingInlineStorage()) {
- move(regT0, regT1);
- compileGetDirectOffset(regT1, regT2, regT1, structure, cachedOffset);
- } else
- compileGetDirectOffset(regT0, regT2, regT1, structure, cachedOffset);
+ compileGetDirectOffset(regT0, regT2, regT1, cachedOffset);
JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
stubCall.addArgument(regT1);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else if (slot.cachedPropertyType() == PropertySlot::Custom) {
needsStubLink = true;
JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
- } else
- compileGetDirectOffset(regT0, regT1, regT0, structure, cachedOffset);
+ } else {
+ isDirect = true;
+ compileGetDirectOffset(regT0, regT1, regT0, cachedOffset);
+ }
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
patchBuffer.link(iter->from, FunctionPtr(iter->to));
}
}
// Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(polymorphicStructures->list[currentIndex - 1].stubRoutine));
if (!lastProtoBegin)
- lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall);
+ lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
patchBuffer.link(failureCase, lastProtoBegin);
// On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
- structure->ref();
- polymorphicStructures->list[currentIndex].set(entryLabel, structure);
+ RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline get_by_id self list stub for %s, return point %p",
+ toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
+ stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+ *m_vm,
+ m_codeBlock->ownerExecutable(),
+ needsStubLink);
+
+ polymorphicStructures->list[currentIndex].set(*m_vm, m_codeBlock->ownerExecutable(), stubRoutine, structure, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
}
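
Each stub generated here guards exactly one Structure and, via lastProtoBegin, falls back to the stub generated before it, so the finished list behaves like a newest-first linear scan that ends at the cold path. Sketched as plain C++ against JSC's types (CachedCase, loadAtOffset and slowGetById are hypothetical names for illustration):

    struct CachedCase { Structure* structure; PropertyOffset offset; };

    JSValue polymorphicSelfGet(JSObject* base, const CachedCase* cases, size_t count)
    {
        // The newest stub is entered first and jumps to the previous one
        // on a structure mismatch, ending at coldPathBegin.
        for (size_t i = count; i-- > 0;) {
            if (base->structure() == cases[i].structure)
                return loadAtOffset(base, cases[i].offset); // compileGetDirectOffset
        }
        return slowGetById(base); // the original slow case routine
    }
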
-void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
{
// regT0 holds a JSCell*
Jump failureCases1 = checkStructure(regT0, structure);
// Check that the prototype object's Structure has not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(prototypeStructure), regT3);
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3);
-#else
- Jump failureCases2 = branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
+ Jump failureCases2 = addStructureTransitionCheck(protoObject, prototypeStructure, stubInfo, regT3);
bool needsStubLink = false;
+ bool isDirect = false;
if (slot.cachedPropertyType() == PropertySlot::Getter) {
needsStubLink = true;
- compileGetDirectOffset(protoObject, regT2, regT2, regT1, cachedOffset);
+ compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
stubCall.addArgument(regT1);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else if (slot.cachedPropertyType() == PropertySlot::Custom) {
needsStubLink = true;
JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
- } else
- compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+ } else {
+ isDirect = true;
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+ }
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
patchBuffer.link(iter->from, FunctionPtr(iter->to));
}
}
// Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine));
patchBuffer.link(failureCases1, lastProtoBegin);
- patchBuffer.link(failureCases2, lastProtoBegin);
+ if (failureCases2.isSet())
+ patchBuffer.link(failureCases2, lastProtoBegin);
// On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
-
- structure->ref();
- prototypeStructure->ref();
- prototypeStructures->list[currentIndex].set(entryLabel, structure, prototypeStructure);
+ RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline get_by_id proto list stub for %s, return point %p",
+ toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
+ stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+ *m_vm,
+ m_codeBlock->ownerExecutable(),
+ needsStubLink);
+
+ prototypeStructures->list[currentIndex].set(callFrame->vm(), m_codeBlock->ownerExecutable(), stubRoutine, structure, prototypeStructure, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
}
-void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
{
// regT0 holds a JSCell*
ASSERT(count);
JumpList bucketsOfFail;
bucketsOfFail.append(checkStructure(regT0, structure));
Structure* currStructure = structure;
- RefPtr<Structure>* chainEntries = chain->head();
+ WriteBarrier<Structure>* it = chain->head();
JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i) {
+ for (unsigned i = 0; i < count; ++i, ++it) {
protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = chainEntries[i].get();
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(currStructure), regT3);
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
-#else
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail, stubInfo);
}
ASSERT(protoObject);
bool needsStubLink = false;
+ bool isDirect = false;
if (slot.cachedPropertyType() == PropertySlot::Getter) {
needsStubLink = true;
- compileGetDirectOffset(protoObject, regT2, regT2, regT1, cachedOffset);
+ compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
stubCall.addArgument(regT1);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else if (slot.cachedPropertyType() == PropertySlot::Custom) {
needsStubLink = true;
JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
- } else
- compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+ } else {
+ isDirect = true;
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
+ }
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
patchBuffer.link(iter->from, FunctionPtr(iter->to));
}
}
// Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine));
patchBuffer.link(bucketsOfFail, lastProtoBegin);
// On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
-
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
+
+ RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline get_by_id chain list stub for %s, return point %p",
+ toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
+ stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+ *m_vm,
+ m_codeBlock->ownerExecutable(),
+ needsStubLink);
// Track the stub we have created so that it will be deleted later.
- structure->ref();
- chain->ref();
- prototypeStructures->list[currentIndex].set(entryLabel, structure, chain);
+ prototypeStructures->list[currentIndex].set(callFrame->vm(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
}
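
The compile-time loop above walks the cached StructureChain and emits one testPrototype check per hop; the emitted guards are equivalent to the following runtime walk (a sketch reusing the same locals as the loop):

    bool chainStillValid(Structure* structure, WriteBarrier<Structure>* head,
                         size_t count, CallFrame* callFrame)
    {
        Structure* currStructure = structure;
        WriteBarrier<Structure>* it = head;
        for (size_t i = 0; i < count; ++i, ++it) {
            JSObject* protoObject = asObject(currStructure->prototypeForLookup(callFrame));
            currStructure = it->get();
            if (protoObject->structure() != currStructure)
                return false; // the branch testPrototype appends to bucketsOfFail
        }
        return true;
    }
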
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
// regT0 holds a JSCell*
ASSERT(count);
JumpList bucketsOfFail;
bucketsOfFail.append(checkStructure(regT0, structure));
Structure* currStructure = structure;
- RefPtr<Structure>* chainEntries = chain->head();
+ WriteBarrier<Structure>* it = chain->head();
JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i) {
+ for (unsigned i = 0; i < count; ++i, ++it) {
protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = chainEntries[i].get();
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if CPU(X86_64)
- move(ImmPtr(currStructure), regT3);
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), regT3));
-#else
- bucketsOfFail.append(branchPtr(NotEqual, AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail, stubInfo);
}
ASSERT(protoObject);
bool needsStubLink = false;
if (slot.cachedPropertyType() == PropertySlot::Getter) {
needsStubLink = true;
- compileGetDirectOffset(protoObject, regT2, regT2, regT1, cachedOffset);
+ compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
stubCall.addArgument(regT1);
stubCall.addArgument(regT0);
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else if (slot.cachedPropertyType() == PropertySlot::Custom) {
needsStubLink = true;
JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(ImmPtr(protoObject));
- stubCall.addArgument(ImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(ImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(ImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
stubCall.call();
} else
- compileGetDirectOffset(protoObject, regT2, regT1, regT0, cachedOffset);
+ compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
Jump success = jump();
- LinkBuffer patchBuffer(this, m_codeBlock->executablePool(), 0);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
if (needsStubLink) {
for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
if (iter->to)
patchBuffer.link(iter->from, FunctionPtr(iter->to));
}
}
// Use the patch information to link the failure cases back to the original slow case routine.
- patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-patchOffsetGetByIdSlowCaseCall));
+ patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
// On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(patchOffsetGetByIdPutResult));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
// Track the stub we have created so that it will be deleted later.
- CodeLocationLabel entryLabel = patchBuffer.finalizeCodeAddendum();
- stubInfo->stubRoutine = entryLabel;
+ RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline get_by_id chain stub for %s, return point %p",
+ toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
+ stubInfo->patch.baseline.u.get.putResult).executableAddress())),
+ *m_vm,
+ m_codeBlock->ownerExecutable(),
+ needsStubLink);
+ stubInfo->stubRoutine = stubRoutine;
// Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(patchOffsetGetByIdBranchToSlowCase);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, entryLabel);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
// We don't want to patch more than once - in future go to cti_op_get_by_id_proto_list.
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
-/* ------------------------------ END: !ENABLE / ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS) ------------------------------ */
-
-#endif // !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
-
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID structure, RegisterID offset)
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset, FinalObjectMode finalObjectMode)
{
- ASSERT(sizeof(((Structure*)0)->m_propertyStorageCapacity) == sizeof(int32_t));
- ASSERT(sizeof(JSObject::inlineStorageCapacity) == sizeof(int32_t));
ASSERT(sizeof(JSValue) == 8);
- Jump notUsingInlineStorage = branch32(NotEqual, Address(structure, OBJECT_OFFSETOF(Structure, m_propertyStorageCapacity)), Imm32(JSObject::inlineStorageCapacity));
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSObject, m_inlineStorage)+OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
- Jump finishedLoad = jump();
- notUsingInlineStorage.link(this);
- loadPtr(Address(base, OBJECT_OFFSETOF(JSObject, m_externalStorage)), base);
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
- loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
- finishedLoad.link(this);
+ if (finalObjectMode == MayBeFinal) {
+ Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
+ loadPtr(Address(base, JSObject::butterflyOffset()), base);
+ neg32(offset);
+ Jump done = jump();
+ isInline.link(this);
+ addPtr(TrustedImmPtr(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base);
+ done.link(this);
+ } else {
+#if !ASSERT_DISABLED
+ Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
+ breakpoint();
+ isOutOfLine.link(this);
+#endif
+ loadPtr(Address(base, JSObject::butterflyOffset()), base);
+ neg32(offset);
+ }
+ load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultPayload);
+ load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultTag);
}
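
The arithmetic above folds the two property-storage layouts into a single pair of BaseIndex loads. Inline offsets (offset < firstOutOfLineOffset) index upward from the object's inline storage; out-of-line offsets are negated so they index downward from the butterfly; and the (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue) bias added to base in the inline path cancels the same bias baked into the load displacement. The equivalent address computation in scalar C++ (a sketch; slotAddress is a hypothetical helper):

    static char* slotAddress(char* object, char* butterfly, int32_t offset)
    {
        if (offset < firstOutOfLineOffset) {
            // Inline path: the addPtr constant above already subtracts the
            // bias the load displacement adds back, leaving
            // inlineStorage + offset * 8.
            return object + JSObject::offsetOfInlineStorage()
                + offset * static_cast<int32_t>(sizeof(EncodedJSValue));
        }
        // Out-of-line path: neg32(offset) plus the displacement bias yields
        // butterfly + (firstOutOfLineOffset - 2 - offset) * 8; out-of-line
        // slots grow downward from the butterfly pointer.
        return butterfly
            + (firstOutOfLineOffset - 2 - offset) * static_cast<int32_t>(sizeof(EncodedJSValue));
    }
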
void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
emitLoadPayload(iter, regT1);
// Test base's structure
- loadPtr(Address(regT2, OBJECT_OFFSETOF(JSCell, m_structure)), regT0);
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
load32(addressFor(i), regT3);
- sub32(Imm32(1), regT3);
+ sub32(TrustedImm32(1), regT3);
addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
- compileGetDirectOffset(regT2, regT1, regT0, regT0, regT3);
+ Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
+ add32(TrustedImm32(firstOutOfLineOffset), regT3);
+ sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
+ inlineProperty.link(this);
+ compileGetDirectOffset(regT2, regT1, regT0, regT3);
emitStore(dst, regT1, regT0);
- map(m_bytecodeIndex + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
}
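
The branch/add32/sub32 sequence converts the property iterator's dense slot index in regT3 into the PropertyOffset encoding that compileGetDirectOffset expects: indices below the cached inline capacity already are inline offsets, and anything at or above it is rebased into the out-of-line range. The same mapping as scalar code (a sketch; toPropertyOffset is a hypothetical name):

    static int32_t toPropertyOffset(int32_t slotIndex, int32_t inlineCapacity)
    {
        if (slotIndex < inlineCapacity)
            return slotIndex; // inline offsets are the identity mapping
        // add32(firstOutOfLineOffset) then sub32(inlineCapacity) above:
        return slotIndex + firstOutOfLineOffset - inlineCapacity;
    }
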
void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_get_by_val);
+ JITStubCall stubCall(this, cti_op_get_by_val_generic);
stubCall.addArgument(base);
stubCall.addArgument(property);
stubCall.call(dst);
}
-} // namespace JSC
+void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int index = currentInstruction[2].u.operand;
+ int skip = currentInstruction[3].u.operand;
+
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
+ loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
-#endif // ENABLE(JIT)
+ loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);
+
+ emitLoad(index, regT1, regT0, regT2);
+ emitValueProfilingSite();
+ emitStore(dst, regT1, regT0);
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
+}
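+
The emitted fast path is just the compiled form of a scope-chain walk; at runtime it computes the equivalent of the sketch below (assuming CallFrame::scope(), JSScope::next() and JSVariableObject::registerAt() as used elsewhere in this revision):

    JSValue getScopedVar(CallFrame* callFrame, int index, int skip)
    {
        JSScope* scope = callFrame->scope(); // the JSStack::ScopeChain header slot
        while (skip--)
            scope = scope->next();           // loadPtr(JSScope::offsetOfNext())
        JSVariableObject* variableObject = jsCast<JSVariableObject*>(scope);
        return variableObject->registerAt(index).get(); // offsetOfRegisters() load
    }
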
+
+void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+{
+ int index = currentInstruction[1].u.operand;
+ int skip = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
-#endif // ENABLE(JSVALUE32_64)
+ emitLoad(value, regT1, regT0);
+
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
+ loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
+ loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT3);
+ emitStore(index, regT1, regT0, regT3);
+ emitWriteBarrier(regT2, regT1, regT0, regT1, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+}
+
+void JIT::emit_op_init_global_const(Instruction* currentInstruction)
+{
+ WriteBarrier<Unknown>* registerPointer = currentInstruction[1].u.registerPointer;
+ int value = currentInstruction[2].u.operand;
+
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+
+ emitLoad(value, regT1, regT0);
+
+ if (Heap::isWriteBarrierEnabled()) {
+ move(TrustedImmPtr(globalObject), regT2);
+
+ emitWriteBarrier(globalObject, regT1, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ }
+
+ store32(regT1, registerPointer->tagPointer());
+ store32(regT0, registerPointer->payloadPointer());
+ map(m_bytecodeOffset + OPCODE_LENGTH(op_init_global_const), value, regT1, regT0);
+}
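+
On JSVALUE32_64 the value lands in the global register as two separate 32-bit stores (tag, then payload), and the barrier is emitted before either store so the heap learns about the mutated global object first. The runtime equivalent is a one-liner (a sketch; the WriteBarrier<Unknown>::set signature is assumed from this era of the tree):

    static void initGlobalConst(VM& vm, JSGlobalObject* globalObject,
                                WriteBarrier<Unknown>* registerPointer, JSValue value)
    {
        // set() performs the barrier and then the store, matching the
        // emitWriteBarrier + store32/store32 sequence above.
        registerPointer->set(vm, globalObject, value);
    }
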
+
+void JIT::emit_op_init_global_const_check(Instruction* currentInstruction)
+{
+ WriteBarrier<Unknown>* registerPointer = currentInstruction[1].u.registerPointer;
+ int value = currentInstruction[2].u.operand;
+
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+
+ emitLoad(value, regT1, regT0);
+
+ addSlowCase(branchTest8(NonZero, AbsoluteAddress(currentInstruction[3].u.predicatePointer)));
+
+ if (Heap::isWriteBarrierEnabled()) {
+ move(TrustedImmPtr(globalObject), regT2);
+ emitWriteBarrier(globalObject, regT1, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ }
+
+ store32(regT1, registerPointer->tagPointer());
+ store32(regT0, registerPointer->payloadPointer());
+ unmap();
+}
+
+void JIT::emitSlow_op_init_global_const_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_init_global_const_check);
+ stubCall.addArgument(regT1, regT0);
+ stubCall.addArgument(TrustedImm32(currentInstruction[4].u.operand));
+ stubCall.call();
+}
+
+void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
+{
+ repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), reinterpret_cast<void*>(unusedPointer));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel1), 0);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel2), 0);
+ repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck), stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
+}
+
+void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
+{
+ if (isDirectPutById(stubInfo))
+ repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct);
+ else
+ repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), reinterpret_cast<void*>(unusedPointer));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel1), 0);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel2), 0);
+}
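+
Both reset routines return a patched inline cache to its virgin state, the shape it had straight out of the baseline compiler, so it can be specialized again or left generic. Schematically (assuming unusedPointer is the sentinel value no real Structure* can equal):

    // After resetPatchGetById the hot path reads:
    //   structureToCompare   = unusedPointer  -> the inline check always fails
    //   displacementLabel1/2 = 0              -> the inline loads are inert
    //   structureCheck jump  -> coldPathBegin -> always take the slow case
    //   callReturnLocation   -> cti_op_get_by_id, the unspecialized stub
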
+
+} // namespace JSC
+
+#endif // USE(JSVALUE32_64)
+#endif // ENABLE(JIT)