/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
*/
#include "config.h"
-#include "JIT.h"
#if ENABLE(JIT)
+#include "JIT.h"
#include "CodeBlock.h"
+#include "GetterSetter.h"
#include "JITInlineMethods.h"
+#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
#include "Interpreter.h"
+#include "LinkBuffer.h"
+#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
using namespace std;
namespace JSC {
+#if USE(JSVALUE64)
+
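+// Shared stub for the string[index] fast path of get_by_val. On entry regT0
+// holds the (checked-below) string cell and regT1 the index; on success the
+// single-character JSString is returned in regT0, and on any failure regT0 is
+// set to 0 so the caller can fall through to the generic slow case.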
+JIT::CodeRef JIT::stringGetByValStubGenerator(JSGlobalData* globalData)
+{
+ JSInterfaceJIT jit;
+ JumpList failures;
+ failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info)));
-#if !ENABLE(JIT_OPTIMIZE_PROPERTY_ACCESS)
+ // Load string length to regT2, and start the process of loading the data pointer into regT0
+ jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
+ jit.loadPtr(Address(regT0, ThunkHelpers::jsStringValueOffset()), regT0);
+ failures.append(jit.branchTest32(Zero, regT0));
-void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned)
+ // Do an unsigned compare to simultaneously filter negative indices as well as indices that are too large
+ failures.append(jit.branch32(AboveOrEqual, regT1, regT2));
+
+ // Load the character
+ JumpList is16Bit;
+ JumpList cont8Bit;
+ // Load the string flags
+ jit.loadPtr(Address(regT0, ThunkHelpers::stringImplFlagsOffset()), regT2);
+ jit.loadPtr(Address(regT0, ThunkHelpers::stringImplDataOffset()), regT0);
+ is16Bit.append(jit.branchTest32(Zero, regT2, TrustedImm32(ThunkHelpers::stringImpl8BitFlag())));
+ jit.load8(BaseIndex(regT0, regT1, TimesOne, 0), regT0);
+ cont8Bit.append(jit.jump());
+ is16Bit.link(&jit);
+ jit.load16(BaseIndex(regT0, regT1, TimesTwo, 0), regT0);
+ cont8Bit.link(&jit);
+
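+ // Only characters 0..0xFF have preallocated single-character strings in
+ // SmallStrings; any larger code unit falls back to the slow case.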
+ failures.append(jit.branch32(AboveOrEqual, regT0, TrustedImm32(0x100)));
+ jit.move(TrustedImmPtr(globalData->smallStrings.singleCharacterStrings()), regT1);
+ jit.loadPtr(BaseIndex(regT1, regT0, ScalePtr, 0), regT0);
+ jit.ret();
+
+ failures.link(&jit);
+ jit.move(TrustedImm32(0), regT0);
+ jit.ret();
+
+ LinkBuffer patchBuffer(*globalData, &jit, GLOBAL_THUNK_ID);
+ return patchBuffer.finalizeCode();
+}
+
+void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
- // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
- // Additionally, for get_by_id we need patch the offset of the branch to the slow case (we patch this to jump
- // to array-length / prototype access tranpolines, and finally we also the the property-map access offset as a label
- // to jump back to if one of these trampolies finds a match.
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
- emitGetVirtualRegister(baseVReg, X86::eax);
+ emitGetVirtualRegisters(base, regT0, property, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArgConstant(ident, 2);
- emitCTICall(Interpreter::cti_op_get_by_id_generic);
- emitPutVirtualRegister(resultVReg);
+ // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
+ // We check the value as if it were a uint32 against m_vectorLength - which will always fail if the
+ // number was negative, since m_vectorLength is always less than INT_MAX (the total allocation size is
+ // always less than 4GB). As such, zero-extending will have been correct (and extending the value to
+ // 64 bits is necessary since it is used in the address calculation). We zero-extend rather than
+ // sign-extend since that makes it easier to re-tag the value in the slow case.
+ zeroExtend32ToPtr(regT1, regT1);
+
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info)));
+
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));
+
+ loadPtr(BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])), regT0);
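+ // An empty (hole) slot is represented by the encoded empty JSValue, which is
+ // all-zero bits in the 64-bit value representation, so a zero load means we
+ // must take the slow case.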
+ addSlowCase(branchTestPtr(Zero, regT0));
+
+ emitValueProfilingSite();
+ emitPutVirtualRegister(dst);
}
+void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ Jump nonCell = jump();
+ linkSlowCase(iter); // base array check
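+ // The base wasn't an array, but it may still be a string - try the
+ // string[index] stub before giving up and calling the generic stub.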
+ Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSString::s_info));
+ emitNakedCall(CodeLocationLabel(m_globalData->getCTIStub(stringGetByValStubGenerator).code()));
+ Jump failed = branchTestPtr(Zero, regT0);
+ emitPutVirtualRegister(dst, regT0);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_get_by_val));
+ failed.link(this);
+ notString.link(this);
+ nonCell.link(this);
+
+ linkSlowCase(iter); // vector length check
+ linkSlowCase(iter); // empty value
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base, regT2);
+ stubCall.addArgument(property, regT2);
+ stubCall.call(dst);
+
+ emitValueProfilingSite();
+}
-void JIT::compileGetByIdSlowCase(int, int, Identifier*, Vector<SlowCaseEntry>::iterator&, unsigned)
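+// Compile a load from an object's property storage, with the offset held in a
+// register (as used by op_get_by_pname below). Clobbers the scratch register.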
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch)
{
- ASSERT_NOT_REACHED();
+ loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), scratch);
+ loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result);
}
-void JIT::compilePutByIdHotPath(int baseVReg, Identifier* ident, int valueVReg, unsigned)
+void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
{
- // In order to be able to patch both the Structure, and the object offset, we store one pointer,
- // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
- // such that the Structure & offset are always at the same distance from this.
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
+ unsigned expected = currentInstruction[4].u.operand;
+ unsigned iter = currentInstruction[5].u.operand;
+ unsigned i = currentInstruction[6].u.operand;
+
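+ // Fast path for the for-in-specialized get: if the property register still
+ // holds the expected string, and the base's Structure matches the iterator's
+ // cached Structure, the value can be loaded straight out of the property
+ // storage at the cached slot index.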
+ emitGetVirtualRegister(property, regT0);
+ addSlowCase(branchPtr(NotEqual, regT0, addressFor(expected)));
+ emitGetVirtualRegisters(base, regT0, iter, regT1);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+
+ // Test base's structure
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+ load32(addressFor(i), regT3);
+ sub32(TrustedImm32(1), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
+ compileGetDirectOffset(regT0, regT0, regT3, regT1);
+
+ emitPutVirtualRegister(dst, regT0);
+}
- emitGetVirtualRegisters(baseVReg, X86::eax, valueVReg, X86::edx);
+void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned dst = currentInstruction[1].u.operand;
+ unsigned base = currentInstruction[2].u.operand;
+ unsigned property = currentInstruction[3].u.operand;
- emitPutJITStubArgConstant(ident, 2);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 3);
- emitCTICall(Interpreter::cti_op_put_by_id_generic);
+ linkSlowCase(iter);
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITStubCall stubCall(this, cti_op_get_by_val);
+ stubCall.addArgument(base, regT2);
+ stubCall.addArgument(property, regT2);
+ stubCall.call(dst);
+}
+
+void JIT::emit_op_put_by_val(Instruction* currentInstruction)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(base, regT0, property, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ // See comment in op_get_by_val.
+ zeroExtend32ToPtr(regT1, regT1);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info)));
+ addSlowCase(branch32(AboveOrEqual, regT1, Address(regT0, JSArray::vectorLengthOffset())));
+
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT2);
+ Jump empty = branchTestPtr(Zero, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+
+ Label storeResult(this);
+ emitGetVirtualRegister(value, regT3);
+ storePtr(regT3, BaseIndex(regT2, regT1, ScalePtr, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ Jump end = jump();
+
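+ // Out-of-line path for writing into a hole: count the new value in
+ // m_numValuesInVector and, if the index is past the current length, bump
+ // m_length to index + 1 before looping back to perform the store.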
+ empty.link(this);
+ add32(TrustedImm32(1), Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_numValuesInVector)));
+ branch32(Below, regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length))).linkTo(storeResult, this);
+
+ add32(TrustedImm32(1), regT1);
+ store32(regT1, Address(regT2, OBJECT_OFFSETOF(ArrayStorage, m_length)));
+ sub32(TrustedImm32(1), regT1);
+ jump().linkTo(storeResult, this);
+
+ end.link(this);
+
+ emitWriteBarrier(regT0, regT3, regT1, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
+}
+
+void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned base = currentInstruction[1].u.operand;
+ unsigned property = currentInstruction[2].u.operand;
+ unsigned value = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base not array check
+ linkSlowCase(iter); // in vector check
+
+ JITStubCall stubPutByValCall(this, cti_op_put_by_val);
+ stubPutByValCall.addArgument(regT0);
+ stubPutByValCall.addArgument(property, regT2);
+ stubPutByValCall.addArgument(value, regT2);
+ stubPutByValCall.call();
}
-void JIT::compilePutByIdSlowCase(int, Identifier*, int, Vector<SlowCaseEntry>::iterator&, unsigned)
+void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
- ASSERT_NOT_REACHED();
+ JITStubCall stubCall(this, cti_op_put_by_index);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.call();
}
-#else
+void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_put_getter_setter);
+ stubCall.addArgument(currentInstruction[1].u.operand, regT2);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
+ stubCall.addArgument(currentInstruction[3].u.operand, regT2);
+ stubCall.addArgument(currentInstruction[4].u.operand, regT2);
+ stubCall.call();
+}
-void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
+void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+{
+ JITStubCall stubCall(this, cti_op_del_by_id);
+ stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+ stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
+ stubCall.call(currentInstruction[1].u.operand);
+}
+
+void JIT::emit_op_method_check(Instruction* currentInstruction)
+{
+ // Assert that the following instruction is a get_by_id.
+ ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id);
+
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitGetVirtualRegister(baseVReg, regT0);
+
+ // Do the method check - check the object & its prototype's structure inline (this is the common case).
+ m_methodCallCompilationInfo.append(MethodCallCompilationInfo(m_bytecodeOffset, m_propertyAccessCompilationInfo.size()));
+ MethodCallCompilationInfo& info = m_methodCallCompilationInfo.last();
+
+ Jump notCell = emitJumpIfNotJSCell(regT0);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
+ Jump structureCheck = branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), info.structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
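+ // regT1 is loaded with a placeholder (0) here; when the method check is
+ // linked, this pointer and the two structure pointers are repatched to the
+ // actual prototype object and the observed Structures.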
+ DataLabelPtr protoStructureToCompare, protoObj = moveWithPatch(TrustedImmPtr(0), regT1);
+ Jump protoStructureCheck = branchPtrWithPatch(NotEqual, Address(regT1, JSCell::structureOffset()), protoStructureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+
+ // This will be relinked to load the function directly as a constant, rather than performing a load.
+ DataLabelPtr putFunction = moveWithPatch(TrustedImmPtr(0), regT0);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceMethodCheck);
+
+ Jump match = jump();
+
+ // Link the failure cases here.
+ notCell.link(this);
+ structureCheck.link(this);
+ protoStructureCheck.link(this);
+
+ // Do a regular(ish) get_by_id (the slow case will be linked to
+ // cti_op_get_by_id_method_check instead of cti_op_get_by_id).
+ compileGetByIdHotPath(baseVReg, ident);
+
+ match.link(this);
+ emitValueProfilingSite(m_bytecodeOffset + OPCODE_LENGTH(op_method_check));
+ emitPutVirtualRegister(resultVReg);
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
+
+ m_propertyAccessCompilationInfo.last().addMethodCheckInfo(info.structureToCompare, protoObj, protoStructureToCompare, putFunction);
+}
+
+void JIT::emitSlow_op_method_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ currentInstruction += OPCODE_LENGTH(op_method_check);
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, true);
+ emitValueProfilingSite(m_bytecodeOffset + OPCODE_LENGTH(op_method_check));
+
+ // We've already generated the following get_by_id, so make sure it's skipped over.
+ m_bytecodeOffset += OPCODE_LENGTH(op_get_by_id);
+}
+
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+{
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitGetVirtualRegister(baseVReg, regT0);
+ compileGetByIdHotPath(baseVReg, ident);
+ emitValueProfilingSite();
+ emitPutVirtualRegister(resultVReg);
+}
+
+void JIT::compileGetByIdHotPath(int baseVReg, Identifier*)
{
// As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
// Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
// to the array-length / prototype access trampolines), and finally we also record the property-map access offset
// as a label to jump back to if one of these trampolines finds a match.
- emitGetVirtualRegister(baseVReg, X86::eax);
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
- emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg);
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
Label hotPathBegin(this);
- m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
DataLabelPtr structureToCompare;
- Jump structureCheck = jnePtrWithPatch(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
+ PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
addSlowCase(structureCheck);
- ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetGetByIdStructure);
- ASSERT(differenceBetween(hotPathBegin, structureCheck) == patchOffsetGetByIdBranchToSlowCase);
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
- DataLabel32 displacementLabel = loadPtrWithAddressOffsetPatch(Address(X86::eax, patchGetByIdDefaultOffset), X86::eax);
- ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetGetByIdPropertyMapOffset);
+ loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT0);
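+ // patchGetByIdDefaultOffset is a dummy offset; the compact load below is
+ // repatched with the real property offset once the access has been cached.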
+ DataLabelCompact displacementLabel = loadPtrWithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
Label putResult(this);
- ASSERT(differenceBetween(hotPathBegin, putResult) == patchOffsetGetByIdPutResult);
- emitPutVirtualRegister(resultVReg);
+
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+
+ m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, displacementLabel, putResult));
}
+void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ unsigned resultVReg = currentInstruction[1].u.operand;
+ unsigned baseVReg = currentInstruction[2].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter, false);
+ emitValueProfilingSite();
+}
-void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
+void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck)
{
// As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
// so that we only need to track one pointer into the slow case code - we track a pointer to the location of the call.
linkSlowCaseIfNotJSCell(iter, baseVReg);
linkSlowCase(iter);
-#ifndef NDEBUG
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+
Label coldPathBegin(this);
-#endif
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArgConstant(ident, 2);
- Jump call = emitCTICall(Interpreter::cti_op_get_by_id);
- emitPutVirtualRegister(resultVReg);
+ JITStubCall stubCall(this, isMethodCheck ? cti_op_get_by_id_method_check : cti_op_get_by_id);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ Call call = stubCall.call(resultVReg);
- ASSERT(differenceBetween(coldPathBegin, call) == patchOffsetGetByIdSlowCaseCall);
+ END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
// Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubGetById, coldPathBegin, call);
}
-void JIT::compilePutByIdHotPath(int baseVReg, Identifier*, int valueVReg, unsigned propertyAccessInstructionIndex)
+void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
+ unsigned baseVReg = currentInstruction[1].u.operand;
+ unsigned valueVReg = currentInstruction[3].u.operand;
+
// In order to be able to patch both the Structure and the object offset, we store one pointer,
// 'hotPathBegin', to just after the point at which the arguments have been loaded into registers,
// and we generate code such that the Structure & offset are always at the same distance from it.
- emitGetVirtualRegisters(baseVReg, X86::eax, valueVReg, X86::edx);
+ emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
// Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
- emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg);
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
+
+ BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
Label hotPathBegin(this);
- m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;
// It is important that the following instruction plants a 32-bit immediate, in order that it can be patched over.
DataLabelPtr structureToCompare;
- addSlowCase(jnePtrWithPatch(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), structureToCompare, ImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
- ASSERT(differenceBetween(hotPathBegin, structureToCompare) == patchOffsetPutByIdStructure);
+ addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
+
+ loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT2);
+ DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));
- // Plant a load from a bogus ofset in the object's property map; we will patch this later, if it is to be used.
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
- DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(X86::edx, Address(X86::eax, patchGetByIdDefaultOffset));
- ASSERT(differenceBetween(hotPathBegin, displacementLabel) == patchOffsetPutByIdPropertyMapOffset);
+ END_UNINTERRUPTED_SEQUENCE(sequencePutById);
+
+ emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
+
+ m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, displacementLabel));
}
-void JIT::compilePutByIdSlowCase(int baseVReg, Identifier* ident, int, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
+void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ unsigned baseVReg = currentInstruction[1].u.operand;
+ Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
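+ // Operand 8 of op_put_by_id is the 'direct' flag: direct puts store on the
+ // base object itself, bypassing setters on the prototype chain, and so use
+ // the _direct stub variants.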
+ unsigned direct = currentInstruction[8].u.operand;
+
linkSlowCaseIfNotJSCell(iter, baseVReg);
linkSlowCase(iter);
- emitPutJITStubArgConstant(ident, 2);
- emitPutJITStubArg(X86::eax, 1);
- emitPutJITStubArg(X86::edx, 3);
- Jump call = emitCTICall(Interpreter::cti_op_put_by_id);
+ JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(ident));
+ stubCall.addArgument(regT1);
+ Call call = stubCall.call();
// Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
+ m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubPutById, call);
}
-static JSObject* resizePropertyStorage(JSObject* baseObject, int32_t oldSize, int32_t newSize)
+// Compile a store into an object's property storage. May overwrite the
+// value in the base register.
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, size_t cachedOffset)
{
- baseObject->allocatePropertyStorage(oldSize, newSize);
- return baseObject;
+ int offset = cachedOffset * sizeof(JSValue);
+ loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), base);
+ storePtr(value, Address(base, offset));
}
-static inline bool transitionWillNeedStorageRealloc(Structure* oldStructure, Structure* newStructure)
+// Compile a load from an object's property storage. May overwrite base.
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, size_t cachedOffset)
{
- return oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+ int offset = cachedOffset * sizeof(JSValue);
+ loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), result);
+ loadPtr(Address(result, offset), result);
}
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, void* returnAddress)
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset)
+{
+ loadPtr(base->addressOfPropertyStorage(), result);
+ loadPtr(Address(result, cachedOffset * sizeof(WriteBarrier<Unknown>)), result);
+}
+
+void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
{
JumpList failureCases;
// Check regT0 is an object of the right Structure.
- failureCases.append(emitJumpIfNotJSCell(X86::eax));
- failureCases.append(jnePtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(oldStructure)));
- JumpList successCases;
-
- // ecx = baseObject
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
- // proto(ecx) = baseObject->structure()->prototype()
- failureCases.append(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
-
- loadPtr(Address(X86::ecx, FIELD_OFFSET(Structure, m_prototype)), X86::ecx);
+ failureCases.append(emitJumpIfNotJSCell(regT0));
+ failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
+
+ testPrototype(oldStructure->storedPrototype(), failureCases);
+ ASSERT(oldStructure->storedPrototype().isNull() || oldStructure->storedPrototype().asCell()->structure() == chain->head()->get());
+
// For a non-direct put we must also check every Structure along the prototype chain: a setter or
// read-only property appearing on a prototype would change the behaviour of this transition.
- for (RefPtr<Structure>* it = chain->head(); *it; ++it) {
- // null check the prototype
- successCases.append(jePtr(X86::ecx, ImmPtr(JSValuePtr::encode(jsNull()))));
-
- // Check the structure id
- failureCases.append(jnePtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), ImmPtr(it->get())));
-
- loadPtr(Address(X86::ecx, FIELD_OFFSET(JSCell, m_structure)), X86::ecx);
- failureCases.append(jne32(Address(X86::ecx, FIELD_OFFSET(Structure, m_typeInfo) + FIELD_OFFSET(TypeInfo, m_type)), Imm32(ObjectType)));
- loadPtr(Address(X86::ecx, FIELD_OFFSET(Structure, m_prototype)), X86::ecx);
+ if (!direct) {
+ for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
+ ASSERT((*it)->storedPrototype().isNull() || (*it)->storedPrototype().asCell()->structure() == it[1].get());
+ testPrototype((*it)->storedPrototype(), failureCases);
+ }
}
- successCases.link(this);
-
- Jump callTarget;
-
- // emit a call only if storage realloc is needed
- if (transitionWillNeedStorageRealloc(oldStructure, newStructure)) {
- pop(X86::ebx);
-#if PLATFORM(X86_64)
- move(Imm32(newStructure->propertyStorageCapacity()), X86::edx);
- move(Imm32(oldStructure->propertyStorageCapacity()), X86::esi);
- move(X86::eax, X86::edi);
- callTarget = call();
-#else
- push(Imm32(newStructure->propertyStorageCapacity()));
- push(Imm32(oldStructure->propertyStorageCapacity()));
- push(X86::eax);
- callTarget = call();
- addPtr(Imm32(3 * sizeof(void*)), X86::esp);
+ // If we succeed in all of our checks, and the code is optimizable, then make sure we
+ // decrement the rare case counter.
+#if ENABLE(VALUE_PROFILER)
+ if (m_codeBlock->canCompileWithDFG()) {
+ sub32(
+ TrustedImm32(1),
+ AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
+ }
#endif
- emitGetJITStubArg(3, X86::edx);
- push(X86::ebx);
+
+ // Emit a call only if a storage reallocation is needed.
+ bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity();
+ if (willNeedStorageRealloc) {
+ // This trampoline is called like a JIT stub; before we can make another call we need to
+ // remove the return address from the stack, to prevent the stack from becoming misaligned.
+ preserveReturnAddressAfterCall(regT3);
+
+ JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
+ stubCall.skipArgument(); // base
+ stubCall.skipArgument(); // ident
+ stubCall.skipArgument(); // value
+ stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity()));
+ stubCall.addArgument(TrustedImmPtr(newStructure));
+ stubCall.call(regT0);
+ emitGetJITStubArg(2, regT1);
+
+ restoreReturnAddressBeforeReturn(regT3);
}
- // Assumes m_refCount can be decremented easily, refcount decrement is safe as
- // codeblock should ensure oldStructure->m_refCount > 0
- sub32(Imm32(1), AbsoluteAddress(oldStructure->addressOfCount()));
- add32(Imm32(1), AbsoluteAddress(newStructure->addressOfCount()));
- storePtr(ImmPtr(newStructure), Address(X86::eax, FIELD_OFFSET(JSCell, m_structure)));
+ // Planting the new structure triggers the write barrier so we need
+ // an unconditional barrier here.
+ emitWriteBarrier(regT0, regT1, regT2, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
- // write the value
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
- storePtr(X86::edx, Address(X86::eax, cachedOffset * sizeof(JSValuePtr)));
+ ASSERT(newStructure->classInfo() == oldStructure->classInfo());
+ storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
+ compilePutDirectOffset(regT0, regT1, cachedOffset);
ret();
- Jump failureJump;
- bool plantedFailureJump = false;
- if (!failureCases.empty()) {
- failureCases.link(this);
- restoreArgumentReferenceForTrampoline();
- failureJump = jump();
- plantedFailureJump = true;
- }
+ ASSERT(!failureCases.empty());
+ failureCases.link(this);
+ restoreArgumentReferenceForTrampoline();
+ Call failureCall = tailRecursiveCall();
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
- if (plantedFailureJump)
- patchBuffer.link(failureJump, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
+ patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
- if (transitionWillNeedStorageRealloc(oldStructure, newStructure))
- patchBuffer.link(callTarget, reinterpret_cast<void*>(resizePropertyStorage));
-
- stubInfo->stubRoutine = code;
+ if (willNeedStorageRealloc) {
+ ASSERT(m_calls.size() == 1);
+ patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
+ }
- Jump::patch(returnAddress, code);
+ stubInfo->stubRoutine = patchBuffer.finalizeCode();
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine.code()));
}
-void JIT::patchGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress)
{
+ RepatchBuffer repatchBuffer(codeBlock);
+
// We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
- // Should probably go to Interpreter::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
- Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
+ // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
+
+ int offset = sizeof(JSValue) * cachedOffset;
// Patch the offset into the property map to load from, then patch the Structure to look for.
- void* structureAddress = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdStructure);
- void* displacementAddress = reinterpret_cast<void*>(reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPropertyMapOffset);
- DataLabelPtr::patch(structureAddress, structure);
- DataLabel32::patch(displacementAddress, cachedOffset * sizeof(JSValuePtr));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), offset);
}
-void JIT::patchPutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct)
{
+ RepatchBuffer repatchBuffer(codeBlock);
+
// We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- // Should probably go to Interpreter::cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
- Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_generic));
+ // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
+
+ int offset = sizeof(JSValue) * cachedOffset;
// Patch the offset into the property map to load from, then patch the Structure to look for.
- void* structureAddress = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetPutByIdStructure;
- void* displacementAddress = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetPutByIdPropertyMapOffset;
- DataLabelPtr::patch(structureAddress, structure);
- DataLabel32::patch(displacementAddress, cachedOffset * sizeof(JSValuePtr));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), offset);
}
-void JIT::privateCompilePatchGetArrayLength(void* returnAddress)
+void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
{
StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_array_fail));
-
// Check regT0 is an array
- Jump failureCases1 = jnePtr(Address(X86::eax), ImmPtr(m_interpreter->m_jsArrayVptr));
+ Jump failureCases1 = branchPtr(NotEqual, Address(regT0, JSCell::classInfoOffset()), TrustedImmPtr(&JSArray::s_info));
// Checks out okay! - get the length from the storage
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSArray, m_storage)), X86::ecx);
- load32(Address(X86::ecx, FIELD_OFFSET(ArrayStorage, m_length)), X86::ecx);
-
- Jump failureCases2 = ja32(X86::ecx, Imm32(JSImmediate::maxImmediateInt));
+ loadPtr(Address(regT0, JSArray::storageOffset()), regT3);
+ load32(Address(regT3, OBJECT_OFFSETOF(ArrayStorage, m_length)), regT2);
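+ // A length that doesn't fit in an int32 cannot be represented as an
+ // immediate integer, so treat it as a failure (it reads as negative here).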
+ Jump failureCases2 = branch32(LessThan, regT2, TrustedImm32(0));
- emitFastArithIntToImmNoCheck(X86::ecx, X86::eax);
+ emitFastArithIntToImmNoCheck(regT2, regT0);
Jump success = jump();
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
// Use the patch information to link the failure cases back to the original slow case routine.
- void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall;
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
patchBuffer.link(failureCases1, slowCaseBegin);
patchBuffer.link(failureCases2, slowCaseBegin);
// On success return back to the hot path code, at a point where it will perform the store to dest for us.
- void* hotPathPutResult = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
- patchBuffer.link(success, hotPathPutResult);
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
// Track the stub we have created so that it will be deleted later.
- stubInfo->stubRoutine = code;
-
- // Finally patch the jump to sow case back in the hot path to jump here instead.
- void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
- Jump::patch(jumpLocation, code);
-}
+ stubInfo->stubRoutine = patchBuffer.finalizeCode();
-void JIT::privateCompileGetByIdSelf(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
-{
- // Check eax is an object of the right Structure.
- Jump failureCases1 = emitJumpIfNotJSCell(X86::eax);
- Jump failureCases2 = checkStructure(X86::eax, structure);
-
- // Checks out okay! - getDirectOffset
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
- loadPtr(Address(X86::eax, cachedOffset * sizeof(JSValuePtr)), X86::eax);
- ret();
-
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
-
- patchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
- patchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_self_fail));
-
- stubInfo->stubRoutine = code;
+ // Finally patch the jump to slow case back in the hot path to jump here instead.
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
- Jump::patch(returnAddress, code);
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
}
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
-#if USE(CTI_REPATCH_PIC)
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
-
// The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
// referencing the prototype object - let's speculatively load its table nice and early!)
JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
- PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(static_cast<void*>(protoPropertyStorage), X86::edx);
// Check regT0 is an object of the right Structure.
- Jump failureCases1 = checkStructure(X86::eax, structure);
+ Jump failureCases1 = checkStructure(regT0, structure);
// Check that the prototype object's Structure has not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
- move(ImmPtr(prototypeStructure), X86::ebx);
- Jump failureCases2 = jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress));
-#else
- Jump failureCases2 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
-
- // Checks out okay! - getDirectOffset
- loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+ move(TrustedImmPtr(protoObject), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
+ bool needsStubLink = false;
+
+ // Checks out okay!
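+ // A cached getter or custom accessor has to be invoked via a stub call;
+ // only plain data properties can be loaded inline.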
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT0, cachedOffset);
Jump success = jump();
-
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
// Use the patch information to link the failure cases back to the original slow case routine.
- void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall;
+ CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
patchBuffer.link(failureCases1, slowCaseBegin);
patchBuffer.link(failureCases2, slowCaseBegin);
// On success return back to the hot path code, at a point where it will perform the store to dest for us.
- intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
- patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
// Track the stub we have created so that it will be deleted later.
- stubInfo->stubRoutine = code;
+ stubInfo->stubRoutine = patchBuffer.finalizeCode();
// Finally patch the jump to slow case back in the hot path to jump here instead.
- void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
- Jump::patch(jumpLocation, code);
-#else
- // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
- // referencing the prototype object - let's speculatively load it's table nice and early!)
- JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
- PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(protoPropertyStorage, X86::edx);
-
- // Check eax is an object of the right Structure.
- Jump failureCases1 = emitJumpIfNotJSCell(X86::eax);
- Jump failureCases2 = checkStructure(X86::eax, structure);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code()));
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
- Jump failureCases3 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-
- // Checks out okay! - getDirectOffset
- loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
-
- ret();
-
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
-
- patchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
- patchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
- patchBuffer.link(failureCases3, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
-
- stubInfo->stubRoutine = code;
-
- Jump::patch(returnAddress, code);
-#endif
+ // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
}
-#if USE(CTI_REPATCH_PIC)
-void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, size_t cachedOffset)
+void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset)
{
- Jump failureCase = checkStructure(X86::eax, structure);
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
- loadPtr(Address(X86::eax, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+ Jump failureCase = checkStructure(regT0, structure);
+ bool needsStubLink = false;
+ bool isDirect = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(regT0, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else {
+ isDirect = true;
+ compileGetDirectOffset(regT0, regT0, cachedOffset);
+ }
Jump success = jump();
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- ASSERT(code);
- PatchBuffer patchBuffer(code);
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
// Use the patch information to link the failure cases back to the original slow case routine.
- void* lastProtoBegin = polymorphicStructures->list[currentIndex - 1].stubRoutine;
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code());
if (!lastProtoBegin)
- lastProtoBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall;
+ lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
patchBuffer.link(failureCase, lastProtoBegin);
// On success return back to the hot path code, at a point where it will perform the store to dest for us.
- intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
- patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
- structure->ref();
- polymorphicStructures->list[currentIndex].set(code, structure);
+ MacroAssemblerCodeRef stubCode = patchBuffer.finalizeCode();
+
+ polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
- void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
- Jump::patch(jumpLocation, code);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code()));
}
-void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, size_t cachedOffset, CallFrame* callFrame)
+void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
// The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
// referencing the prototype object - let's speculatively load its table nice and early!)
JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
- PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(protoPropertyStorage, X86::edx);
// Check regT0 is an object of the right Structure.
- Jump failureCases1 = checkStructure(X86::eax, structure);
+ Jump failureCases1 = checkStructure(regT0, structure);
// Check that the prototype object's Structure has not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
- move(ImmPtr(prototypeStructure), X86::ebx);
- Jump failureCases2 = jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress));
-#else
- Jump failureCases2 = jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(prototypeStructure));
-#endif
-
- // Checks out okay! - getDirectOffset
- loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+ move(TrustedImmPtr(protoObject), regT3);
+ Jump failureCases2 = branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototypeStructure));
+
+ // Checks out okay!
+ bool needsStubLink = false;
+ bool isDirect = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else {
+ isDirect = true;
+ compileGetDirectOffset(protoObject, regT0, cachedOffset);
+ }
Jump success = jump();
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
// Use the patch information to link the failure cases back to the original slow case routine.
- void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
patchBuffer.link(failureCases1, lastProtoBegin);
patchBuffer.link(failureCases2, lastProtoBegin);
// On success return back to the hot path code, at a point where it will perform the store to dest for us.
- intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
- patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
- structure->ref();
- prototypeStructure->ref();
- prototypeStructures->list[currentIndex].set(code, structure, prototypeStructure);
+ MacroAssemblerCodeRef stubCode = patchBuffer.finalizeCode();
+ prototypeStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, prototypeStructure, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
- void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
- Jump::patch(jumpLocation, code);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code()));
}
-void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, CallFrame* callFrame)
+void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame)
{
ASSERT(count);
-
JumpList bucketsOfFail;
// Check regT0 is an object of the right Structure.
- Jump baseObjectCheck = checkStructure(X86::eax, structure);
+ Jump baseObjectCheck = checkStructure(regT0, structure);
bucketsOfFail.append(baseObjectCheck);
Structure* currStructure = structure;
- RefPtr<Structure>* chainEntries = chain->head();
+ WriteBarrier<Structure>* it = chain->head();
JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i) {
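+ // Walk the prototype chain, planting a Structure check against each object
+ // along the way.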
+ for (unsigned i = 0; i < count; ++i, ++it) {
protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = chainEntries[i].get();
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
- move(ImmPtr(currStructure), X86::ebx);
- bucketsOfFail.append(jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress)));
-#else
- bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
}
ASSERT(protoObject);
-
- PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(protoPropertyStorage, X86::edx);
- loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+
+ bool needsStubLink = false;
+ bool isDirect = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else {
+ isDirect = true;
+ compileGetDirectOffset(protoObject, regT0, cachedOffset);
+ }
Jump success = jump();
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
+
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
// Use the patch information to link the failure cases back to the original slow case routine.
- void* lastProtoBegin = prototypeStructures->list[currentIndex - 1].stubRoutine;
+ CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code());
patchBuffer.link(bucketsOfFail, lastProtoBegin);
// On success return back to the hot path code, at a point where it will perform the store to dest for us.
- intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
- patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
+
+ CodeRef stubRoutine = patchBuffer.finalizeCode();
// Track the stub we have created so that it will be deleted later.
- structure->ref();
- chain->ref();
- prototypeStructures->list[currentIndex].set(code, structure, chain);
+ prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);
// Finally patch the jump to slow case back in the hot path to jump here instead.
- void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
- Jump::patch(jumpLocation, code);
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
}
-#endif
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, size_t cachedOffset, void* returnAddress, CallFrame* callFrame)
+void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
{
-#if USE(CTI_REPATCH_PIC)
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- Jump::patch(returnAddress, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_list));
-
ASSERT(count);
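+    // This builds the initial, monomorphic chain stub; the relink at the end
+    // sends future misses to the polymorphic proto-list path instead.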
-
+
JumpList bucketsOfFail;
-    // Check eax is an object of the right Structure.
+    // Check that regT0 holds an object with the expected Structure.
- bucketsOfFail.append(checkStructure(X86::eax, structure));
+ bucketsOfFail.append(checkStructure(regT0, structure));
Structure* currStructure = structure;
- RefPtr<Structure>* chainEntries = chain->head();
+ WriteBarrier<Structure>* it = chain->head();
JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i) {
+ for (unsigned i = 0; i < count; ++i, ++it) {
protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = chainEntries[i].get();
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
- move(ImmPtr(currStructure), X86::ebx);
- bucketsOfFail.append(jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress)));
-#else
- bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
+ currStructure = it->get();
+ testPrototype(protoObject, bucketsOfFail);
}
ASSERT(protoObject);
- PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(protoPropertyStorage, X86::edx);
- loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
+ bool needsStubLink = false;
+ if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ needsStubLink = true;
+ compileGetDirectOffset(protoObject, regT1, cachedOffset);
+ JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
+ stubCall.addArgument(regT1);
+ stubCall.addArgument(regT0);
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
+ needsStubLink = true;
+ JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
+ stubCall.addArgument(TrustedImmPtr(protoObject));
+ stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
+ stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
+ stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
+ stubCall.call();
+ } else
+ compileGetDirectOffset(protoObject, regT0, cachedOffset);
Jump success = jump();
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
+ LinkBuffer patchBuffer(*m_globalData, this, m_codeBlock);
- // Use the patch information to link the failure cases back to the original slow case routine.
- void* slowCaseBegin = reinterpret_cast<char*>(stubInfo->callReturnLocation) - patchOffsetGetByIdSlowCaseCall;
+ if (needsStubLink) {
+ for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
+ if (iter->to)
+ patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ }
+ }
- patchBuffer.link(bucketsOfFail, slowCaseBegin);
+ // Use the patch information to link the failure cases back to the original slow case routine.
+ patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
-    // On success return back to the hot patch code, at a point it will perform the store to dest for us.
+    // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- intptr_t successDest = reinterpret_cast<intptr_t>(stubInfo->hotPathBegin) + patchOffsetGetByIdPutResult;
- patchBuffer.link(success, reinterpret_cast<void*>(successDest));
+ patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
// Track the stub we have created so that it will be deleted later.
- stubInfo->stubRoutine = code;
+ CodeRef stubRoutine = patchBuffer.finalizeCode();
+ stubInfo->stubRoutine = stubRoutine;
// Finally patch the jump to slow case back in the hot path to jump here instead.
- void* jumpLocation = reinterpret_cast<char*>(stubInfo->hotPathBegin) + patchOffsetGetByIdBranchToSlowCase;
- Jump::patch(jumpLocation, code);
-#else
- ASSERT(count);
-
- JumpList bucketsOfFail;
+ CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code()));
- // Check eax is an object of the right Structure.
- bucketsOfFail.append(emitJumpIfNotJSCell(X86::eax));
- bucketsOfFail.append(checkStructure(X86::eax, structure));
+    // We don't want to patch more than once - in future, misses go to cti_op_get_by_id_proto_list.
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+}
- Structure* currStructure = structure;
- RefPtr<Structure>* chainEntries = chain->head();
- JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i) {
- protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = chainEntries[i].get();
-
- // Check the prototype object's Structure had not changed.
- Structure** prototypeStructureAddress = &(protoObject->m_structure);
-#if PLATFORM(X86_64)
- move(ImmPtr(currStructure), X86::ebx);
- bucketsOfFail.append(jnePtr(X86::ebx, AbsoluteAddress(prototypeStructureAddress)));
-#else
- bucketsOfFail.append(jnePtr(AbsoluteAddress(prototypeStructureAddress), ImmPtr(currStructure)));
-#endif
+void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+{
+ int skip = currentInstruction[3].u.operand;
+
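+    // Resolve the variable by walking 'skip' links up the scope chain; when
+    // the block's activation may not have been created yet, the first link is
+    // followed only if the activation register is non-zero.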
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT0);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
+ activationNotCreated.link(this);
}
- ASSERT(protoObject);
+ while (skip--)
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, next)), regT0);
+
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(ScopeChainNode, object)), regT0);
+ loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
+ loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
+ emitValueProfilingSite();
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
- PropertyStorage* protoPropertyStorage = &protoObject->m_propertyStorage;
- loadPtr(protoPropertyStorage, X86::edx);
- loadPtr(Address(X86::edx, cachedOffset * sizeof(JSValuePtr)), X86::eax);
- ret();
+void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+{
+ int skip = currentInstruction[2].u.operand;
+
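+    // Same scope-chain walk as get_scoped_var; the store is preceded by a
+    // write barrier on the resolved scope object (skipped for immediates).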
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
+
+ emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT1);
+ bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
+ ASSERT(skip || !checkTopLevel);
+ if (checkTopLevel && skip--) {
+ Jump activationNotCreated;
+ if (checkTopLevel)
+ activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
+ activationNotCreated.link(this);
+ }
+ while (skip--)
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, next)), regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(ScopeChainNode, object)), regT1);
+
+ emitWriteBarrier(regT1, regT0, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
+ loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
+ storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
+}
- patchBuffer.link(bucketsOfFail, reinterpret_cast<void*>(Interpreter::cti_op_get_by_id_proto_fail));
+void JIT::emit_op_get_global_var(Instruction* currentInstruction)
+{
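+    // Load the register array pointer out of the global object's m_registers
+    // rather than embedding the array address, so the code stays valid if the
+    // register array is reallocated.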
+ JSVariableObject* globalObject = m_codeBlock->globalObject();
+ loadPtr(&globalObject->m_registers, regT0);
+ loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
+ emitValueProfilingSite();
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
- stubInfo->stubRoutine = code;
+void JIT::emit_op_put_global_var(Instruction* currentInstruction)
+{
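+    // Store into the global object's register array, then emit a write
+    // barrier on the global object (immediate values are filtered out).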
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
- Jump::patch(returnAddress, code);
-#endif
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+
+ move(TrustedImmPtr(globalObject), regT1);
+ loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
+ storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
+ emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
}
-void JIT::privateCompilePutByIdReplace(StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, void* returnAddress)
+void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
{
- // Check eax is an object of the right Structure.
- Jump failureCases1 = emitJumpIfNotJSCell(X86::eax);
- Jump failureCases2 = checkStructure(X86::eax, structure);
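+    // Return the inline cache to its unpatched state: call back into
+    // cti_op_get_by_id, poison the inline Structure compare with an
+    // impossible pointer, and route the structure-check jump to the cold path.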
+ repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), reinterpret_cast<void*>(-1));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), 0);
+ repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck), stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
+}
- // checks out okay! - putDirectOffset
- loadPtr(Address(X86::eax, FIELD_OFFSET(JSObject, m_propertyStorage)), X86::eax);
- storePtr(X86::edx, Address(X86::eax, cachedOffset * sizeof(JSValuePtr)));
- ret();
+void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
+{
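+    // Pick the slow-path entry point that matches the original put's
+    // directness, then poison the inline Structure compare and offset.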
+ if (isDirectPutById(stubInfo))
+ repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct);
+ else
+ repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id);
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), reinterpret_cast<void*>(-1));
+ repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.put.displacementLabel), 0);
+}
- void* code = m_assembler.executableCopy(m_codeBlock->executablePool());
- PatchBuffer patchBuffer(code);
-
- patchBuffer.link(failureCases1, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
- patchBuffer.link(failureCases2, reinterpret_cast<void*>(Interpreter::cti_op_put_by_id_fail));
+#endif // USE(JSVALUE64)
- stubInfo->stubRoutine = code;
+void JIT::emitWriteBarrier(RegisterID owner, RegisterID value, RegisterID scratch, RegisterID scratch2, WriteBarrierMode mode, WriteBarrierUseKind useKind)
+{
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(scratch);
+ UNUSED_PARAM(scratch2);
+ UNUSED_PARAM(useKind);
+ UNUSED_PARAM(value);
+ UNUSED_PARAM(mode);
+ ASSERT(owner != scratch);
+ ASSERT(owner != scratch2);
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ emitCount(WriteBarrierCounters::jitCounterFor(useKind));
+#endif
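+    // Generational barrier: find the owner's MarkedBlock, consult an
+    // approximate mark byte for the owner, and only if it is set, dirty the
+    // card covering the owner so the collector will rescan it.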
- Jump::patch(returnAddress, code);
+#if ENABLE(GGC)
+ Jump filterCells;
+ if (mode == ShouldFilterImmediates)
+ filterCells = emitJumpIfNotJSCell(value);
+ move(owner, scratch);
+ andPtr(TrustedImm32(static_cast<int32_t>(MarkedBlock::blockMask)), scratch);
+ move(owner, scratch2);
+    // Consume an additional 8 bits, as we're using an approximate filter.
+ rshift32(TrustedImm32(MarkedBlock::atomShift + 8), scratch2);
+ andPtr(TrustedImm32(MarkedBlock::atomMask >> 8), scratch2);
+ Jump filter = branchTest8(Zero, BaseIndex(scratch, scratch2, TimesOne, MarkedBlock::offsetOfMarks()));
+ move(owner, scratch2);
+ rshift32(TrustedImm32(MarkedBlock::cardShift), scratch2);
+ andPtr(TrustedImm32(MarkedBlock::cardMask), scratch2);
+ store8(TrustedImm32(1), BaseIndex(scratch, scratch2, TimesOne, MarkedBlock::offsetOfCards()));
+ filter.link(this);
+ if (mode == ShouldFilterImmediates)
+ filterCells.link(this);
+#endif
}
+void JIT::emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode mode, WriteBarrierUseKind useKind)
+{
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(scratch);
+ UNUSED_PARAM(useKind);
+ UNUSED_PARAM(value);
+ UNUSED_PARAM(mode);
+
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ emitCount(WriteBarrierCounters::jitCounterFor(useKind));
#endif
+
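+    // With a compile-time-known owner, Heap::addressOfCardFor() yields a
+    // constant card address, so the barrier reduces to a single byte store
+    // (plus the optional immediate filter).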
+#if ENABLE(GGC)
+ Jump filterCells;
+ if (mode == ShouldFilterImmediates)
+ filterCells = emitJumpIfNotJSCell(value);
+ uint8_t* cardAddress = Heap::addressOfCardFor(owner);
+ move(TrustedImmPtr(cardAddress), scratch);
+ store8(TrustedImm32(1), Address(scratch));
+ if (mode == ShouldFilterImmediates)
+ filterCells.link(this);
+#endif
+}
+
+void JIT::testPrototype(JSValue prototype, JumpList& failureCases)
+{
+ if (prototype.isNull())
+ return;
+
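+    // A null prototype terminates the chain, so there is nothing to check;
+    // otherwise verify the prototype cell still has its compile-time Structure.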
+ ASSERT(prototype.isCell());
+ move(TrustedImmPtr(prototype.asCell()), regT3);
+ failureCases.append(branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), TrustedImmPtr(prototype.asCell()->structure())));
+}
+
+void JIT::patchMethodCallProto(JSGlobalData& globalData, CodeBlock* codeBlock, MethodCallLinkInfo& methodCallLinkInfo, StructureStubInfo& stubInfo, JSObject* callee, Structure* structure, JSObject* proto, ReturnAddressPtr returnAddress)
+{
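+    // Fill in the method-check fast path: cache the base Structure, the
+    // prototype and its Structure, and the callee at their patch offsets,
+    // then retarget the caller at the update stub.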
+ RepatchBuffer repatchBuffer(codeBlock);
+
+ CodeLocationDataLabelPtr structureLocation = methodCallLinkInfo.cachedStructure.location();
+ methodCallLinkInfo.cachedStructure.set(globalData, structureLocation, codeBlock->ownerExecutable(), structure);
+
+ Structure* prototypeStructure = proto->structure();
+ methodCallLinkInfo.cachedPrototypeStructure.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckProtoStructureToCompare), codeBlock->ownerExecutable(), prototypeStructure);
+ methodCallLinkInfo.cachedPrototype.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckProtoObj), codeBlock->ownerExecutable(), proto);
+ methodCallLinkInfo.cachedFunction.set(globalData, structureLocation.dataLabelPtrAtOffset(stubInfo.patch.baseline.methodCheckPutFunction), codeBlock->ownerExecutable(), callee);
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_method_check_update));
+}
+
+bool JIT::isDirectPutById(StructureStubInfo* stubInfo)
+{
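+    // Transitions record directness in the access type; for replace and
+    // generic stubs, infer it from which C function the call site currently
+    // targets.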
+ switch (stubInfo->accessType) {
+ case access_put_by_id_transition_normal:
+ return false;
+ case access_put_by_id_transition_direct:
+ return true;
+ case access_put_by_id_replace:
+ case access_put_by_id_generic: {
+ void* oldCall = MacroAssembler::readCallTarget(stubInfo->callReturnLocation).executableAddress();
+ if (oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct)
+ || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_generic)
+ || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_fail))
+ return true;
+ ASSERT(oldCall == bitwise_cast<void*>(cti_op_put_by_id)
+ || oldCall == bitwise_cast<void*>(cti_op_put_by_id_generic)
+ || oldCall == bitwise_cast<void*>(cti_op_put_by_id_fail));
+ return false;
+ }
+ default:
+ ASSERT_NOT_REACHED();
+ return false;
+ }
+}
} // namespace JSC