X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/f9bf01c6616d5ddcf65b13b33cedf9e387ff7a63..cb9aa2694aba0ae4f946ed34b8e0f6c99c1cfe44:/assembler/AbstractMacroAssembler.h

diff --git a/assembler/AbstractMacroAssembler.h b/assembler/AbstractMacroAssembler.h
index 198e8d1..a209900 100644
--- a/assembler/AbstractMacroAssembler.h
+++ b/assembler/AbstractMacroAssembler.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2014 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,23 +26,72 @@
 #ifndef AbstractMacroAssembler_h
 #define AbstractMacroAssembler_h
 
-#include <wtf/Platform.h>
-
-#include <MacroAssemblerCodeRef.h>
-#include <CodeLocation.h>
+#include "AbortReason.h"
+#include "AssemblerBuffer.h"
+#include "CodeLocation.h"
+#include "MacroAssemblerCodeRef.h"
+#include "Options.h"
+#include "WeakRandom.h"
+#include <wtf/CryptographicallyRandomNumber.h>
 #include <wtf/Noncopyable.h>
-#include <wtf/UnusedParam.h>
 
 #if ENABLE(ASSEMBLER)
 
 namespace JSC {
 
+inline bool isARMv7s()
+{
+#if CPU(APPLE_ARMV7S)
+    return true;
+#else
+    return false;
+#endif
+}
+
+inline bool isARM64()
+{
+#if CPU(ARM64)
+    return true;
+#else
+    return false;
+#endif
+}
+
+inline bool isX86()
+{
+#if CPU(X86_64) || CPU(X86)
+    return true;
+#else
+    return false;
+#endif
+}
+
+inline bool optimizeForARMv7s()
+{
+    return isARMv7s() && Options::enableArchitectureSpecificOptimizations();
+}
+
+inline bool optimizeForARM64()
+{
+    return isARM64() && Options::enableArchitectureSpecificOptimizations();
+}
+
+inline bool optimizeForX86()
+{
+    return isX86() && Options::enableArchitectureSpecificOptimizations();
+}
+
 class LinkBuffer;
 class RepatchBuffer;
+class Watchpoint;
+namespace DFG {
+struct OSRExit;
+}
 
 template <class AssemblerType>
 class AbstractMacroAssembler {
 public:
+    friend class JITWriteBarrierBase;
     typedef AssemblerType AssemblerType_T;
 
     typedef MacroAssemblerCodePtr CodePtr;
@@ -52,22 +101,31 @@ public:
     typedef typename AssemblerType::RegisterID RegisterID;
     typedef typename AssemblerType::FPRegisterID FPRegisterID;
 
-    typedef typename AssemblerType::JmpSrc JmpSrc;
-    typedef typename AssemblerType::JmpDst JmpDst;
+
+    static RegisterID firstRegister() { return AssemblerType::firstRegister(); }
+    static RegisterID lastRegister() { return AssemblerType::lastRegister(); }
+
+    static FPRegisterID firstFPRegister() { return AssemblerType::firstFPRegister(); }
+    static FPRegisterID lastFPRegister() { return AssemblerType::lastFPRegister(); }
 
     // Section 1: MacroAssembler operand types
     //
     // The following types are used as operands to MacroAssembler operations,
    // describing immediate and memory operands to the instructions to be planted.
-
     enum Scale {
         TimesOne,
         TimesTwo,
         TimesFour,
         TimesEight,
     };
+
+    static Scale timesPtr()
+    {
+        if (sizeof(void*) == 4)
+            return TimesFour;
+        return TimesEight;
+    }
 
     // Address:
     //
@@ -78,11 +136,27 @@ public:
             , offset(offset)
         {
         }
-
+
+        Address withOffset(int32_t additionalOffset)
+        {
+            return Address(base, offset + additionalOffset);
+        }
+
         RegisterID base;
         int32_t offset;
     };
 
+    struct ExtendedAddress {
+        explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
+            : base(base)
+            , offset(offset)
+        {
+        }
+
+        RegisterID base;
+        intptr_t offset;
+    };
+
     // ImplicitAddress:
     //
     // This class is used for explicit 'load' and 'store' operations
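[Editorial sketch, not part of the patch: how the operand types above compose. It assumes a concrete JIT type such as JSC::MacroAssembler, whose load32/store32 operations and BaseIndex operand are declared outside this diff; Address, withOffset() and timesPtr() are exactly as defined in the hunk above, and emitFieldCopy is a hypothetical helper name.]

    // Copy a 32-bit field at offset 8 of an object to offset 12.
    template<typename JIT> // e.g. JSC::MacroAssembler
    void emitFieldCopy(JIT& jit, typename JIT::RegisterID object, typename JIT::RegisterID scratch)
    {
        typename JIT::Address src(object, 8);    // base register + constant offset
        jit.load32(src, scratch);                // scratch = *(int32_t*)(object + 8)
        jit.store32(scratch, src.withOffset(4)); // *(int32_t*)(object + 12) = scratch
        // For pointer-sized indexing, timesPtr() selects TimesFour on 32-bit
        // targets and TimesEight on 64-bit ones, e.g. in a BaseIndex operand.
    }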
@@ -137,71 +211,137 @@ public:
     // Describes a memory operand given by a pointer. For regular load & store
     // operations an unwrapped void* will be used, rather than using this.
     struct AbsoluteAddress {
-        explicit AbsoluteAddress(void* ptr)
+        explicit AbsoluteAddress(const void* ptr)
             : m_ptr(ptr)
         {
         }
 
-        void* m_ptr;
+        const void* m_ptr;
     };
 
-    // ImmPtr:
+    // TrustedImmPtr:
     //
     // A pointer sized immediate operand to an instruction - this is wrapped
     // in a class requiring explicit construction in order to differentiate
     // from pointers used as absolute addresses to memory operations
-    struct ImmPtr {
-        explicit ImmPtr(void* value)
+    struct TrustedImmPtr {
+        TrustedImmPtr() { }
+
+        explicit TrustedImmPtr(const void* value)
             : m_value(value)
         {
         }
+
+        // This is only here so that TrustedImmPtr(0) does not confuse the C++
+        // overload handling rules.
+        explicit TrustedImmPtr(int value)
+            : m_value(0)
+        {
+            ASSERT_UNUSED(value, !value);
+        }
+
+        explicit TrustedImmPtr(size_t value)
+            : m_value(reinterpret_cast<void*>(value))
+        {
+        }
 
         intptr_t asIntptr()
         {
             return reinterpret_cast<intptr_t>(m_value);
         }
 
-        void* m_value;
+        const void* m_value;
+    };
+
+    struct ImmPtr : private TrustedImmPtr
+    {
+        explicit ImmPtr(const void* value)
+            : TrustedImmPtr(value)
+        {
+        }
+
+        TrustedImmPtr asTrustedImmPtr() { return *this; }
     };
 
-    // Imm32:
+    // TrustedImm32:
     //
     // A 32bit immediate operand to an instruction - this is wrapped in a
     // class requiring explicit construction in order to prevent RegisterIDs
     // (which are implemented as an enum) from accidentally being passed as
     // immediate values.
-    struct Imm32 {
-        explicit Imm32(int32_t value)
+    struct TrustedImm32 {
+        TrustedImm32() { }
+
+        explicit TrustedImm32(int32_t value)
             : m_value(value)
-#if CPU(ARM)
-            , m_isPointer(false)
-#endif
         {
         }
 
 #if !CPU(X86_64)
-        explicit Imm32(ImmPtr ptr)
+        explicit TrustedImm32(TrustedImmPtr ptr)
             : m_value(ptr.asIntptr())
-#if CPU(ARM)
-            , m_isPointer(true)
-#endif
         {
         }
 #endif
 
         int32_t m_value;
-#if CPU(ARM)
-        // We rely on being able to regenerate code to recover exception handling
-        // information. Since ARMv7 supports 16-bit immediates there is a danger
-        // that if pointer values change the layout of the generated code will change.
-        // To avoid this problem, always generate pointers (and thus Imm32s constructed
-        // from ImmPtrs) with a code sequence that is able to represent any pointer
-        // value - don't use a more compact form in these cases.
-        bool m_isPointer;
+    };
+
+
+    struct Imm32 : private TrustedImm32 {
+        explicit Imm32(int32_t value)
+            : TrustedImm32(value)
+        {
+        }
+#if !CPU(X86_64)
+        explicit Imm32(TrustedImmPtr ptr)
+            : TrustedImm32(ptr)
+        {
+        }
 #endif
+        const TrustedImm32& asTrustedImm32() const { return *this; }
+    };
+
+    // TrustedImm64:
+    //
+    // A 64bit immediate operand to an instruction - this is wrapped in a
+    // class requiring explicit construction in order to prevent RegisterIDs
+    // (which are implemented as an enum) from accidentally being passed as
+    // immediate values.
+    struct TrustedImm64 {
+        TrustedImm64() { }
+
+        explicit TrustedImm64(int64_t value)
+            : m_value(value)
+        {
+        }
+#if CPU(X86_64) || CPU(ARM64)
+        explicit TrustedImm64(TrustedImmPtr ptr)
+            : m_value(ptr.asIntptr())
+        {
+        }
+#endif
+        int64_t m_value;
+    };
+
+    struct Imm64 : private TrustedImm64
+    {
+        explicit Imm64(int64_t value)
+            : TrustedImm64(value)
+        {
+        }
+#if CPU(X86_64) || CPU(ARM64)
+        explicit Imm64(TrustedImmPtr ptr)
+            : TrustedImm64(ptr)
+        {
+        }
+#endif
+        const TrustedImm64& asTrustedImm64() const { return *this; }
+    };
 
     // Section 2: MacroAssembler code buffer handles
     //
     // The following types are used to reference items in the code buffer
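[Editorial note, not part of the patch: the Trusted/untrusted split above supports JIT hardening. TrustedImm* values are compiler-controlled constants, while plain Imm32/Imm64/ImmPtr may carry attacker-influenced values that a concrete assembler can choose to blind (see the shouldBlindForSpecificArch() hooks later in this diff). A standalone toy of the usual blinding trick, not JSC code:]

    #include <cstdint>
    #include <random>

    // Instead of embedding an untrusted 32-bit constant directly in executable
    // memory, embed (value ^ key) and emit one extra XOR, so the raw
    // attacker-chosen bit pattern never appears in the instruction stream.
    uint32_t blindedConstant(uint32_t untrusted)
    {
        static std::mt19937 generator{std::random_device{}()};
        const uint32_t key = generator();          // per-site random key
        const uint32_t embedded = untrusted ^ key; // what lands in the code
        return embedded ^ key;                     // what the emitted XOR yields
    }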
@@ -217,9 +357,11 @@ public:
     class Label {
         template<class TemplateAssemblerType>
         friend class AbstractMacroAssembler;
+        friend struct DFG::OSRExit;
         friend class Jump;
         friend class MacroAssemblerCodeRef;
         friend class LinkBuffer;
+        friend class Watchpoint;
 
     public:
         Label()
@@ -229,12 +371,42 @@ public:
         Label(AbstractMacroAssembler<AssemblerType>* masm)
             : m_label(masm->m_assembler.label())
         {
+            masm->invalidateAllTempRegisters();
         }
+
+        bool isSet() const { return m_label.isSet(); }
+
+    private:
+        AssemblerLabel m_label;
+    };
+
+    // ConvertibleLoadLabel:
+    //
+    // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
+    // so that:
+    //
+    //     loadPtr(Address(a, i), b)
+    //
+    // becomes:
+    //
+    //     addPtr(TrustedImmPtr(i), a, b)
+    class ConvertibleLoadLabel {
+        template<class TemplateAssemblerType>
+        friend class AbstractMacroAssembler;
+        friend class LinkBuffer;
 
-        bool isUsed() const { return m_label.isUsed(); }
-        void used() { m_label.used(); }
+    public:
+        ConvertibleLoadLabel()
+        {
+        }
+
+        ConvertibleLoadLabel(AbstractMacroAssembler<AssemblerType>* masm)
+            : m_label(masm->m_assembler.labelIgnoringWatchpoints())
+        {
+        }
+
+        bool isSet() const { return m_label.isSet(); }
     private:
-        JmpDst m_label;
+        AssemblerLabel m_label;
     };
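[Editorial note, not part of the patch: for concreteness, on x86-64 a ConvertibleLoadLabel site toggles between mov dst, [base+offset] (a load) and lea dst, [base+offset] (an address computation). The two encodings are the same length and differ only in the opcode byte, which is what makes an in-place patch safe. A standalone byte-level illustration, not the real X86Assembler code:]

    #include <cstdint>

    // Assumes `insn` points at the REX prefix of an instruction emitted in one
    // of the two shapes above: REX.W + 0x8B (mov) or REX.W + 0x8D (lea).
    inline void toggleLoadAddressComputation(uint8_t* insn)
    {
        insn[1] = (insn[1] == 0x8B) ? 0x8D : 0x8B;
        // Architectures with incoherent instruction caches would also need a
        // flush here (cf. cacheFlush later in this header); x86 does not.
    }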
 
     // DataLabelPtr:
@@ -254,14 +426,16 @@ public:
             : m_label(masm->m_assembler.label())
         {
         }
+
+        bool isSet() const { return m_label.isSet(); }
 
     private:
-        JmpDst m_label;
+        AssemblerLabel m_label;
     };
 
     // DataLabel32:
     //
-    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
+    // A DataLabel32 is used to refer to a location in the code containing a 32-bit constant to be
     // patched after the code has been generated.
     class DataLabel32 {
         template<class TemplateAssemblerType>
@@ -277,8 +451,39 @@ public:
         {
         }
 
+        AssemblerLabel label() const { return m_label; }
+
+    private:
+        AssemblerLabel m_label;
+    };
+
+    // DataLabelCompact:
+    //
+    // A DataLabelCompact is used to refer to a location in the code containing a
+    // compact immediate to be patched after the code has been generated.
+    class DataLabelCompact {
+        template<class TemplateAssemblerType>
+        friend class AbstractMacroAssembler;
+        friend class LinkBuffer;
+    public:
+        DataLabelCompact()
+        {
+        }
+
+        DataLabelCompact(AbstractMacroAssembler<AssemblerType>* masm)
+            : m_label(masm->m_assembler.label())
+        {
+        }
+
+        DataLabelCompact(AssemblerLabel label)
+            : m_label(label)
+        {
+        }
+
+        AssemblerLabel label() const { return m_label; }
+
     private:
-        JmpDst m_label;
+        AssemblerLabel m_label;
     };
 
     // Call:
@@ -304,8 +509,8 @@ public:
         {
         }
 
-        Call(JmpSrc jmp, Flags flags)
-            : m_jmp(jmp)
+        Call(AssemblerLabel jmp, Flags flags)
+            : m_label(jmp)
             , m_flags(flags)
         {
         }
@@ -317,10 +522,10 @@ public:
 
         static Call fromTailJump(Jump jump)
         {
-            return Call(jump.m_jmp, Linkable);
+            return Call(jump.m_label, Linkable);
         }
 
-        JmpSrc m_jmp;
+        AssemblerLabel m_label;
     private:
         Flags m_flags;
     };
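[Editorial note, not part of the patch: Call and Jump both just record an AssemblerLabel (an offset into the code buffer), so linking is backpatching: once the target is known, the displacement at the recorded offset is rewritten. Each concrete assembler implements the real linkJump; this standalone toy shows the idea for an x86-style rel32 field:]

    #include <cstdint>
    #include <cstring>
    #include <vector>

    struct ToyJump { size_t patchOffset; }; // where the 4-byte rel32 was emitted

    void linkToyJump(std::vector<uint8_t>& code, ToyJump jump, size_t targetOffset)
    {
        // The displacement is measured from the end of the rel32 field.
        int32_t rel32 = static_cast<int32_t>(targetOffset)
            - static_cast<int32_t>(jump.patchOffset + sizeof(int32_t));
        std::memcpy(code.data() + jump.patchOffset, &rel32, sizeof(rel32));
    }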
@@ -335,29 +540,144 @@ public:
         template<class TemplateAssemblerType>
         friend class AbstractMacroAssembler;
         friend class Call;
+        friend struct DFG::OSRExit;
         friend class LinkBuffer;
     public:
         Jump()
         {
         }
 
-        Jump(JmpSrc jmp)
-            : m_jmp(jmp)
+#if CPU(ARM_THUMB2)
+        // Fixme: this information should be stored in the instruction stream, not in the Jump object.
+        Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
         {
         }
+#elif CPU(ARM64)
+        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
+        {
+        }
+
+        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
+            , m_is64Bit(is64Bit)
+            , m_compareRegister(compareRegister)
+        {
+            ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
+        }
+
+        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
+            , m_bitNumber(bitNumber)
+            , m_compareRegister(compareRegister)
+        {
+            ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
+        }
+#elif CPU(SH4)
+        Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
+            : m_label(jmp)
+            , m_type(type)
+        {
+        }
+#else
+        Jump(AssemblerLabel jmp)
+            : m_label(jmp)
+        {
+        }
+#endif
 
-        void link(AbstractMacroAssembler<AssemblerType>* masm)
+        Label label() const
+        {
+            Label result;
+            result.m_label = m_label;
+            return result;
+        }
+
+        void link(AbstractMacroAssembler<AssemblerType>* masm) const
         {
-            masm->m_assembler.linkJump(m_jmp, masm->m_assembler.label());
+            masm->invalidateAllTempRegisters();
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+            masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
+#endif
+
+#if CPU(ARM_THUMB2)
+            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+#elif CPU(ARM64)
+            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
+                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
+            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
+                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
+            else
+                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+#elif CPU(SH4)
+            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
+#else
+            masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
+#endif
         }
 
-        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
+        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const
         {
-            masm->m_assembler.linkJump(m_jmp, label.m_label);
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+            masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
+#endif
+
+#if CPU(ARM_THUMB2)
+            masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
+#elif CPU(ARM64)
+            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
+                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
+            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
+                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
+            else
+                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
+#else
+            masm->m_assembler.linkJump(m_label, label.m_label);
+#endif
         }
 
+        bool isSet() const { return m_label.isSet(); }
+
     private:
-        JmpSrc m_jmp;
+        AssemblerLabel m_label;
+#if CPU(ARM_THUMB2)
+        ARMv7Assembler::JumpType m_type;
+        ARMv7Assembler::Condition m_condition;
+#elif CPU(ARM64)
+        ARM64Assembler::JumpType m_type;
+        ARM64Assembler::Condition m_condition;
+        bool m_is64Bit;
+        unsigned m_bitNumber;
+        ARM64Assembler::RegisterID m_compareRegister;
+#endif
+#if CPU(SH4)
+        SH4Assembler::JumpType m_type;
+#endif
     };
 
+    struct PatchableJump {
+        PatchableJump()
+        {
+        }
+
+        explicit PatchableJump(Jump jump)
+            : m_jump(jump)
+        {
+        }
+
+        operator Jump&() { return m_jump; }
+
+        Jump m_jump;
+    };
+
     // JumpList:
     //
     // A JumpList is a set of Jump objects.
@@ -368,7 +688,15 @@ public:
         friend class LinkBuffer;
 
     public:
-        typedef Vector<Jump, 16> JumpVector;
+        typedef Vector<Jump, 2> JumpVector;
+
+        JumpList() { }
+
+        JumpList(Jump jump)
+        {
+            if (jump.isSet())
+                append(jump);
+        }
 
         void link(AbstractMacroAssembler<AssemblerType>* masm)
         {
@@ -391,7 +719,7 @@ public:
             m_jumps.append(jump);
         }
 
-        void append(JumpList& other)
+        void append(const JumpList& other)
         {
             m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
         }
@@ -401,7 +729,12 @@ public:
             return !m_jumps.size();
         }
 
-        const JumpVector& jumps() { return m_jumps; }
+        void clear()
+        {
+            m_jumps.clear();
+        }
+
+        const JumpVector& jumps() const { return m_jumps; }
 
     private:
         JumpVector m_jumps;
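[Editorial sketch, not part of the patch: typical JumpList usage. It assumes JSC's concrete MacroAssembler, whose branch32, jump and move operations are declared outside this diff; several guards funnel into one slow path, and the fast path's jump is then linked past it.]

    #include "MacroAssembler.h" // assumed WebKit build context

    void emitClampToRange(JSC::MacroAssembler& jit, JSC::MacroAssembler::RegisterID value)
    {
        typedef JSC::MacroAssembler JIT;
        JIT::JumpList outOfRange;
        // Every appended jump will later be linked to the same destination.
        outOfRange.append(jit.branch32(JIT::LessThan, value, JIT::TrustedImm32(0)));
        outOfRange.append(jit.branch32(JIT::GreaterThan, value, JIT::TrustedImm32(99)));
        JIT::Jump done = jit.jump(); // fast path: skip the clamp below
        outOfRange.link(&jit);       // both guards land here
        jit.move(JIT::TrustedImm32(0), value);
        done.link(&jit);
    }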
@@ -409,97 +742,218 @@ public:
 
 
     // Section 3: Misc admin methods
-
-    static CodePtr trampolineAt(CodeRef ref, Label label)
+#if ENABLE(DFG_JIT)
+    Label labelIgnoringWatchpoints()
     {
-        return CodePtr(AssemblerType::getRelocatedAddress(ref.m_code.dataLocation(), label.m_label));
+        Label result;
+        result.m_label = m_assembler.labelIgnoringWatchpoints();
+        return result;
     }
-
-    size_t size()
+#else
+    Label labelIgnoringWatchpoints()
     {
-        return m_assembler.size();
+        return label();
     }
+#endif
 
     Label label()
     {
         return Label(this);
     }
 
+    void padBeforePatch()
+    {
+        // Rely on the fact that asking for a label already does the padding.
+        (void)label();
+    }
+
+    Label watchpointLabel()
+    {
+        Label result;
+        result.m_label = m_assembler.labelForWatchpoint();
+        return result;
+    }
+
     Label align()
     {
         m_assembler.align(16);
         return Label(this);
     }
 
-    ptrdiff_t differenceBetween(Label from, Jump to)
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+    class RegisterAllocationOffset {
+    public:
+        RegisterAllocationOffset(unsigned offset)
+            : m_offset(offset)
+        {
+        }
+
+        void checkOffsets(unsigned low, unsigned high)
+        {
+            RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
+        }
+
+    private:
+        unsigned m_offset;
+    };
+
+    void addRegisterAllocationAtOffset(unsigned offset)
     {
-        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+        m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
     }
 
-    ptrdiff_t differenceBetween(Label from, Call to)
+    void clearRegisterAllocationOffsets()
     {
-        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+        m_registerAllocationForOffsets.clear();
     }
 
-    ptrdiff_t differenceBetween(Label from, Label to)
+    void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
     {
-        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+        if (offset1 > offset2)
+            std::swap(offset1, offset2);
+
+        size_t size = m_registerAllocationForOffsets.size();
+        for (size_t i = 0; i < size; ++i)
+            m_registerAllocationForOffsets[i].checkOffsets(offset1, offset2);
     }
+#endif
 
-    ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
+    template<typename T, typename U>
+    static ptrdiff_t differenceBetween(T from, U to)
     {
         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
     }
 
-    ptrdiff_t differenceBetween(Label from, DataLabel32 to)
+    static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
     {
-        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+        return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
     }
 
-    ptrdiff_t differenceBetween(DataLabelPtr from, Jump to)
+    unsigned debugOffset() { return m_assembler.debugOffset(); }
+
+    ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
     {
-        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+        AssemblerType::cacheFlush(code, size);
     }
 
-    ptrdiff_t differenceBetween(DataLabelPtr from, DataLabelPtr to)
+    AssemblerType m_assembler;
+
+protected:
+    AbstractMacroAssembler()
+        : m_randomSource(cryptographicallyRandomNumber())
     {
-        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+        invalidateAllTempRegisters();
     }
 
-    ptrdiff_t differenceBetween(DataLabelPtr from, Call to)
+    uint32_t random()
    {
-        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+        return m_randomSource.getUint32();
     }
 
-protected:
-    AssemblerType m_assembler;
+    WeakRandom m_randomSource;
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+    Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
+#endif
+
+    static bool haveScratchRegisterForBlinding()
+    {
+        return false;
+    }
+    static RegisterID scratchRegisterForBlinding()
+    {
+        UNREACHABLE_FOR_PLATFORM();
+        return firstRegister();
+    }
+    static bool canBlind() { return false; }
+    static bool shouldBlindForSpecificArch(uint32_t) { return false; }
+    static bool shouldBlindForSpecificArch(uint64_t) { return false; }
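[Editorial sketch, not part of the patch: the admin methods above in use. The single differenceBetween template replaces the removed per-type overloads because every label-like type now exposes an AssemblerLabel named m_label. This assumes JSC's concrete MacroAssembler with breakpoint(), declared outside this diff.]

    #include "MacroAssembler.h" // assumed WebKit build context

    ptrdiff_t measureEmittedBytes()
    {
        JSC::MacroAssembler jit;
        JSC::MacroAssembler::Label start = jit.label();
        jit.breakpoint(); // emit a one-instruction payload
        JSC::MacroAssembler::Label end = jit.label();
        // Works for any Label/Jump/DataLabel* pair, per the template above.
        return JSC::MacroAssembler::differenceBetween(start, end);
    }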
+
+    class CachedTempRegister {
+        friend class DataLabelPtr;
+        friend class DataLabel32;
+        friend class DataLabelCompact;
+        friend class Jump;
+        friend class Label;
+
+    public:
+        CachedTempRegister(AbstractMacroAssembler<AssemblerType>* masm, RegisterID registerID)
+            : m_masm(masm)
+            , m_registerID(registerID)
+            , m_value(0)
+            , m_validBit(1 << static_cast<unsigned>(registerID))
+        {
+            ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8));
+        }
+
+        ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }
+
+        ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }
+
+        bool value(intptr_t& value)
+        {
+            value = m_value;
+            return m_masm->isTempRegisterValid(m_validBit);
+        }
+
+        void setValue(intptr_t value)
+        {
+            m_value = value;
+            m_masm->setTempRegisterValid(m_validBit);
+        }
+
+        ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }
+
+    private:
+        AbstractMacroAssembler<AssemblerType>* m_masm;
+        RegisterID m_registerID;
+        intptr_t m_value;
+        unsigned m_validBit;
+    };
+
+    ALWAYS_INLINE void invalidateAllTempRegisters()
+    {
+        m_tempRegistersValidBits = 0;
+    }
+
+    ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
+    {
+        return (m_tempRegistersValidBits & registerMask);
+    }
+
+    ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
+    {
+        m_tempRegistersValidBits &= ~registerMask;
+    }
+
+    ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
+    {
+        m_tempRegistersValidBits |= registerMask;
+    }
+
+    unsigned m_tempRegistersValidBits;
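[Editorial note, not part of the patch: CachedTempRegister carries no comment of its own, so here is a standalone restatement of its protocol (toy code, not JSC's). Each scratch register owns one bit in m_tempRegistersValidBits; setValue() records what the register currently holds, and every label or freshly linked jump clears all bits, because control flow may join there with different register contents.]

    #include <cstdint>

    struct ToyRegisterCache {
        unsigned validBits = 0;       // one valid bit per cached register
        intptr_t cachedValue[2] = {}; // toy: two scratch registers

        bool knownValue(unsigned reg, intptr_t& value) const
        {
            value = cachedValue[reg];
            return validBits & (1u << reg);
        }

        void recordValue(unsigned reg, intptr_t value)
        {
            cachedValue[reg] = value;
            validBits |= 1u << reg;
        }

        // Called at every label and linked jump: a join point may be reached
        // with different register contents, so trust nothing.
        void invalidateAll() { validBits = 0; }

        // Emit a constant move only if the register isn't already known to
        // hold the wanted value; eliding that move is the whole payoff.
        void materialize(unsigned reg, intptr_t wanted)
        {
            intptr_t current;
            if (knownValue(reg, current) && current == wanted)
                return; // no code emitted at all
            // ... emit "mov reg, #wanted" here ...
            recordValue(reg, wanted);
        }
    };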
 
     friend class LinkBuffer;
     friend class RepatchBuffer;
 
     static void linkJump(void* code, Jump jump, CodeLocationLabel target)
     {
-        AssemblerType::linkJump(code, jump.m_jmp, target.dataLocation());
+        AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
     }
 
-    static void linkPointer(void* code, typename AssemblerType::JmpDst label, void* value)
+    static void linkPointer(void* code, AssemblerLabel label, void* value)
     {
         AssemblerType::linkPointer(code, label, value);
     }
 
-    static void* getLinkerAddress(void* code, typename AssemblerType::JmpSrc label)
-    {
-        return AssemblerType::getRelocatedAddress(code, label);
-    }
-
-    static void* getLinkerAddress(void* code, typename AssemblerType::JmpDst label)
+    static void* getLinkerAddress(void* code, AssemblerLabel label)
     {
         return AssemblerType::getRelocatedAddress(code, label);
     }
 
     static unsigned getLinkerCallReturnOffset(Call call)
     {
-        return AssemblerType::getCallReturnOffset(call.m_jmp);
+        return AssemblerType::getCallReturnOffset(call.m_label);
     }
 
     static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
@@ -512,6 +966,11 @@ protected:
         AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
     }
 
+    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
+    {
+        AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
+    }
+
     static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
     {
         AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
@@ -521,10 +980,20 @@ protected:
     {
         AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
     }
-
-    static void repatchLoadPtrToLEA(CodeLocationInstruction instruction)
+
+    static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
+    {
+        return AssemblerType::readPointer(dataLabelPtr.dataLocation());
+    }
+
+    static void replaceWithLoad(CodeLocationConvertibleLoad label)
+    {
+        AssemblerType::replaceWithLoad(label.dataLocation());
+    }
+
+    static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
     {
-        AssemblerType::repatchLoadPtrToLEA(instruction.dataLocation());
+        AssemblerType::replaceWithAddressComputation(label.dataLocation());
     }
 };
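[Editorial note, not part of the patch: the repatch/replace family above rewrites finished code in place, delegating the byte-level work to the concrete AssemblerType. A standalone sketch of what repatchInt32 amounts to on x86-64, where a CodeLocationDataLabel32 points just past a 32-bit immediate; a RepatchBuffer-style wrapper is assumed to handle write permissions and any needed cache flush:]

    #include <cstdint>
    #include <cstring>

    inline void toyRepatchInt32(void* where, int32_t value)
    {
        // `where` points one past the immediate, mirroring how data labels are
        // taken at an instruction's end during emission.
        std::memcpy(static_cast<uint8_t*>(where) - sizeof(int32_t), &value, sizeof(int32_t));
        // Targets with incoherent instruction caches must then call
        // cacheFlush (declared above); x86 stays coherent automatically.
    }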