/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#ifndef AbstractMacroAssembler_h
#define AbstractMacroAssembler_h
-#include <wtf/Platform.h>
-
-#include <MacroAssemblerCodeRef.h>
-#include <CodeLocation.h>
+#include "AssemblerBuffer.h"
+#include "CodeLocation.h"
+#include "MacroAssemblerCodeRef.h"
+#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/Noncopyable.h>
-#include <wtf/UnusedParam.h>
#if ENABLE(ASSEMBLER)
+
+#if PLATFORM(QT)
+#define ENABLE_JIT_CONSTANT_BLINDING 0
+#endif
+
+#ifndef ENABLE_JIT_CONSTANT_BLINDING
+#define ENABLE_JIT_CONSTANT_BLINDING 1
+#endif
+
namespace JSC {
+inline bool isARMv7s()
+{
+#if CPU(APPLE_ARMV7S)
+ return true;
+#else
+ return false;
+#endif
+}
+
+inline bool isARM64()
+{
+#if CPU(ARM64)
+ return true;
+#else
+ return false;
+#endif
+}
+
+inline bool isX86()
+{
+#if CPU(X86_64) || CPU(X86)
+ return true;
+#else
+ return false;
+#endif
+}
+
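+// A minimal sketch of how these predicates are meant to be used: shared code
+// can branch on them instead of sprinkling #if CPU(...) blocks, and the
+// compiler folds the constant condition away, e.g. (hypothetical):
+//
+//     if (isARM64() || isARMv7s())
+//         ... // pick an ARM-specific code-generation strategy
+//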
+class JumpReplacementWatchpoint;
class LinkBuffer;
class RepatchBuffer;
+class Watchpoint;
+namespace DFG {
+struct OSRExit;
+}
template <class AssemblerType>
class AbstractMacroAssembler {
public:
+ friend class JITWriteBarrierBase;
typedef AssemblerType AssemblerType_T;
typedef MacroAssemblerCodePtr CodePtr;
class Jump;
typedef typename AssemblerType::RegisterID RegisterID;
- typedef typename AssemblerType::FPRegisterID FPRegisterID;
- typedef typename AssemblerType::JmpSrc JmpSrc;
- typedef typename AssemblerType::JmpDst JmpDst;
-
// Section 1: MacroAssembler operand types
//
// The following types are used as operands to MacroAssembler operations,
// describing immediate and memory operands to the instructions to be planted.
-
enum Scale {
TimesOne,
TimesTwo,
int32_t offset;
};
+ struct ExtendedAddress {
+ explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
+ : base(base)
+ , offset(offset)
+ {
+ }
+
+ RegisterID base;
+ intptr_t offset;
+ };
+
// ImplicitAddress:
//
// This class is used for explicit 'load' and 'store' operations
    // AbsoluteAddress:
    //
    // Describes a memory operand given by a pointer. For regular load & store
    // operations an unwrapped void* should be used, rather than an AbsoluteAddress.
struct AbsoluteAddress {
- explicit AbsoluteAddress(void* ptr)
+ explicit AbsoluteAddress(const void* ptr)
: m_ptr(ptr)
{
}
- void* m_ptr;
+ const void* m_ptr;
};
- // ImmPtr:
+ // TrustedImmPtr:
//
    // A pointer-sized immediate operand to an instruction - this is wrapped
// in a class requiring explicit construction in order to differentiate
// from pointers used as absolute addresses to memory operations
- struct ImmPtr {
- explicit ImmPtr(void* value)
+ struct TrustedImmPtr {
+ TrustedImmPtr() { }
+
+ explicit TrustedImmPtr(const void* value)
: m_value(value)
{
}
+
+ // This is only here so that TrustedImmPtr(0) does not confuse the C++
+ // overload handling rules.
+ explicit TrustedImmPtr(int value)
+ : m_value(0)
+ {
+ ASSERT_UNUSED(value, !value);
+ }
+
+ explicit TrustedImmPtr(size_t value)
+ : m_value(reinterpret_cast<void*>(value))
+ {
+ }
intptr_t asIntptr()
{
return reinterpret_cast<intptr_t>(m_value);
}
- void* m_value;
+ const void* m_value;
+ };
+
+ struct ImmPtr :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ private TrustedImmPtr
+#else
+ public TrustedImmPtr
+#endif
+ {
+ explicit ImmPtr(const void* value)
+ : TrustedImmPtr(value)
+ {
+ }
+
+ TrustedImmPtr asTrustedImmPtr() { return *this; }
};
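+
+    // Illustrative note (a sketch; the names below are hypothetical):
+    // constants produced by the VM itself are passed as TrustedImmPtr and
+    // emitted verbatim, while possibly attacker-influenced values are wrapped
+    // as ImmPtr. With ENABLE(JIT_CONSTANT_BLINDING) the inheritance is
+    // private, so an ImmPtr cannot be used where a TrustedImmPtr is expected
+    // and only blinding-aware MacroAssembler operations will accept it:
+    //
+    //     move(TrustedImmPtr(vm->heapBase), regT0); // trusted, never blinded
+    //     storePtr(ImmPtr(userValue), addr);        // untrusted, may be blinded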
- // Imm32:
+ // TrustedImm32:
//
    // A 32-bit immediate operand to an instruction - this is wrapped in a
// class requiring explicit construction in order to prevent RegisterIDs
// (which are implemented as an enum) from accidentally being passed as
// immediate values.
- struct Imm32 {
- explicit Imm32(int32_t value)
+ struct TrustedImm32 {
+ TrustedImm32() { }
+
+ explicit TrustedImm32(int32_t value)
: m_value(value)
-#if PLATFORM_ARM_ARCH(7)
- , m_isPointer(false)
-#endif
{
}
-#if !PLATFORM(X86_64)
- explicit Imm32(ImmPtr ptr)
+#if !CPU(X86_64)
+ explicit TrustedImm32(TrustedImmPtr ptr)
: m_value(ptr.asIntptr())
-#if PLATFORM_ARM_ARCH(7)
- , m_isPointer(true)
-#endif
{
}
#endif
int32_t m_value;
-#if PLATFORM_ARM_ARCH(7)
- // We rely on being able to regenerate code to recover exception handling
- // information. Since ARMv7 supports 16-bit immediates there is a danger
- // that if pointer values change the layout of the generated code will change.
- // To avoid this problem, always generate pointers (and thus Imm32s constructed
- // from ImmPtrs) with a code sequence that is able to represent any pointer
- // value - don't use a more compact form in these cases.
- bool m_isPointer;
+ };
+
+
+ struct Imm32 :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ private TrustedImm32
+#else
+ public TrustedImm32
+#endif
+ {
+ explicit Imm32(int32_t value)
+ : TrustedImm32(value)
+ {
+ }
+#if !CPU(X86_64)
+ explicit Imm32(TrustedImmPtr ptr)
+ : TrustedImm32(ptr)
+ {
+ }
#endif
+ const TrustedImm32& asTrustedImm32() const { return *this; }
+
};
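+
+    // What consuming an untrusted Imm32 can look like when blinding kicks in
+    // (a sketch only; the real strategy is chosen per-value and
+    // per-architecture): the raw bit pattern is split with a random key so it
+    // never appears verbatim in the instruction stream:
+    //
+    //     uint32_t key = random();
+    //     move(TrustedImm32(value ^ key), dest);
+    //     xor32(TrustedImm32(key), dest);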
+
+ // TrustedImm64:
+ //
+    // A 64-bit immediate operand to an instruction - this is wrapped in a
+ // class requiring explicit construction in order to prevent RegisterIDs
+ // (which are implemented as an enum) from accidentally being passed as
+ // immediate values.
+ struct TrustedImm64 {
+ TrustedImm64() { }
+
+ explicit TrustedImm64(int64_t value)
+ : m_value(value)
+ {
+ }
+#if CPU(X86_64) || CPU(ARM64)
+ explicit TrustedImm64(TrustedImmPtr ptr)
+ : m_value(ptr.asIntptr())
+ {
+ }
+#endif
+
+ int64_t m_value;
+ };
+ struct Imm64 :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ private TrustedImm64
+#else
+ public TrustedImm64
+#endif
+ {
+ explicit Imm64(int64_t value)
+ : TrustedImm64(value)
+ {
+ }
+#if CPU(X86_64) || CPU(ARM64)
+ explicit Imm64(TrustedImmPtr ptr)
+ : TrustedImm64(ptr)
+ {
+ }
+#endif
+ const TrustedImm64& asTrustedImm64() const { return *this; }
+ };
+
// Section 2: MacroAssembler code buffer handles
//
// The following types are used to reference items in the code buffer
class Label {
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
+ friend struct DFG::OSRExit;
friend class Jump;
+ friend class JumpReplacementWatchpoint;
friend class MacroAssemblerCodeRef;
friend class LinkBuffer;
+ friend class Watchpoint;
public:
Label()
Label(AbstractMacroAssembler<AssemblerType>* masm)
: m_label(masm->m_assembler.label())
+ {
+ masm->invalidateAllTempRegisters();
+ }
+
+ bool isSet() const { return m_label.isSet(); }
+ private:
+ AssemblerLabel m_label;
+ };
+
+ // ConvertibleLoadLabel:
+ //
+ // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
+ // so that:
+ //
+ // loadPtr(Address(a, i), b)
+ //
+ // becomes:
+ //
+ // addPtr(TrustedImmPtr(i), a, b)
+ class ConvertibleLoadLabel {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+
+ public:
+ ConvertibleLoadLabel()
{
}
- bool isUsed() const { return m_label.isUsed(); }
- void used() { m_label.used(); }
+ ConvertibleLoadLabel(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.labelIgnoringWatchpoints())
+ {
+ }
+
+ bool isSet() const { return m_label.isSet(); }
private:
- JmpDst m_label;
+ AssemblerLabel m_label;
};
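+
+    // Once linked, the label resolves to a CodeLocationConvertibleLoad, which
+    // the static helpers declared further down can rewrite in place (sketch;
+    // 'location' is hypothetical):
+    //
+    //     replaceWithAddressComputation(location); // loadPtr -> addPtr
+    //     replaceWithLoad(location);               // addPtr  -> loadPtr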
// DataLabelPtr:
: m_label(masm->m_assembler.label())
{
}
+
+ bool isSet() const { return m_label.isSet(); }
private:
- JmpDst m_label;
+ AssemblerLabel m_label;
};
// DataLabel32:
{
}
+ AssemblerLabel label() const { return m_label; }
+
private:
- JmpDst m_label;
+ AssemblerLabel m_label;
+ };
+
+ // DataLabelCompact:
+ //
+ // A DataLabelCompact is used to refer to a location in the code containing a
+ // compact immediate to be patched after the code has been generated.
+ class DataLabelCompact {
+ template<class TemplateAssemblerType>
+ friend class AbstractMacroAssembler;
+ friend class LinkBuffer;
+ public:
+ DataLabelCompact()
+ {
+ }
+
+ DataLabelCompact(AbstractMacroAssembler<AssemblerType>* masm)
+ : m_label(masm->m_assembler.label())
+ {
+ }
+
+ DataLabelCompact(AssemblerLabel label)
+ : m_label(label)
+ {
+ }
+
+ private:
+ AssemblerLabel m_label;
};
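+
+    // For illustration: LinkBuffer resolves a DataLabelCompact to a
+    // CodeLocationDataLabelCompact whose immediate can later be rewritten in
+    // place (sketch; 'location' is hypothetical):
+    //
+    //     repatchCompact(location, newValue);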
// Call:
{
}
- Call(JmpSrc jmp, Flags flags)
- : m_jmp(jmp)
+ Call(AssemblerLabel jmp, Flags flags)
+ : m_label(jmp)
, m_flags(flags)
{
}
static Call fromTailJump(Jump jump)
{
- return Call(jump.m_jmp, Linkable);
+ return Call(jump.m_label, Linkable);
}
- JmpSrc m_jmp;
-
+ AssemblerLabel m_label;
private:
Flags m_flags;
};
template<class TemplateAssemblerType>
friend class AbstractMacroAssembler;
friend class Call;
+ friend struct DFG::OSRExit;
friend class LinkBuffer;
public:
Jump()
{
}
- Jump(JmpSrc jmp)
- : m_jmp(jmp)
+#if CPU(ARM_THUMB2)
+        // FIXME: this information should be stored in the instruction stream, not in the Jump object.
+ Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
+ : m_label(jmp)
+ , m_type(type)
+ , m_condition(condition)
+ {
+ }
+#elif CPU(ARM64)
+ Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
+ : m_label(jmp)
+ , m_type(type)
+ , m_condition(condition)
+ {
+ }
+
+ Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
+ : m_label(jmp)
+ , m_type(type)
+ , m_condition(condition)
+ , m_is64Bit(is64Bit)
+ , m_compareRegister(compareRegister)
+ {
+ ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
+ }
+
+ Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
+ : m_label(jmp)
+ , m_type(type)
+ , m_condition(condition)
+ , m_bitNumber(bitNumber)
+ , m_compareRegister(compareRegister)
+ {
+ ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
+ }
+#elif CPU(SH4)
+ Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
+ : m_label(jmp)
+ , m_type(type)
+ {
+ }
+#else
+ Jump(AssemblerLabel jmp)
+ : m_label(jmp)
{
}
+#endif
- void link(AbstractMacroAssembler<AssemblerType>* masm)
+ Label label() const
{
- masm->m_assembler.linkJump(m_jmp, masm->m_assembler.label());
+ Label result;
+ result.m_label = m_label;
+ return result;
+ }
+
+ void link(AbstractMacroAssembler<AssemblerType>* masm) const
+ {
+ masm->invalidateAllTempRegisters();
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
+#endif
+
+#if CPU(ARM_THUMB2)
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+#elif CPU(ARM64)
+ if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
+ else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
+ else
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+#elif CPU(SH4)
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
+#else
+ masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
+#endif
}
- void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
+ void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const
{
- masm->m_assembler.linkJump(m_jmp, label.m_label);
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
+#endif
+
+#if CPU(ARM_THUMB2)
+ masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
+#elif CPU(ARM64)
+ if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
+ masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
+ else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
+ masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
+ else
+ masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
+#else
+ masm->m_assembler.linkJump(m_label, label.m_label);
+#endif
}
+ bool isSet() const { return m_label.isSet(); }
+
private:
- JmpSrc m_jmp;
+ AssemblerLabel m_label;
+#if CPU(ARM_THUMB2)
+ ARMv7Assembler::JumpType m_type;
+ ARMv7Assembler::Condition m_condition;
+#elif CPU(ARM64)
+ ARM64Assembler::JumpType m_type;
+ ARM64Assembler::Condition m_condition;
+ bool m_is64Bit;
+ unsigned m_bitNumber;
+ ARM64Assembler::RegisterID m_compareRegister;
+#endif
+#if CPU(SH4)
+ SH4Assembler::JumpType m_type;
+#endif
+ };
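+
+    // Typical emit-then-link flow (a sketch; the condition and register are
+    // hypothetical):
+    //
+    //     Jump done = branchTest32(Zero, regT0);
+    //     // ... code executed only when regT0 is non-zero ...
+    //     done.link(this); // whatever is emitted next is the branch target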
+
+ struct PatchableJump {
+ PatchableJump()
+ {
+ }
+
+ explicit PatchableJump(Jump jump)
+ : m_jump(jump)
+ {
+ }
+
+ operator Jump&() { return m_jump; }
+
+ Jump m_jump;
};
// JumpList:
friend class LinkBuffer;
public:
- typedef Vector<Jump, 16> JumpVector;
+ typedef Vector<Jump, 2> JumpVector;
+
+ JumpList() { }
+
+ JumpList(Jump jump)
+ {
+ append(jump);
+ }
void link(AbstractMacroAssembler<AssemblerType>* masm)
{
m_jumps.append(jump);
}
- void append(JumpList& other)
+ void append(const JumpList& other)
{
m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
}
return !m_jumps.size();
}
- const JumpVector& jumps() { return m_jumps; }
+ void clear()
+ {
+ m_jumps.clear();
+ }
+
+ const JumpVector& jumps() const { return m_jumps; }
private:
JumpVector m_jumps;
// Section 3: Misc admin methods
-
- static CodePtr trampolineAt(CodeRef ref, Label label)
+#if ENABLE(DFG_JIT)
+ Label labelIgnoringWatchpoints()
{
- return CodePtr(AssemblerType::getRelocatedAddress(ref.m_code.dataLocation(), label.m_label));
+ Label result;
+ result.m_label = m_assembler.labelIgnoringWatchpoints();
+ return result;
}
-
- size_t size()
+#else
+ Label labelIgnoringWatchpoints()
{
- return m_assembler.size();
+ return label();
}
-
+#endif
+
Label label()
{
return Label(this);
}
+ void padBeforePatch()
+ {
+ // Rely on the fact that asking for a label already does the padding.
+ (void)label();
+ }
+
+ Label watchpointLabel()
+ {
+ Label result;
+ result.m_label = m_assembler.labelForWatchpoint();
+ return result;
+ }
+
Label align()
{
m_assembler.align(16);
return Label(this);
}
- ptrdiff_t differenceBetween(Label from, Jump to)
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ class RegisterAllocationOffset {
+ public:
+ RegisterAllocationOffset(unsigned offset)
+ : m_offset(offset)
+ {
+ }
+
+ void check(unsigned low, unsigned high)
+ {
+ RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
+ }
+
+ private:
+ unsigned m_offset;
+ };
+
+ void addRegisterAllocationAtOffset(unsigned offset)
{
- return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+ m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
}
- ptrdiff_t differenceBetween(Label from, Call to)
+ void clearRegisterAllocationOffsets()
{
- return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+ m_registerAllocationForOffsets.clear();
}
- ptrdiff_t differenceBetween(Label from, Label to)
+ void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
{
- return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+ if (offset1 > offset2)
+ std::swap(offset1, offset2);
+
+ size_t size = m_registerAllocationForOffsets.size();
+ for (size_t i = 0; i < size; ++i)
+ m_registerAllocationForOffsets[i].check(offset1, offset2);
}
+#endif
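+
+    // Intended use (sketch): a phase that performs register allocation at a
+    // given instruction offset records that offset, and any branch
+    // subsequently linked across it fails the RELEASE_ASSERT in check():
+    //
+    //     addRegisterAllocationAtOffset(debugOffset());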
- ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
+ template<typename T, typename U>
+ static ptrdiff_t differenceBetween(T from, U to)
{
return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
}
- ptrdiff_t differenceBetween(Label from, DataLabel32 to)
+ static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
{
- return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+ return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
}
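+
+    // e.g. (sketch): the byte distance between two marks in the same stream:
+    //
+    //     ptrdiff_t span = differenceBetween(startLabel, endLabel);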
- ptrdiff_t differenceBetween(DataLabelPtr from, Jump to)
+ unsigned debugOffset() { return m_assembler.debugOffset(); }
+
+ ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
+ {
+ AssemblerType::cacheFlush(code, size);
+ }
+protected:
+ AbstractMacroAssembler()
+ : m_randomSource(cryptographicallyRandomNumber())
{
- return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
}
- ptrdiff_t differenceBetween(DataLabelPtr from, DataLabelPtr to)
+ AssemblerType m_assembler;
+
+ uint32_t random()
{
- return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+ return m_randomSource.getUint32();
}
- ptrdiff_t differenceBetween(DataLabelPtr from, Call to)
+ WeakRandom m_randomSource;
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+ Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
+#endif
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+ static bool scratchRegisterForBlinding() { return false; }
+ static bool shouldBlindForSpecificArch(uint32_t) { return true; }
+ static bool shouldBlindForSpecificArch(uint64_t) { return true; }
+#endif
+
+ class CachedTempRegister {
+ friend class DataLabelPtr;
+ friend class DataLabel32;
+ friend class DataLabelCompact;
+ friend class Jump;
+ friend class Label;
+
+ public:
+ CachedTempRegister(AbstractMacroAssembler<AssemblerType>* masm, RegisterID registerID)
+ : m_masm(masm)
+ , m_registerID(registerID)
+ , m_value(0)
+ , m_validBit(1 << static_cast<unsigned>(registerID))
+ {
+ ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8));
+ }
+
+ ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }
+
+ ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }
+
+ bool value(intptr_t& value)
+ {
+ value = m_value;
+ return m_masm->isTempRegisterValid(m_validBit);
+ }
+
+ void setValue(intptr_t value)
+ {
+ m_value = value;
+ m_masm->setTempRegisterValid(m_validBit);
+ }
+
+ ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }
+
+ private:
+ AbstractMacroAssembler<AssemblerType>* m_masm;
+ RegisterID m_registerID;
+ intptr_t m_value;
+ unsigned m_validBit;
+ };
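+
+    // Usage sketch (the cache member and constant are hypothetical): a port
+    // keeping a per-assembler temp-register cache can skip re-materializing a
+    // constant that is still live in that register:
+    //
+    //     intptr_t cached;
+    //     if (m_dataTemp.value(cached) && cached == wanted)
+    //         return m_dataTemp.registerIDNoInvalidate();
+    //     move(TrustedImmPtr(reinterpret_cast<const void*>(wanted)), m_dataTemp.registerIDNoInvalidate());
+    //     m_dataTemp.setValue(wanted);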
+
+ ALWAYS_INLINE void invalidateAllTempRegisters()
{
- return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+ m_tempRegistersValidBits = 0;
}
-protected:
- AssemblerType m_assembler;
+ ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
+ {
+ return (m_tempRegistersValidBits & registerMask);
+ }
+
+ ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
+ {
+ m_tempRegistersValidBits &= ~registerMask;
+ }
+
+ ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
+ {
+ m_tempRegistersValidBits |= registerMask;
+ }
+
+ unsigned m_tempRegistersValidBits;
friend class LinkBuffer;
friend class RepatchBuffer;
static void linkJump(void* code, Jump jump, CodeLocationLabel target)
{
- AssemblerType::linkJump(code, jump.m_jmp, target.dataLocation());
+ AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
}
- static void linkPointer(void* code, typename AssemblerType::JmpDst label, void* value)
+ static void linkPointer(void* code, AssemblerLabel label, void* value)
{
AssemblerType::linkPointer(code, label, value);
}
- static void* getLinkerAddress(void* code, typename AssemblerType::JmpSrc label)
- {
- return AssemblerType::getRelocatedAddress(code, label);
- }
-
- static void* getLinkerAddress(void* code, typename AssemblerType::JmpDst label)
+ static void* getLinkerAddress(void* code, AssemblerLabel label)
{
return AssemblerType::getRelocatedAddress(code, label);
}
static unsigned getLinkerCallReturnOffset(Call call)
{
- return AssemblerType::getCallReturnOffset(call.m_jmp);
+ return AssemblerType::getCallReturnOffset(call.m_label);
}
static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
}
+ static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
+ {
+ AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
+ }
+
static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
{
AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
{
AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
}
-
- static void repatchLoadPtrToLEA(CodeLocationInstruction instruction)
+
+ static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
+ {
+ return AssemblerType::readPointer(dataLabelPtr.dataLocation());
+ }
+
+ static void replaceWithLoad(CodeLocationConvertibleLoad label)
+ {
+ AssemblerType::replaceWithLoad(label.dataLocation());
+ }
+
+ static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
{
- AssemblerType::repatchLoadPtrToLEA(instruction.dataLocation());
+ AssemblerType::replaceWithAddressComputation(label.dataLocation());
}
};