X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/14957cd040308e3eeec43d26bae5d76da13fcd85..8b637bb680022adfddad653280734877951535a9:/assembler/AbstractMacroAssembler.h?ds=inline

diff --git a/assembler/AbstractMacroAssembler.h b/assembler/AbstractMacroAssembler.h
index 7d9d092..71b9d1f 100644
--- a/assembler/AbstractMacroAssembler.h
+++ b/assembler/AbstractMacroAssembler.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,17 +26,59 @@
 #ifndef AbstractMacroAssembler_h
 #define AbstractMacroAssembler_h
 
+#include "AssemblerBuffer.h"
 #include "CodeLocation.h"
 #include "MacroAssemblerCodeRef.h"
+#include <wtf/CryptographicallyRandomNumber.h>
 #include <wtf/Noncopyable.h>
-#include <wtf/UnusedParam.h>
 
 #if ENABLE(ASSEMBLER)
 
+
+#if PLATFORM(QT)
+#define ENABLE_JIT_CONSTANT_BLINDING 0
+#endif
+
+#ifndef ENABLE_JIT_CONSTANT_BLINDING
+#define ENABLE_JIT_CONSTANT_BLINDING 1
+#endif
+
 namespace JSC {
 
+inline bool isARMv7s()
+{
+#if CPU(APPLE_ARMV7S)
+    return true;
+#else
+    return false;
+#endif
+}
+
+inline bool isARM64()
+{
+#if CPU(ARM64)
+    return true;
+#else
+    return false;
+#endif
+}
+
+inline bool isX86()
+{
+#if CPU(X86_64) || CPU(X86)
+    return true;
+#else
+    return false;
+#endif
+}
+
+class JumpReplacementWatchpoint;
 class LinkBuffer;
 class RepatchBuffer;
+class Watchpoint;
+namespace DFG {
+struct OSRExit;
+}
 
 template <class AssemblerType>
 class AbstractMacroAssembler {
@@ -56,7 +98,6 @@ public:
 
     // The following types are used as operands to MacroAssembler operations,
    // describing immediate and memory operands to the instructions to be planted.
-
    enum Scale {
        TimesOne,
        TimesTwo,
@@ -157,10 +198,25 @@ public:
     // in a class requiring explicit construction in order to differentiate
     // from pointers used as absolute addresses to memory operations
     struct TrustedImmPtr {
+        TrustedImmPtr() { }
+
         explicit TrustedImmPtr(const void* value)
             : m_value(value)
         {
         }
+
+        // This is only here so that TrustedImmPtr(0) does not confuse the C++
+        // overload handling rules.
+        explicit TrustedImmPtr(int value)
+            : m_value(0)
+        {
+            ASSERT_UNUSED(value, !value);
+        }
+
+        explicit TrustedImmPtr(size_t value)
+            : m_value(reinterpret_cast<void*>(value))
+        {
+        }
 
         intptr_t asIntptr()
         {
@@ -170,11 +226,19 @@ public:
         const void* m_value;
     };
 
-    struct ImmPtr : public TrustedImmPtr {
+    struct ImmPtr :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+        private TrustedImmPtr
+#else
+        public TrustedImmPtr
+#endif
+    {
         explicit ImmPtr(const void* value)
             : TrustedImmPtr(value)
         {
         }
+
+        TrustedImmPtr asTrustedImmPtr() { return *this; }
     };
 
     // TrustedImm32:
@@ -184,39 +248,31 @@ public:
     // (which are implemented as an enum) from accidentally being passed as
     // immediate values.
     struct TrustedImm32 {
+        TrustedImm32() { }
+
         explicit TrustedImm32(int32_t value)
             : m_value(value)
-#if CPU(ARM) || CPU(MIPS)
-            , m_isPointer(false)
-#endif
         {
         }
 
 #if !CPU(X86_64)
         explicit TrustedImm32(TrustedImmPtr ptr)
             : m_value(ptr.asIntptr())
-#if CPU(ARM) || CPU(MIPS)
-            , m_isPointer(true)
-#endif
         {
         }
 #endif
 
         int32_t m_value;
-#if CPU(ARM) || CPU(MIPS)
-        // We rely on being able to regenerate code to recover exception handling
-        // information. Since ARMv7 supports 16-bit immediates there is a danger
-        // that if pointer values change the layout of the generated code will change.
-        // To avoid this problem, always generate pointers (and thus Imm32s constructed
-        // from ImmPtrs) with a code sequence that is able to represent any pointer
-        // value - don't use a more compact form in these cases.
-        // Same for MIPS.
-        bool m_isPointer;
-#endif
     };
 
-    struct Imm32 : public TrustedImm32 {
+    struct Imm32 :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+        private TrustedImm32
+#else
+        public TrustedImm32
+#endif
+    {
         explicit Imm32(int32_t value)
             : TrustedImm32(value)
         {
@@ -227,6 +283,52 @@ public:
         {
         }
 #endif
+        const TrustedImm32& asTrustedImm32() const { return *this; }
+
+    };
+
+    // TrustedImm64:
+    //
+    // A 64bit immediate operand to an instruction - this is wrapped in a
+    // class requiring explicit construction in order to prevent RegisterIDs
+    // (which are implemented as an enum) from accidentally being passed as
+    // immediate values.
+    struct TrustedImm64 {
+        TrustedImm64() { }
+
+        explicit TrustedImm64(int64_t value)
+            : m_value(value)
+        {
+        }
+
+#if CPU(X86_64) || CPU(ARM64)
+        explicit TrustedImm64(TrustedImmPtr ptr)
+            : m_value(ptr.asIntptr())
+        {
+        }
+#endif
+
+        int64_t m_value;
+    };
+
+    struct Imm64 :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+        private TrustedImm64
+#else
+        public TrustedImm64
+#endif
+    {
+        explicit Imm64(int64_t value)
+            : TrustedImm64(value)
+        {
+        }
+#if CPU(X86_64) || CPU(ARM64)
+        explicit Imm64(TrustedImmPtr ptr)
+            : TrustedImm64(ptr)
+        {
+        }
+#endif
+        const TrustedImm64& asTrustedImm64() const { return *this; }
     };
 
     // Section 2: MacroAssembler code buffer handles
@@ -244,9 +346,12 @@ public:
     class Label {
         template <class TemplateAssemblerType>
         friend class AbstractMacroAssembler;
+        friend struct DFG::OSRExit;
         friend class Jump;
+        friend class JumpReplacementWatchpoint;
         friend class MacroAssemblerCodeRef;
         friend class LinkBuffer;
+        friend class Watchpoint;
 
     public:
         Label()
@@ -255,6 +360,37 @@ public:
 
         Label(AbstractMacroAssembler<AssemblerType>* masm)
             : m_label(masm->m_assembler.label())
+        {
+            masm->invalidateAllTempRegisters();
+        }
+
+        bool isSet() const { return m_label.isSet(); }
+    private:
+        AssemblerLabel m_label;
+    };
+
+    // ConvertibleLoadLabel:
+    //
+    // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
+    // so that:
+    //
+    //     loadPtr(Address(a, i), b)
+    //
+    // becomes:
+    //
+    //     addPtr(TrustedImmPtr(i), a, b)
+    class ConvertibleLoadLabel {
+        template <class TemplateAssemblerType>
+        friend class AbstractMacroAssembler;
+        friend class LinkBuffer;
+
+    public:
+        ConvertibleLoadLabel()
+        {
+        }
+
+        ConvertibleLoadLabel(AbstractMacroAssembler<AssemblerType>* masm)
+            : m_label(masm->m_assembler.labelIgnoringWatchpoints())
         {
         }
 
@@ -280,7 +416,7 @@ public:
             : m_label(masm->m_assembler.label())
         {
         }
-        
+
         bool isSet() const { return m_label.isSet(); }
 
     private:
@@ -328,7 +464,7 @@ public:
             : m_label(masm->m_assembler.label())
         {
         }
-        
+
         DataLabelCompact(AssemblerLabel label)
             : m_label(label)
         {
@@ -362,7 +498,7 @@ public:
         }
 
         Call(AssemblerLabel jmp, Flags flags)
-            : m_jmp(jmp)
+            : m_label(jmp)
             , m_flags(flags)
         {
         }
@@ -374,10 +510,10 @@ public:
 
         static Call fromTailJump(Jump jump)
         {
-            return Call(jump.m_jmp, Linkable);
+            return Call(jump.m_label, Linkable);
         }
 
-        AssemblerLabel m_jmp;
+        AssemblerLabel m_label;
     private:
         Flags m_flags;
     };
@@ -392,6 +528,7 @@ public:
         template <class TemplateAssemblerType>
         friend class AbstractMacroAssembler;
         friend class Call;
+        friend struct DFG::OSRExit;
         friend class LinkBuffer;
     public:
        Jump()
@@ -400,45 +537,135 @@ public:
 
 #if CPU(ARM_THUMB2)
         // Fixme: this information should be stored in the instruction stream, not in the Jump object.
-        Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
-            : m_jmp(jmp)
+        Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
+        {
+        }
+#elif CPU(ARM64)
+        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
+            : m_label(jmp)
             , m_type(type)
             , m_condition(condition)
         {
         }
+
+        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
+            , m_is64Bit(is64Bit)
+            , m_compareRegister(compareRegister)
+        {
+            ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
+        }
+
+        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
+            , m_bitNumber(bitNumber)
+            , m_compareRegister(compareRegister)
+        {
+            ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
+        }
+#elif CPU(SH4)
+        Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
+            : m_label(jmp)
+            , m_type(type)
+        {
+        }
 #else
         Jump(AssemblerLabel jmp)
-            : m_jmp(jmp)
+            : m_label(jmp)
         {
         }
 #endif
+
+        Label label() const
+        {
+            Label result;
+            result.m_label = m_label;
+            return result;
+        }
 
         void link(AbstractMacroAssembler<AssemblerType>* masm) const
         {
+            masm->invalidateAllTempRegisters();
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+            masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
+#endif
+
 #if CPU(ARM_THUMB2)
-            masm->m_assembler.linkJump(m_jmp, masm->m_assembler.label(), m_type, m_condition);
+            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+#elif CPU(ARM64)
+            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
+                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
+            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
+                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
+            else
+                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+#elif CPU(SH4)
+            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
 #else
-            masm->m_assembler.linkJump(m_jmp, masm->m_assembler.label());
+            masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
 #endif
         }
 
         void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const
         {
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+            masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
+#endif
+
 #if CPU(ARM_THUMB2)
-            masm->m_assembler.linkJump(m_jmp, label.m_label, m_type, m_condition);
+            masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
+#elif CPU(ARM64)
+            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
+                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
+            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
+                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
+            else
+                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
 #else
-            masm->m_assembler.linkJump(m_jmp, label.m_label);
+            masm->m_assembler.linkJump(m_label, label.m_label);
 #endif
         }
 
-        bool isSet() const { return m_jmp.isSet(); }
+        bool isSet() const { return m_label.isSet(); }
 
     private:
-        AssemblerLabel m_jmp;
+        AssemblerLabel m_label;
 #if CPU(ARM_THUMB2)
         ARMv7Assembler::JumpType m_type;
         ARMv7Assembler::Condition m_condition;
+#elif CPU(ARM64)
+        ARM64Assembler::JumpType m_type;
+        ARM64Assembler::Condition m_condition;
+        bool m_is64Bit;
+        unsigned m_bitNumber;
+        ARM64Assembler::RegisterID m_compareRegister;
 #endif
+#if CPU(SH4)
+        SH4Assembler::JumpType m_type;
+#endif
+    };
+
+    struct PatchableJump {
+        PatchableJump()
+        {
+        }
+
+        explicit PatchableJump(Jump jump)
+            : m_jump(jump)
+        {
+        }
+
+        operator Jump&() { return m_jump; }
+
+        Jump m_jump;
     };
 
     // JumpList:
@@ -449,7 +676,14 @@ public:
         friend class LinkBuffer;
 
     public:
-        typedef Vector<Jump, 16> JumpVector;
+        typedef Vector<Jump, 2> JumpVector;
+
+        JumpList() { }
+
+        JumpList(Jump jump)
+        {
+            append(jump);
+        }
 
         void link(AbstractMacroAssembler<AssemblerType>* masm)
         {
@@ -472,7 +706,7 @@ public:
             m_jumps.append(jump);
         }
 
-        void append(JumpList& other)
+        void append(const JumpList& other)
        {
            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
        }
@@ -487,7 +721,7 @@ public:
            m_jumps.clear();
        }
 
-        const JumpVector& jumps() { return m_jumps; }
+        const JumpVector& jumps() const { return m_jumps; }
 
    private:
        JumpVector m_jumps;
@@ -495,83 +729,194 @@ public:
 
     // Section 3: Misc admin methods
+#if ENABLE(DFG_JIT)
+    Label labelIgnoringWatchpoints()
+    {
+        Label result;
+        result.m_label = m_assembler.labelIgnoringWatchpoints();
+        return result;
+    }
+#else
+    Label labelIgnoringWatchpoints()
+    {
+        return label();
+    }
+#endif
+
     Label label()
     {
         return Label(this);
     }
 
+    void padBeforePatch()
+    {
+        // Rely on the fact that asking for a label already does the padding.
+        (void)label();
+    }
+
+    Label watchpointLabel()
+    {
+        Label result;
+        result.m_label = m_assembler.labelForWatchpoint();
+        return result;
+    }
+
     Label align()
     {
         m_assembler.align(16);
         return Label(this);
     }
 
-    ptrdiff_t differenceBetween(Label from, Jump to)
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+    class RegisterAllocationOffset {
+    public:
+        RegisterAllocationOffset(unsigned offset)
+            : m_offset(offset)
+        {
+        }
+
+        void check(unsigned low, unsigned high)
+        {
+            RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
+        }
+
+    private:
+        unsigned m_offset;
+    };
+
+    void addRegisterAllocationAtOffset(unsigned offset)
     {
-        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+        m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
     }
 
-    ptrdiff_t differenceBetween(Label from, Call to)
+    void clearRegisterAllocationOffsets()
     {
-        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+        m_registerAllocationForOffsets.clear();
     }
 
-    ptrdiff_t differenceBetween(Label from, Label to)
+    void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
     {
-        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+        if (offset1 > offset2)
+            std::swap(offset1, offset2);
+
+        size_t size = m_registerAllocationForOffsets.size();
+        for (size_t i = 0; i < size; ++i)
+            m_registerAllocationForOffsets[i].check(offset1, offset2);
     }
+#endif
 
-    ptrdiff_t differenceBetween(Label from, DataLabelPtr to)
+    template <typename T, typename U>
+    static ptrdiff_t differenceBetween(T from, U to)
     {
         return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
     }
 
-    ptrdiff_t differenceBetween(Label from, DataLabel32 to)
+    static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
     {
-        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+        return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
     }
-
-    ptrdiff_t differenceBetween(Label from, DataLabelCompact to)
+
+    unsigned debugOffset() { return m_assembler.debugOffset(); }
+
+    ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
     {
-        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+        AssemblerType::cacheFlush(code, size);
     }
-
-    ptrdiff_t differenceBetween(DataLabelPtr from, Jump to)
+protected:
+    AbstractMacroAssembler()
+        : m_randomSource(cryptographicallyRandomNumber())
     {
-        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
     }
 
-    ptrdiff_t differenceBetween(DataLabelPtr from, DataLabelPtr to)
+    AssemblerType m_assembler;
+
+    uint32_t random()
     {
-        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
+        return m_randomSource.getUint32();
    }
 
-    ptrdiff_t differenceBetween(DataLabelPtr from, Call to)
+    WeakRandom m_randomSource;
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+    Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
+#endif
+
+#if ENABLE(JIT_CONSTANT_BLINDING)
+    static bool scratchRegisterForBlinding() { return false; }
+    static bool shouldBlindForSpecificArch(uint32_t) { return true; }
+    static bool shouldBlindForSpecificArch(uint64_t) { return true; }
+#endif
+
+    class CachedTempRegister {
+        friend class DataLabelPtr;
+        friend class DataLabel32;
+        friend class DataLabelCompact;
+        friend class Jump;
+        friend class Label;
+
+    public:
+        CachedTempRegister(AbstractMacroAssembler<AssemblerType>* masm, RegisterID registerID)
+            : m_masm(masm)
+            , m_registerID(registerID)
+            , m_value(0)
+            , m_validBit(1 << static_cast<unsigned>(registerID))
+        {
+            ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8));
+        }
+
+        ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }
+
+        ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }
+
+        bool value(intptr_t& value)
+        {
+            value = m_value;
+            return m_masm->isTempRegisterValid(m_validBit);
+        }
+
+        void setValue(intptr_t value)
+        {
+            m_value = value;
+            m_masm->setTempRegisterValid(m_validBit);
+        }
+
+        ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }
+
+    private:
+        AbstractMacroAssembler<AssemblerType>* m_masm;
+        RegisterID m_registerID;
+        intptr_t m_value;
+        unsigned m_validBit;
+    };
+
+    ALWAYS_INLINE void invalidateAllTempRegisters()
     {
-        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_jmp);
+        m_tempRegistersValidBits = 0;
     }
 
-    // Temporary interface; likely to be removed, since may be hard to port to all architectures.
-#if CPU(X86) || CPU(X86_64)
-    void rewindToLabel(Label rewindTo) { m_assembler.rewindToLabel(rewindTo.m_label); }
-#endif
+    ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
+    {
+        return (m_tempRegistersValidBits & registerMask);
+    }
 
-    void beginUninterruptedSequence() { }
-    void endUninterruptedSequence() { }
+    ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
+    {
+        m_tempRegistersValidBits &= ~registerMask;
+    }
 
-#ifndef NDEBUG
-    unsigned debugOffset() { return m_assembler.debugOffset(); }
-#endif
+    ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
+    {
+        m_tempRegistersValidBits |= registerMask;
+    }
 
-protected:
-    AssemblerType m_assembler;
+    unsigned m_tempRegistersValidBits;
 
     friend class LinkBuffer;
     friend class RepatchBuffer;
 
     static void linkJump(void* code, Jump jump, CodeLocationLabel target)
     {
-        AssemblerType::linkJump(code, jump.m_jmp, target.dataLocation());
+        AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
     }
 
     static void linkPointer(void* code, AssemblerLabel label, void* value)
@@ -586,7 +931,7 @@ protected:
 
     static unsigned getLinkerCallReturnOffset(Call call)
     {
-        return AssemblerType::getCallReturnOffset(call.m_jmp);
+        return AssemblerType::getCallReturnOffset(call.m_label);
     }
 
     static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
@@ -618,6 +963,16 @@ protected:
     {
         return AssemblerType::readPointer(dataLabelPtr.dataLocation());
     }
+
+    static void replaceWithLoad(CodeLocationConvertibleLoad label)
+    {
+        AssemblerType::replaceWithLoad(label.dataLocation());
+    }
+
+    static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
+    {
+        AssemblerType::replaceWithAddressComputation(label.dataLocation());
+    }
 };
 
 } // namespace JSC
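
The central change in this patch is the ENABLE_JIT_CONSTANT_BLINDING split between trusted and untrusted immediates: with blinding enabled, Imm32, Imm64, and ImmPtr inherit privately from TrustedImm32, TrustedImm64, and TrustedImmPtr, so an attacker-influenced constant can no longer flow silently into a path that emits it verbatim; a call site must either let the MacroAssembler blind the value or vouch for it explicitly through asTrustedImm32(), asTrustedImm64(), or asTrustedImmPtr(). The following is a minimal, self-contained sketch of the XOR-blinding idea only, using hypothetical helper names; the real MacroAssembler emits the blinded constant plus an unblinding instruction into the code stream rather than computing anything up front.

#include <cstdint>
#include <random>

struct BlindedImm32 {
    int32_t blindedValue; // value ^ key: this is what lands in the instruction stream
    int32_t key;          // random key, emitted separately
};

// Stand-in for AbstractMacroAssembler::random(); the real source is a
// WeakRandom seeded from cryptographicallyRandomNumber().
static uint32_t randomKey()
{
    static std::mt19937 generator(0x5eed1234u);
    return generator();
}

static BlindedImm32 blind(int32_t value)
{
    int32_t key = static_cast<int32_t>(randomKey());
    return { value ^ key, key };
}

static int32_t unblind(const BlindedImm32& imm)
{
    // The JIT emits this XOR as a real instruction, so the plain constant
    // never appears verbatim in executable memory.
    return imm.blindedValue ^ imm.key;
}

int main()
{
    BlindedImm32 imm = blind(0x41414141); // attacker-influenced constant
    return unblind(imm) == 0x41414141 ? 0 : 1;
}

Private inheritance is what turns accidental trust into a compile error: code that used to accept an Imm32 where a TrustedImm32 was expected no longer compiles unless it goes through asTrustedImm32().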
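
CachedTempRegister lets a concrete assembler remember the constant most recently materialized into a scratch register, keyed by one valid bit per register in m_tempRegistersValidBits; constructing a Label or linking a Jump calls invalidateAllTempRegisters(), because control flow can reach those points with different register contents. Below is a toy model of that bookkeeping with simplified names and a single value slot (the real class keeps one cached value per CachedTempRegister instance):

#include <cassert>
#include <cstdint>

class ToyAssembler {
public:
    // Mirrors CachedTempRegister::value(): reports the cached constant and
    // whether it can still be trusted.
    bool tempRegisterValue(unsigned validBit, intptr_t& value) const
    {
        value = m_cachedValue;
        return m_validBits & validBit;
    }

    // Mirrors CachedTempRegister::setValue().
    void setTempRegisterValue(unsigned validBit, intptr_t value)
    {
        m_cachedValue = value;
        m_validBits |= validBit;
    }

    void invalidateAllTempRegisters() { m_validBits = 0; }

    // A label is a potential jump target, so every cache must be dropped.
    void label() { invalidateAllTempRegisters(); }

private:
    intptr_t m_cachedValue { 0 };
    unsigned m_validBits { 0 };
};

int main()
{
    ToyAssembler masm;
    const unsigned scratchBit = 1u << 3; // valid bit for, say, register r3

    masm.setTempRegisterValue(scratchBit, 0x1234); // after "mov #0x1234, r3"
    intptr_t cached;
    assert(masm.tempRegisterValue(scratchBit, cached) && cached == 0x1234);

    masm.label(); // a branch may land here: the cached constant is stale
    assert(!masm.tempRegisterValue(scratchBit, cached));
    return 0;
}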
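
Under ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION), Jump::link() and Jump::linkTo() call checkRegisterAllocationAgainstBranchRange(), which aborts if the branch's byte range crosses an offset recorded by addRegisterAllocationAtOffset(), since code at the target would then run under a register assignment the allocator has already abandoned. The check reduces to the interval test below (assumed, simplified names; the real code fails hard through RELEASE_ASSERT_WITH_MESSAGE):

#include <algorithm>
#include <cstdio>
#include <vector>

// Returns true when no recorded register-allocation offset falls inside
// the branch's [offset1, offset2] byte range.
static bool branchRangeIsSafe(const std::vector<unsigned>& allocationOffsets,
    unsigned offset1, unsigned offset2)
{
    if (offset1 > offset2)
        std::swap(offset1, offset2);
    for (unsigned offset : allocationOffsets) {
        if (offset1 <= offset && offset <= offset2)
            return false; // branch spans a register allocation point
    }
    return true;
}

int main()
{
    std::vector<unsigned> offsets { 24, 96 };
    std::printf("%d\n", branchRangeIsSafe(offsets, 0, 16));   // 1: safe
    std::printf("%d\n", branchRangeIsSafe(offsets, 80, 128)); // 0: unsafe
    return 0;
}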