X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/6fe7ccc865dc7d7541b93c5bcaf6368d2c98a174..8b637bb680022adfddad653280734877951535a9:/assembler/AbstractMacroAssembler.h?ds=sidebyside

diff --git a/assembler/AbstractMacroAssembler.h b/assembler/AbstractMacroAssembler.h
index e7cc70f..71b9d1f 100644
--- a/assembler/AbstractMacroAssembler.h
+++ b/assembler/AbstractMacroAssembler.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -31,7 +31,6 @@
 #include "MacroAssemblerCodeRef.h"
 #include 
 #include 
-#include 
 
 #if ENABLE(ASSEMBLER)
 
@@ -46,10 +45,39 @@ namespace JSC {
 
+inline bool isARMv7s()
+{
+#if CPU(APPLE_ARMV7S)
+    return true;
+#else
+    return false;
+#endif
+}
+
+inline bool isARM64()
+{
+#if CPU(ARM64)
+    return true;
+#else
+    return false;
+#endif
+}
+
+inline bool isX86()
+{
+#if CPU(X86_64) || CPU(X86)
+    return true;
+#else
+    return false;
+#endif
+}
+
+class JumpReplacementWatchpoint;
 class LinkBuffer;
 class RepatchBuffer;
+class Watchpoint;
 
 namespace DFG {
-class CorrectableJumpPoint;
+struct OSRExit;
 }
 
 template 
@@ -70,7 +98,6 @@ public:
     // The following types are used as operands to MacroAssembler operations,
     // describing immediate and memory operands to the instructions to be planted.
-
     enum Scale {
         TimesOne,
         TimesTwo,
@@ -171,6 +198,8 @@ public:
     // in a class requiring explicit construction in order to differentiate
    // from pointers used as absolute addresses to memory operations
     struct TrustedImmPtr {
+        TrustedImmPtr() { }
+
         explicit TrustedImmPtr(const void* value)
             : m_value(value)
         {
@@ -219,35 +248,21 @@ public:
     // (which are implemented as an enum) from accidentally being passed as
     // immediate values.
     struct TrustedImm32 {
+        TrustedImm32() { }
+
         explicit TrustedImm32(int32_t value)
             : m_value(value)
-#if CPU(ARM) || CPU(MIPS)
-            , m_isPointer(false)
-#endif
         {
         }
 
 #if !CPU(X86_64)
         explicit TrustedImm32(TrustedImmPtr ptr)
             : m_value(ptr.asIntptr())
-#if CPU(ARM) || CPU(MIPS)
-            , m_isPointer(true)
-#endif
         {
         }
 #endif
 
         int32_t m_value;
-#if CPU(ARM) || CPU(MIPS)
-        // We rely on being able to regenerate code to recover exception handling
-        // information. Since ARMv7 supports 16-bit immediates there is a danger
-        // that if pointer values change the layout of the generated code will change.
-        // To avoid this problem, always generate pointers (and thus Imm32s constructed
-        // from ImmPtrs) with a code sequence that is able to represent any pointer
-        // value - don't use a more compact form in these cases.
-        // Same for MIPS.
-        bool m_isPointer;
-#endif
     };
 
 
@@ -272,6 +287,50 @@ public:
     };
 
 
+    // TrustedImm64:
+    //
+    // A 64bit immediate operand to an instruction - this is wrapped in a
+    // class requiring explicit construction in order to prevent RegisterIDs
+    // (which are implemented as an enum) from accidentally being passed as
+    // immediate values.
+    struct TrustedImm64 {
+        TrustedImm64() { }
+
+        explicit TrustedImm64(int64_t value)
+            : m_value(value)
+        {
+        }
+
+#if CPU(X86_64) || CPU(ARM64)
+        explicit TrustedImm64(TrustedImmPtr ptr)
+            : m_value(ptr.asIntptr())
+        {
+        }
+#endif
+
+        int64_t m_value;
+    };
+
+    struct Imm64 :
+#if ENABLE(JIT_CONSTANT_BLINDING)
+        private TrustedImm64
+#else
+        public TrustedImm64
+#endif
+    {
+        explicit Imm64(int64_t value)
+            : TrustedImm64(value)
+        {
+        }
+#if CPU(X86_64) || CPU(ARM64)
+        explicit Imm64(TrustedImmPtr ptr)
+            : TrustedImm64(ptr)
+        {
+        }
+#endif
+        const TrustedImm64& asTrustedImm64() const { return *this; }
+    };
+
     // Section 2: MacroAssembler code buffer handles
     //
     // The following types are used to reference items in the code buffer
@@ -287,10 +346,12 @@ public:
     class Label {
         template
         friend class AbstractMacroAssembler;
-        friend class DFG::CorrectableJumpPoint;
         friend class Jump;
+        friend class JumpReplacementWatchpoint;
         friend class MacroAssemblerCodeRef;
         friend class LinkBuffer;
+        friend class Watchpoint;
 
     public:
         Label()
@@ -299,6 +360,37 @@ public:
         Label(AbstractMacroAssembler* masm)
             : m_label(masm->m_assembler.label())
+        {
+            masm->invalidateAllTempRegisters();
+        }
+
+        bool isSet() const { return m_label.isSet(); }
+    private:
+        AssemblerLabel m_label;
+    };
+
+    // ConvertibleLoadLabel:
+    //
+    // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
+    // so that:
+    //
+    //     loadPtr(Address(a, i), b)
+    //
+    // becomes:
+    //
+    //     addPtr(TrustedImmPtr(i), a, b)
+    class ConvertibleLoadLabel {
+        template
+        friend class AbstractMacroAssembler;
+        friend class LinkBuffer;
+
+    public:
+        ConvertibleLoadLabel()
+        {
+        }
+
+        ConvertibleLoadLabel(AbstractMacroAssembler* masm)
+            : m_label(masm->m_assembler.labelIgnoringWatchpoints())
         {
         }
 
@@ -324,7 +416,7 @@ public:
             : m_label(masm->m_assembler.label())
         {
         }
-        
+
         bool isSet() const { return m_label.isSet(); }
     private:
@@ -372,7 +464,7 @@ public:
             : m_label(masm->m_assembler.label())
         {
         }
-        
+
         DataLabelCompact(AssemblerLabel label)
             : m_label(label)
         {
@@ -436,7 +528,7 @@ public:
         template
         friend class AbstractMacroAssembler;
         friend class Call;
-        friend class DFG::CorrectableJumpPoint;
+        friend struct DFG::OSRExit;
         friend class LinkBuffer;
     public:
         Jump()
@@ -445,11 +537,38 @@ public:
 
 #if CPU(ARM_THUMB2)
         // Fixme: this information should be stored in the instruction stream, not in the Jump object.
-        Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
+        Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
+        {
+        }
+#elif CPU(ARM64)
+        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
+        {
+        }
+
+        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
+            : m_label(jmp)
+            , m_type(type)
+            , m_condition(condition)
+            , m_is64Bit(is64Bit)
+            , m_compareRegister(compareRegister)
+        {
+            ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
+        }
+
+        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
             : m_label(jmp)
             , m_type(type)
             , m_condition(condition)
+            , m_bitNumber(bitNumber)
+            , m_compareRegister(compareRegister)
         {
+            ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
         }
 #elif CPU(SH4)
         Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
@@ -463,11 +582,31 @@ public:
         {
         }
 #endif
+
+        Label label() const
+        {
+            Label result;
+            result.m_label = m_label;
+            return result;
+        }
 
         void link(AbstractMacroAssembler* masm) const
         {
+            masm->invalidateAllTempRegisters();
+
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+            masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
+#endif
+
 #if CPU(ARM_THUMB2)
             masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
+#elif CPU(ARM64)
+            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
+                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
+            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
+                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
+            else
+                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
 #elif CPU(SH4)
             masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
 #else
@@ -477,8 +616,19 @@ public:
         void linkTo(Label label, AbstractMacroAssembler* masm) const
         {
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+            masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
+#endif
+
 #if CPU(ARM_THUMB2)
             masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
+#elif CPU(ARM64)
+            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
+                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
+            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
+                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
+            else
+                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
 #else
             masm->m_assembler.linkJump(m_label, label.m_label);
 #endif
         }
 
@@ -491,6 +641,12 @@ public:
 #if CPU(ARM_THUMB2)
         ARMv7Assembler::JumpType m_type;
         ARMv7Assembler::Condition m_condition;
+#elif CPU(ARM64)
+        ARM64Assembler::JumpType m_type;
+        ARM64Assembler::Condition m_condition;
+        bool m_is64Bit;
+        unsigned m_bitNumber;
+        ARM64Assembler::RegisterID m_compareRegister;
 #endif
 #if CPU(SH4)
         SH4Assembler::JumpType m_type;
@@ -520,7 +676,14 @@ public:
         friend class LinkBuffer;
 
     public:
-        typedef Vector JumpVector;
+        typedef Vector JumpVector;
+
+        JumpList() { }
+
+        JumpList(Jump jump)
+        {
+            append(jump);
+        }
 
         void link(AbstractMacroAssembler* masm)
         {
@@ -543,7 +706,7 @@ public:
             m_jumps.append(jump);
         }
 
-        void append(JumpList& other)
+        void append(const JumpList& other)
         {
             m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
         }
@@ -558,7 +721,7 @@ public:
            m_jumps.clear();
         }
 
-        const JumpVector& jumps() { return m_jumps; }
+        const JumpVector& jumps() const { return m_jumps; }
 
     private:
         JumpVector m_jumps;
@@ -566,17 +729,82 @@ public:
 
     // Section 3: Misc admin methods
 
+#if ENABLE(DFG_JIT)
+    Label labelIgnoringWatchpoints()
+    {
+        Label result;
+        result.m_label = m_assembler.labelIgnoringWatchpoints();
+        return result;
+    }
+#else
+    Label labelIgnoringWatchpoints()
+    {
+        return label();
+    }
+#endif
+
     Label label()
     {
         return Label(this);
     }
 
+    void padBeforePatch()
+    {
+        // Rely on the fact that asking for a label already does the padding.
+        (void)label();
+    }
+
+    Label watchpointLabel()
+    {
+        Label result;
+        result.m_label = m_assembler.labelForWatchpoint();
+        return result;
+    }
+
     Label align()
     {
         m_assembler.align(16);
         return Label(this);
     }
 
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+    class RegisterAllocationOffset {
+    public:
+        RegisterAllocationOffset(unsigned offset)
+            : m_offset(offset)
+        {
+        }
+
+        void check(unsigned low, unsigned high)
+        {
+            RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
+        }
+
+    private:
+        unsigned m_offset;
+    };
+
+    void addRegisterAllocationAtOffset(unsigned offset)
+    {
+        m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
+    }
+
+    void clearRegisterAllocationOffsets()
+    {
+        m_registerAllocationForOffsets.clear();
+    }
+
+    void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
+    {
+        if (offset1 > offset2)
+            std::swap(offset1, offset2);
+
+        size_t size = m_registerAllocationForOffsets.size();
+        for (size_t i = 0; i < size; ++i)
+            m_registerAllocationForOffsets[i].check(offset1, offset2);
+    }
+#endif
+
     template
     static ptrdiff_t differenceBetween(T from, U to)
     {
@@ -609,12 +837,80 @@ protected:
     WeakRandom m_randomSource;
 
+#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
+    Vector m_registerAllocationForOffsets;
+#endif
+
 #if ENABLE(JIT_CONSTANT_BLINDING)
     static bool scratchRegisterForBlinding() { return false; }
     static bool shouldBlindForSpecificArch(uint32_t) { return true; }
     static bool shouldBlindForSpecificArch(uint64_t) { return true; }
 #endif
 
+    class CachedTempRegister {
+        friend class DataLabelPtr;
+        friend class DataLabel32;
+        friend class DataLabelCompact;
+        friend class Jump;
+        friend class Label;
+
+    public:
+        CachedTempRegister(AbstractMacroAssembler* masm, RegisterID registerID)
+            : m_masm(masm)
+            , m_registerID(registerID)
+            , m_value(0)
+            , m_validBit(1 << static_cast(registerID))
+        {
+            ASSERT(static_cast(registerID) < (sizeof(unsigned) * 8));
+        }
+
+        ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }
+
+        ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }
+
+        bool value(intptr_t& value)
+        {
+            value = m_value;
+            return m_masm->isTempRegisterValid(m_validBit);
+        }
+
+        void setValue(intptr_t value)
+        {
+            m_value = value;
+            m_masm->setTempRegisterValid(m_validBit);
+        }
+
+        ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }
+
+    private:
+        AbstractMacroAssembler* m_masm;
+        RegisterID m_registerID;
+        intptr_t m_value;
+        unsigned m_validBit;
+    };
+
+    ALWAYS_INLINE void invalidateAllTempRegisters()
+    {
+        m_tempRegistersValidBits = 0;
+    }
+
+    ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
+    {
+        return (m_tempRegistersValidBits & registerMask);
+    }
+
+    ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
+    {
+        m_tempRegistersValidBits &= ~registerMask;
+    }
+
+    ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
+    {
+        m_tempRegistersValidBits |= registerMask;
+    }
+
+    unsigned m_tempRegistersValidBits;
+
     friend class LinkBuffer;
     friend class RepatchBuffer;
 
@@ -668,16 +964,14 @@ protected:
         return AssemblerType::readPointer(dataLabelPtr.dataLocation());
     }
 
-    static void unreachableForPlatform()
+    static void replaceWithLoad(CodeLocationConvertibleLoad label)
     {
-#if COMPILER(CLANG)
-#pragma clang diagnostic push
-#pragma clang diagnostic ignored "-Wmissing-noreturn"
-        ASSERT_NOT_REACHED();
-#pragma clang diagnostic pop
-#else
-        ASSERT_NOT_REACHED();
-#endif
+        AssemblerType::replaceWithLoad(label.dataLocation());
+    }
+
+    static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
+    {
+        AssemblerType::replaceWithAddressComputation(label.dataLocation());
     }
 };
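
Note on the CachedTempRegister / invalidateAllTempRegisters() machinery added above: a concrete macro assembler can wrap a scratch register in a CachedTempRegister and consult the cached value before re-materializing a constant; because Label construction and Jump linking call invalidateAllTempRegisters(), cached values never survive across control-flow merge points. The sketch below is illustrative only and is not part of this patch: the member name m_scratchCache and the helper scratchRegisterFor() are hypothetical, and a move(TrustedImm64, RegisterID) operation is assumed to be provided by the derived 64-bit macro assembler.

    // Hypothetical usage inside a derived macro assembler (not part of this diff).
    // Assumes a member: CachedTempRegister m_scratchCache; constructed as (this, scratchRegister).
    RegisterID scratchRegisterFor(int64_t imm)
    {
        intptr_t cached;
        // Reuse the scratch register if it is already known to hold `imm`.
        if (m_scratchCache.value(cached) && cached == static_cast<intptr_t>(imm))
            return m_scratchCache.registerIDNoInvalidate();

        // Otherwise materialize the constant and record it; any Label bound or
        // Jump linked afterwards calls invalidateAllTempRegisters(), dropping
        // this cached value at control-flow boundaries.
        RegisterID scratch = m_scratchCache.registerIDInvalidate();
        move(TrustedImm64(imm), scratch);
        m_scratchCache.setValue(static_cast<intptr_t>(imm));
        return scratch;
    }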