/*
 * Copyright (C) 2008, 2012, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef AbstractMacroAssembler_h
#define AbstractMacroAssembler_h

#include "AbortReason.h"
#include "AssemblerBuffer.h"
#include "CodeLocation.h"
#include "MacroAssemblerCodeRef.h"
#include "Options.h"
#include "WeakRandom.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/Noncopyable.h>

#if ENABLE(ASSEMBLER)

namespace JSC {
inline bool isARMv7s()
{
#if CPU(APPLE_ARMV7S)
    return true;
#else
    return false;
#endif
}

inline bool isARM64()
{
#if CPU(ARM64)
    return true;
#else
    return false;
#endif
}

inline bool isX86()
{
#if CPU(X86_64) || CPU(X86)
    return true;
#else
    return false;
#endif
}
inline bool optimizeForARMv7s()
{
    return isARMv7s() && Options::enableArchitectureSpecificOptimizations();
}

inline bool optimizeForARM64()
{
    return isARM64() && Options::enableArchitectureSpecificOptimizations();
}

inline bool optimizeForX86()
{
    return isX86() && Options::enableArchitectureSpecificOptimizations();
}
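
// Illustrative only: these predicates let a JIT pick a code shape at
// code-generation time. The surrounding emission code sketched here lives in
// the concrete MacroAssembler subclasses and JIT tiers, not in this header:
//
//     if (optimizeForX86())
//         ... emit an x86-tuned instruction sequence ...
//     else
//         ... emit the generic sequence ...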

class LinkBuffer;
class RepatchBuffer;
class Watchpoint;
namespace DFG {
struct OSRExit;
}

template <class AssemblerType>
class AbstractMacroAssembler {
public:
    friend class JITWriteBarrierBase;
    typedef AssemblerType AssemblerType_T;

    typedef MacroAssemblerCodePtr CodePtr;
    typedef MacroAssemblerCodeRef CodeRef;

    class Jump;
    typedef typename AssemblerType::RegisterID RegisterID;
    typedef typename AssemblerType::FPRegisterID FPRegisterID;
    static RegisterID firstRegister() { return AssemblerType::firstRegister(); }
    static RegisterID lastRegister() { return AssemblerType::lastRegister(); }

    static FPRegisterID firstFPRegister() { return AssemblerType::firstFPRegister(); }
    static FPRegisterID lastFPRegister() { return AssemblerType::lastFPRegister(); }
    // Section 1: MacroAssembler operand types
    //
    // The following types are used as operands to MacroAssembler operations,
    // describing immediate and memory operands to the instructions to be planted.

    enum Scale {
        TimesOne,
        TimesTwo,
        TimesFour,
        TimesEight,
    };

    static Scale timesPtr()
    {
        if (sizeof(void*) == 4)
            return TimesFour;
        return TimesEight;
    }
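
    // For example, indexing an array of pointer-sized elements can use
    // timesPtr() so the same code works on 32-bit and 64-bit targets
    // (loadPtr and the registers are assumed members of a concrete subclass):
    //
    //     loadPtr(BaseIndex(arrayReg, indexReg, timesPtr()), destReg);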
    // Address:
    //
    // Describes a simple base-offset address.
    struct Address {
        explicit Address(RegisterID base, int32_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        Address withOffset(int32_t additionalOffset)
        {
            return Address(base, offset + additionalOffset);
        }

        RegisterID base;
        int32_t offset;
    };
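
    // Illustrative usage, assuming a load32 member from a concrete
    // MacroAssembler subclass and regT0/regT1 as arbitrary RegisterIDs:
    //
    //     load32(Address(regT0, 8), regT1); // regT1 = *(int32_t*)(regT0 + 8)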
    struct ExtendedAddress {
        explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        RegisterID base;
        intptr_t offset;
    };
    // ImplicitAddress:
    //
    // This class is used for explicit 'load' and 'store' operations
    // (as opposed to situations in which a memory operand is provided
    // to a generic operation, such as an integer arithmetic instruction).
    //
    // In the case of a load (or store) operation we want to permit
    // addresses to be implicitly constructed, e.g. the two calls:
    //
    //     load32(Address(addrReg), destReg);
    //     load32(addrReg, destReg);
    //
    // are equivalent, and the explicit wrapping of the Address in the former
    // is unnecessary.
    struct ImplicitAddress {
        ImplicitAddress(RegisterID base)
            : base(base)
            , offset(0)
        {
        }

        ImplicitAddress(Address address)
            : base(address.base)
            , offset(address.offset)
        {
        }

        RegisterID base;
        int32_t offset;
    };
    // BaseIndex:
    //
    // Describes a complex addressing mode.
    struct BaseIndex {
        BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
            : base(base)
            , index(index)
            , scale(scale)
            , offset(offset)
        {
        }

        RegisterID base;
        RegisterID index;
        Scale scale;
        int32_t offset;
    };
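
    // The effective address is base + index * scale + offset. Illustrative
    // usage, again assuming load32 and the registers from a concrete subclass:
    //
    //     // regT0 = *(int32_t*)(baseReg + indexReg * 4 + 16)
    //     load32(BaseIndex(baseReg, indexReg, TimesFour, 16), regT0);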
    // AbsoluteAddress:
    //
    // Describes a memory operand given by a pointer. For regular load & store
    // operations an unwrapped void* will be used, rather than using this.
    struct AbsoluteAddress {
        explicit AbsoluteAddress(const void* ptr)
            : m_ptr(ptr)
        {
        }

        const void* m_ptr;
    };
    // TrustedImmPtr:
    //
    // A pointer sized immediate operand to an instruction - this is wrapped
    // in a class requiring explicit construction in order to differentiate
    // from pointers used as absolute addresses to memory operations.
    struct TrustedImmPtr {
        TrustedImmPtr() { }

        explicit TrustedImmPtr(const void* value)
            : m_value(value)
        {
        }

        // This is only here so that TrustedImmPtr(0) does not confuse the C++
        // overload handling rules.
        explicit TrustedImmPtr(int value)
            : m_value(0)
        {
            ASSERT_UNUSED(value, !value);
        }

        explicit TrustedImmPtr(size_t value)
            : m_value(reinterpret_cast<void*>(value))
        {
        }

        intptr_t asIntptr()
        {
            return reinterpret_cast<intptr_t>(m_value);
        }

        const void* m_value;
    };
    struct ImmPtr : private TrustedImmPtr {
        explicit ImmPtr(const void* value)
            : TrustedImmPtr(value)
        {
        }

        TrustedImmPtr asTrustedImmPtr() { return *this; }
    };
    // TrustedImm32:
    //
    // A 32bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct TrustedImm32 {
        TrustedImm32() { }

        explicit TrustedImm32(int32_t value)
            : m_value(value)
        {
        }

#if !CPU(X86_64)
        explicit TrustedImm32(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int32_t m_value;
    };
    struct Imm32 : private TrustedImm32 {
        explicit Imm32(int32_t value)
            : TrustedImm32(value)
        {
        }
#if !CPU(X86_64)
        explicit Imm32(TrustedImmPtr ptr)
            : TrustedImm32(ptr)
        {
        }
#endif
        const TrustedImm32& asTrustedImm32() const { return *this; }
    };
    // TrustedImm64:
    //
    // A 64bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct TrustedImm64 {
        TrustedImm64() { }

        explicit TrustedImm64(int64_t value)
            : m_value(value)
        {
        }

#if CPU(X86_64) || CPU(ARM64)
        explicit TrustedImm64(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int64_t m_value;
    };
    struct Imm64 : private TrustedImm64 {
        explicit Imm64(int64_t value)
            : TrustedImm64(value)
        {
        }
#if CPU(X86_64) || CPU(ARM64)
        explicit Imm64(TrustedImmPtr ptr)
            : TrustedImm64(ptr)
        {
        }
#endif
        const TrustedImm64& asTrustedImm64() const { return *this; }
    };
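
    // The Trusted* immediates carry values the JIT itself computed. The
    // Imm32/Imm64/ImmPtr wrappers mark values that may be influenced by
    // untrusted input; a MacroAssembler subclass can choose to blind such
    // values (see canBlind() and shouldBlindForSpecificArch() below) rather
    // than emit them verbatim into executable code.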

    // Section 2: MacroAssembler code buffer handles
    //
    // The following types are used to reference items in the code buffer
    // during JIT code generation. For example, the type Jump is used to
    // track the location of a jump instruction so that it may later be
    // linked to a label marking its destination.
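
    // The typical pattern, assuming a branch32 member provided by a concrete
    // MacroAssembler subclass:
    //
    //     Jump slowCase = masm.branch32(Above, regT0, TrustedImm32(max));
    //     ... emit the fast path ...
    //     slowCase.link(&masm); // the branch now targets this point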
    // Label:
    //
    // A Label records a point in the generated instruction stream, typically such that
    // it may be used as a destination for a jump.
    class Label {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend struct DFG::OSRExit;
        friend class Jump;
        friend class MacroAssemblerCodeRef;
        friend class LinkBuffer;
        friend class Watchpoint;

    public:
        Label()
        {
        }

        Label(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
            masm->invalidateAllTempRegisters();
        }

        bool isSet() const { return m_label.isSet(); }
    private:
        AssemblerLabel m_label;
    };
    // ConvertibleLoadLabel:
    //
    // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
    // so that:
    //
    //     loadPtr(Address(a, i), b)
    //
    // becomes:
    //
    //     addPtr(TrustedImmPtr(i), a, b)
    class ConvertibleLoadLabel {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;

    public:
        ConvertibleLoadLabel()
        {
        }

        ConvertibleLoadLabel(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.labelIgnoringWatchpoints())
        {
        }

        bool isSet() const { return m_label.isSet(); }
    private:
        AssemblerLabel m_label;
    };
    // DataLabelPtr:
    //
    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    // patched after the code has been generated.
    class DataLabelPtr {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;

    public:
        DataLabelPtr()
        {
        }

        DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
    };
    // DataLabel32:
    //
    // A DataLabel32 is used to refer to a location in the code containing a 32-bit constant to be
    // patched after the code has been generated.
    class DataLabel32 {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;

    public:
        DataLabel32()
        {
        }

        DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        AssemblerLabel label() const { return m_label; }

    private:
        AssemblerLabel m_label;
    };
    // DataLabelCompact:
    //
    // A DataLabelCompact is used to refer to a location in the code containing a
    // compact immediate to be patched after the code has been generated.
    class DataLabelCompact {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;

    public:
        DataLabelCompact()
        {
        }

        DataLabelCompact(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        DataLabelCompact(AssemblerLabel label)
            : m_label(label)
        {
        }

        AssemblerLabel label() const { return m_label; }

    private:
        AssemblerLabel m_label;
    };
    // Call:
    //
    // A Call object is a reference to a call instruction that has been planted
    // into the code buffer - it is typically used to link the call, setting the
    // relative offset such that when executed it will call to the desired
    // destination.
    class Call {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;

    public:
        enum Flags {
            None = 0x0,
            Linkable = 0x1,
            Near = 0x2,
            LinkableNear = 0x3,
        };

        Call()
            : m_flags(None)
        {
        }

        Call(AssemblerLabel jmp, Flags flags)
            : m_label(jmp)
            , m_flags(flags)
        {
        }

        bool isFlagSet(Flags flag)
        {
            return m_flags & flag;
        }

        static Call fromTailJump(Jump jump)
        {
            return Call(jump.m_label, Linkable);
        }

        AssemblerLabel m_label;
    private:
        Flags m_flags;
    };
    // Jump:
    //
    // A jump object is a reference to a jump instruction that has been planted
    // into the code buffer - it is typically used to link the jump, setting the
    // relative offset such that when executed it will jump to the desired
    // destination.
    class Jump {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class Call;
        friend struct DFG::OSRExit;
        friend class LinkBuffer;

    public:
        Jump()
        {
        }
#if CPU(ARM_THUMB2)
        // Fixme: this information should be stored in the instruction stream, not in the Jump object.
        Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }
#elif CPU(ARM64)
        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_is64Bit(is64Bit)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_bitNumber(bitNumber)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
        }
#elif CPU(SH4)
        Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
            : m_label(jmp)
            , m_type(type)
        {
        }
#else
        Jump(AssemblerLabel jmp)
            : m_label(jmp)
        {
        }
#endif

        Label label() const
        {
            Label result;
            result.m_label = m_label;
            return result;
        }
        void link(AbstractMacroAssembler<AssemblerType>* masm) const
        {
            masm->invalidateAllTempRegisters();

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(SH4)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
#else
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
#endif
        }
        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const
        {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#else
            masm->m_assembler.linkJump(m_label, label.m_label);
#endif
        }
        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
#if CPU(ARM_THUMB2)
        ARMv7Assembler::JumpType m_type;
        ARMv7Assembler::Condition m_condition;
#elif CPU(ARM64)
        ARM64Assembler::JumpType m_type;
        ARM64Assembler::Condition m_condition;
        bool m_is64Bit;
        unsigned m_bitNumber;
        ARM64Assembler::RegisterID m_compareRegister;
#endif
#if CPU(SH4)
        SH4Assembler::JumpType m_type;
#endif
    };
    struct PatchableJump {
        PatchableJump()
        {
        }

        explicit PatchableJump(Jump jump)
            : m_jump(jump)
        {
        }

        operator Jump&() { return m_jump; }

        Jump m_jump;
    };
    // JumpList:
    //
    // A JumpList is a set of Jump objects.
    // All jumps in the set will be linked to the same destination.
    class JumpList {
        friend class LinkBuffer;

    public:
        typedef Vector<Jump, 2> JumpVector;

        JumpList() { }

        JumpList(Jump jump)
        {
            append(jump);
        }
        void link(AbstractMacroAssembler<AssemblerType>* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].link(masm);
            m_jumps.clear();
        }
        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].linkTo(label, masm);
            m_jumps.clear();
        }
        void append(Jump jump)
        {
            m_jumps.append(jump);
        }

        void append(const JumpList& other)
        {
            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
        }

        bool empty()
        {
            return !m_jumps.size();
        }

        void clear()
        {
            m_jumps.clear();
        }

        const JumpVector& jumps() const { return m_jumps; }

    private:
        JumpVector m_jumps;
    };
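
    // Illustrative usage, assuming branchTest32 from a concrete subclass:
    //
    //     JumpList failures;
    //     failures.append(masm.branchTest32(...));
    //     failures.append(masm.branchTest32(...));
    //     ... emit the success path ...
    //     failures.link(&masm); // every collected jump targets this point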

    // Section 3: Misc admin methods
#if ENABLE(DFG_JIT)
    Label labelIgnoringWatchpoints()
    {
        Label result;
        result.m_label = m_assembler.labelIgnoringWatchpoints();
        return result;
    }
#else
    Label labelIgnoringWatchpoints()
    {
        return label();
    }
#endif

    Label label()
    {
        return Label(this);
    }

    void padBeforePatch()
    {
        // Rely on the fact that asking for a label already does the padding.
        label();
    }

    Label watchpointLabel()
    {
        Label result;
        result.m_label = m_assembler.labelForWatchpoint();
        return result;
    }

    Label align()
    {
        m_assembler.align(16);
        return Label(this);
    }
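
    // When register-allocation validation is enabled, the JIT records the
    // instruction offset of every register allocation decision; linking a
    // branch then asserts (in checkOffsets() below) that no recorded offset
    // falls within the branch's range, since jumping over an allocation point
    // would leave the two paths disagreeing about which registers are live.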
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    class RegisterAllocationOffset {
    public:
        RegisterAllocationOffset(unsigned offset)
            : m_offset(offset)
        {
        }

        void checkOffsets(unsigned low, unsigned high)
        {
            RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
        }

    private:
        unsigned m_offset;
    };

    void addRegisterAllocationAtOffset(unsigned offset)
    {
        m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
    }

    void clearRegisterAllocationOffsets()
    {
        m_registerAllocationForOffsets.clear();
    }

    void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
    {
        if (offset1 > offset2)
            std::swap(offset1, offset2);

        size_t size = m_registerAllocationForOffsets.size();
        for (size_t i = 0; i < size; ++i)
            m_registerAllocationForOffsets[i].checkOffsets(offset1, offset2);
    }
#endif
    template<typename T, typename U>
    static ptrdiff_t differenceBetween(T from, U to)
    {
        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    }

    static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
    {
        return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
    }

    unsigned debugOffset() { return m_assembler.debugOffset(); }

    ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
    {
        AssemblerType::cacheFlush(code, size);
    }

    AssemblerType m_assembler;

protected:
    AbstractMacroAssembler()
        : m_randomSource(cryptographicallyRandomNumber())
    {
        invalidateAllTempRegisters();
    }

    uint32_t random()
    {
        return m_randomSource.getUint32();
    }

    WeakRandom m_randomSource;
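
    // random() feeds immediate blinding in the MacroAssembler subclasses: a
    // fast WeakRandom stream seeded once from cryptographicallyRandomNumber(),
    // so each blinded constant avoids a fresh cryptographic draw.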
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
#endif
    static bool haveScratchRegisterForBlinding()
    {
        return false;
    }
    static RegisterID scratchRegisterForBlinding()
    {
        UNREACHABLE_FOR_PLATFORM();
        return firstRegister();
    }
    static bool canBlind() { return false; }
    static bool shouldBlindForSpecificArch(uint32_t) { return false; }
    static bool shouldBlindForSpecificArch(uint64_t) { return false; }
    class CachedTempRegister {
        friend class DataLabelPtr;
        friend class DataLabel32;
        friend class DataLabelCompact;
        friend class Jump;
        friend class Label;

    public:
        CachedTempRegister(AbstractMacroAssembler<AssemblerType>* masm, RegisterID registerID)
            : m_masm(masm)
            , m_registerID(registerID)
            , m_value(0)
            , m_validBit(1 << static_cast<unsigned>(registerID))
        {
            ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8));
        }

        ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }

        ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }

        bool value(intptr_t& value)
        {
            value = m_value;
            return m_masm->isTempRegisterValid(m_validBit);
        }

        void setValue(intptr_t value)
        {
            m_value = value;
            m_masm->setTempRegisterValid(m_validBit);
        }

        ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }

    private:
        AbstractMacroAssembler<AssemblerType>* m_masm;
        RegisterID m_registerID;
        intptr_t m_value;
        unsigned m_validBit;
    };
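
    // Each CachedTempRegister owns one bit of m_tempRegistersValidBits (below).
    // Taking a Label or linking a Jump calls invalidateAllTempRegisters(),
    // because control may reach that point with unknown register contents; a
    // cached value may only be reused while its valid bit remains set.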
    ALWAYS_INLINE void invalidateAllTempRegisters()
    {
        m_tempRegistersValidBits = 0;
    }

    ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
    {
        return (m_tempRegistersValidBits & registerMask);
    }

    ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits &= ~registerMask;
    }

    ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits |= registerMask;
    }

    unsigned m_tempRegistersValidBits;
    friend class LinkBuffer;
    friend class RepatchBuffer;

    static void linkJump(void* code, Jump jump, CodeLocationLabel target)
    {
        AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
    }

    static void linkPointer(void* code, AssemblerLabel label, void* value)
    {
        AssemblerType::linkPointer(code, label, value);
    }

    static void* getLinkerAddress(void* code, AssemblerLabel label)
    {
        return AssemblerType::getRelocatedAddress(code, label);
    }

    static unsigned getLinkerCallReturnOffset(Call call)
    {
        return AssemblerType::getCallReturnOffset(call.m_label);
    }

    static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
    {
        AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
    }

    static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
    {
        AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
    {
        AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
    }

    static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
    {
        AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
    }

    static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
    {
        return AssemblerType::readPointer(dataLabelPtr.dataLocation());
    }

    static void replaceWithLoad(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithLoad(label.dataLocation());
    }

    static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithAddressComputation(label.dataLocation());
    }
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // AbstractMacroAssembler_h