/*
 * Copyright (C) 2008, 2012, 2014, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef AbstractMacroAssembler_h
#define AbstractMacroAssembler_h

#include "AbortReason.h"
#include "AssemblerBuffer.h"
#include "CodeLocation.h"
#include "MacroAssemblerCodeRef.h"
#include "Options.h"
#include "WeakRandom.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/Noncopyable.h>

#if ENABLE(ASSEMBLER)

namespace JSC {
inline bool isARMv7IDIVSupported()
{
#if HAVE(ARM_IDIV_INSTRUCTIONS)
    return true;
#else
    return false;
#endif
}

inline bool isARM64()
{
#if CPU(ARM64)
    return true;
#else
    return false;
#endif
}

inline bool isX86()
{
#if CPU(X86_64) || CPU(X86)
    return true;
#else
    return false;
#endif
}

inline bool optimizeForARMv7IDIVSupported()
{
    return isARMv7IDIVSupported() && Options::enableArchitectureSpecificOptimizations();
}

inline bool optimizeForARM64()
{
    return isARM64() && Options::enableArchitectureSpecificOptimizations();
}

inline bool optimizeForX86()
{
    return isX86() && Options::enableArchitectureSpecificOptimizations();
}
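
// Usage sketch (illustrative, not part of this header's interface): JIT code generators
// typically consult these predicates before choosing an architecture-specific fast path.
// The emitFastPath()/emitGenericPath() helpers below are hypothetical placeholders.
//
//     if (optimizeForX86())
//         emitFastPath(jit);     // x86-only instruction selection
//     else
//         emitGenericPath(jit);  // portable fallback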
class LinkBuffer;
class RepatchBuffer;
class Watchpoint;
namespace DFG {
struct OSRExit;
}

template <class AssemblerType, class MacroAssemblerType>
class AbstractMacroAssembler {
public:
    friend class JITWriteBarrierBase;
    typedef AbstractMacroAssembler<AssemblerType, MacroAssemblerType> AbstractMacroAssemblerType;
    typedef AssemblerType AssemblerType_T;

    typedef MacroAssemblerCodePtr CodePtr;
    typedef MacroAssemblerCodeRef CodeRef;

    class Jump;

    typedef typename AssemblerType::RegisterID RegisterID;
    typedef typename AssemblerType::FPRegisterID FPRegisterID;

    static RegisterID firstRegister() { return AssemblerType::firstRegister(); }
    static RegisterID lastRegister() { return AssemblerType::lastRegister(); }

    static FPRegisterID firstFPRegister() { return AssemblerType::firstFPRegister(); }
    static FPRegisterID lastFPRegister() { return AssemblerType::lastFPRegister(); }
    // Section 1: MacroAssembler operand types
    //
    // The following types are used as operands to MacroAssembler operations,
    // describing immediate and memory operands to the instructions to be planted.

    enum Scale {
        TimesOne,
        TimesTwo,
        TimesFour,
        TimesEight,
    };

    static Scale timesPtr()
    {
        if (sizeof(void*) == 4)
            return TimesFour;
        return TimesEight;
    }
    // Address:
    //
    // Describes a simple base-offset address.
    struct Address {
        explicit Address(RegisterID base, int32_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        Address withOffset(int32_t additionalOffset)
        {
            return Address(base, offset + additionalOffset);
        }

        RegisterID base;
        int32_t offset;
    };
    struct ExtendedAddress {
        explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        RegisterID base;
        intptr_t offset;
    };
    // ImplicitAddress:
    //
    // This class is used for explicit 'load' and 'store' operations
    // (as opposed to situations in which a memory operand is provided
    // to a generic operation, such as an integer arithmetic instruction).
    //
    // In the case of a load (or store) operation we want to permit
    // addresses to be implicitly constructed, e.g. the two calls:
    //
    //     load32(Address(addrReg), destReg);
    //     load32(addrReg, destReg);
    //
    // are equivalent, and the explicit wrapping of the Address in the former
    // is unnecessary.
    struct ImplicitAddress {
        ImplicitAddress(RegisterID base)
            : base(base)
            , offset(0)
        {
        }

        ImplicitAddress(Address address)
            : base(address.base)
            , offset(address.offset)
        {
        }

        RegisterID base;
        int32_t offset;
    };
    // BaseIndex:
    //
    // Describes a complex addressing mode.
    struct BaseIndex {
        BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
            : base(base)
            , index(index)
            , scale(scale)
            , offset(offset)
        {
        }

        RegisterID base;
        RegisterID index;
        Scale scale;
        int32_t offset;

        BaseIndex withOffset(int32_t additionalOffset)
        {
            return BaseIndex(base, index, scale, offset + additionalOffset);
        }
    };
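
    // Usage sketch (illustrative): a BaseIndex operand addresses
    // base + (index << scale) + offset. Assuming placeholder registers holding a
    // buffer pointer and an element index:
    //
    //     // load 32 bits from bufferReg + (indexReg * 4) + 8
    //     load32(BaseIndex(bufferReg, indexReg, TimesFour, 8), destReg);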
    // AbsoluteAddress:
    //
    // Describes a memory operand given by a pointer. For regular load & store
    // operations an unwrapped void* will be used, rather than using this.
    struct AbsoluteAddress {
        explicit AbsoluteAddress(const void* ptr)
            : m_ptr(ptr)
        {
        }

        const void* m_ptr;
    };
    // TrustedImmPtr:
    //
    // A pointer sized immediate operand to an instruction - this is wrapped
    // in a class requiring explicit construction in order to differentiate
    // from pointers used as absolute addresses to memory operations.
    struct TrustedImmPtr {
        TrustedImmPtr() { }

        explicit TrustedImmPtr(const void* value)
            : m_value(value)
        {
        }

        // This is only here so that TrustedImmPtr(0) does not confuse the C++
        // overload handling rules.
        explicit TrustedImmPtr(int value)
            : m_value(0)
        {
            ASSERT_UNUSED(value, !value);
        }

        explicit TrustedImmPtr(size_t value)
            : m_value(reinterpret_cast<void*>(value))
        {
        }

        intptr_t asIntptr()
        {
            return reinterpret_cast<intptr_t>(m_value);
        }

        const void* m_value;
    };
    struct ImmPtr : private TrustedImmPtr {
        explicit ImmPtr(const void* value)
            : TrustedImmPtr(value)
        {
        }

        TrustedImmPtr asTrustedImmPtr() { return *this; }
    };
    // TrustedImm32:
    //
    // A 32bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct TrustedImm32 {
        TrustedImm32() { }

        explicit TrustedImm32(int32_t value)
            : m_value(value)
        {
        }

#if !CPU(X86_64)
        explicit TrustedImm32(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int32_t m_value;
    };
    struct Imm32 : private TrustedImm32 {
        explicit Imm32(int32_t value)
            : TrustedImm32(value)
        {
        }

#if !CPU(X86_64)
        explicit Imm32(TrustedImmPtr ptr)
            : TrustedImm32(ptr)
        {
        }
#endif

        const TrustedImm32& asTrustedImm32() const { return *this; }
    };
    // TrustedImm64:
    //
    // A 64bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct TrustedImm64 {
        TrustedImm64() { }

        explicit TrustedImm64(int64_t value)
            : m_value(value)
        {
        }

#if CPU(X86_64) || CPU(ARM64)
        explicit TrustedImm64(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int64_t m_value;
    };
    struct Imm64 : private TrustedImm64 {
        explicit Imm64(int64_t value)
            : TrustedImm64(value)
        {
        }

#if CPU(X86_64) || CPU(ARM64)
        explicit Imm64(TrustedImmPtr ptr)
            : TrustedImm64(ptr)
        {
        }
#endif

        const TrustedImm64& asTrustedImm64() const { return *this; }
    };
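
    // Usage sketch (illustrative): the Trusted* immediates are for constants the JIT itself
    // computes; the untrusted Imm32/Imm64/ImmPtr wrappers mark values that may be influenced
    // by untrusted input, so a concrete MacroAssembler may apply constant blinding to them.
    // destReg and untrustedValue below are placeholders.
    //
    //     move(TrustedImm32(0), destReg);        // trusted constant, emitted as-is
    //     move(Imm32(untrustedValue), destReg);  // candidate for constant blinding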
    // Section 2: MacroAssembler code buffer handles
    //
    // The following types are used to reference items in the code buffer
    // during JIT code generation. For example, the type Jump is used to
    // track the location of a jump instruction so that it may later be
    // linked to a label marking its destination.

    // Label:
    //
    // A Label records a point in the generated instruction stream, typically such that
    // it may be used as a destination for a jump.
    class Label {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend struct DFG::OSRExit;
        friend class Jump;
        friend class MacroAssemblerCodeRef;
        friend class LinkBuffer;
        friend class Watchpoint;

    public:
        Label()
        {
        }

        Label(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
            masm->invalidateAllTempRegisters();
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
    };
    // ConvertibleLoadLabel:
    //
    // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
    // so that:
    //
    //     loadPtr(Address(a, i), b)
    //
    // becomes:
    //
    //     addPtr(TrustedImmPtr(i), a, b)
    class ConvertibleLoadLabel {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;

    public:
        ConvertibleLoadLabel()
        {
        }

        ConvertibleLoadLabel(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.labelIgnoringWatchpoints())
        {
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
    };
    // DataLabelPtr:
    //
    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    // patched after the code has been generated.
    class DataLabelPtr {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;

    public:
        DataLabelPtr()
        {
        }

        DataLabelPtr(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
    };
    // DataLabel32:
    //
    // A DataLabel32 is used to refer to a location in the code containing a 32-bit constant to be
    // patched after the code has been generated.
    class DataLabel32 {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;

    public:
        DataLabel32()
        {
        }

        DataLabel32(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        AssemblerLabel label() const { return m_label; }

    private:
        AssemblerLabel m_label;
    };
    // DataLabelCompact:
    //
    // A DataLabelCompact is used to refer to a location in the code containing a
    // compact immediate to be patched after the code has been generated.
    class DataLabelCompact {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend class LinkBuffer;

    public:
        DataLabelCompact()
        {
        }

        DataLabelCompact(AbstractMacroAssemblerType* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        DataLabelCompact(AssemblerLabel label)
            : m_label(label)
        {
        }

        AssemblerLabel label() const { return m_label; }

    private:
        AssemblerLabel m_label;
    };
    // Call:
    //
    // A Call object is a reference to a call instruction that has been planted
    // into the code buffer - it is typically used to link the call, setting the
    // relative offset such that when executed it will call to the desired
    // destination.
    class Call {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;

    public:
        enum Flags {
            None = 0x0,
            Linkable = 0x1,
            Near = 0x2,
            LinkableNear = 0x3,
        };

        Call()
            : m_flags(None)
        {
        }

        Call(AssemblerLabel jmp, Flags flags)
            : m_label(jmp)
            , m_flags(flags)
        {
        }

        bool isFlagSet(Flags flag)
        {
            return m_flags & flag;
        }

        static Call fromTailJump(Jump jump)
        {
            return Call(jump.m_label, Linkable);
        }

        AssemblerLabel m_label;
    private:
        Flags m_flags;
    };
    // Jump:
    //
    // A jump object is a reference to a jump instruction that has been planted
    // into the code buffer - it is typically used to link the jump, setting the
    // relative offset such that when executed it will jump to the desired
    // destination.
    class Jump {
        template<class TemplateAssemblerType, class TemplateMacroAssemblerType>
        friend class AbstractMacroAssembler;
        friend class Call;
        friend struct DFG::OSRExit;
        friend class LinkBuffer;

    public:
        Jump()
        {
        }

#if CPU(ARM_THUMB2)
        // FIXME: this information should be stored in the instruction stream, not in the Jump object.
        Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }
#elif CPU(ARM64)
        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_is64Bit(is64Bit)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_bitNumber(bitNumber)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
        }
#elif CPU(SH4)
        Jump(AssemblerLabel jmp, SH4Assembler::JumpType type = SH4Assembler::JumpFar)
            : m_label(jmp)
            , m_type(type)
        {
        }
#else
        Jump(AssemblerLabel jmp)
            : m_label(jmp)
        {
        }
#endif

        Label label() const
        {
            Label result;
            result.m_label = m_label;
            return result;
        }

        void link(AbstractMacroAssemblerType* masm) const
        {
            masm->invalidateAllTempRegisters();

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(SH4)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type);
#else
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
#endif
        }

        void linkTo(Label label, AbstractMacroAssemblerType* masm) const
        {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#elif CPU(SH4)
            masm->m_assembler.linkJump(m_label, label.m_label, m_type);
#else
            masm->m_assembler.linkJump(m_label, label.m_label);
#endif
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
#if CPU(ARM_THUMB2)
        ARMv7Assembler::JumpType m_type;
        ARMv7Assembler::Condition m_condition;
#elif CPU(ARM64)
        ARM64Assembler::JumpType m_type;
        ARM64Assembler::Condition m_condition;
        bool m_is64Bit;
        unsigned m_bitNumber;
        ARM64Assembler::RegisterID m_compareRegister;
#elif CPU(SH4)
        SH4Assembler::JumpType m_type;
#endif
    };
    struct PatchableJump {
        PatchableJump()
        {
        }

        explicit PatchableJump(Jump jump)
            : m_jump(jump)
        {
        }

        operator Jump&() { return m_jump; }

        Jump m_jump;
    };
    // JumpList:
    //
    // A JumpList is a set of Jump objects.
    // All jumps in the set will be linked to the same destination.
    class JumpList {
        friend class LinkBuffer;

    public:
        typedef Vector<Jump, 2> JumpVector;

        JumpList() { }

        JumpList(Jump jump)
        {
            if (jump.isSet())
                append(jump);
        }

        void link(AbstractMacroAssemblerType* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].link(masm);
        }

        void linkTo(Label label, AbstractMacroAssemblerType* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].linkTo(label, masm);
        }

        void append(Jump jump)
        {
            m_jumps.append(jump);
        }

        void append(const JumpList& other)
        {
            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
        }

        bool empty()
        {
            return !m_jumps.size();
        }

        void clear()
        {
            m_jumps.clear();
        }

        const JumpVector& jumps() const { return m_jumps; }

    private:
        JumpVector m_jumps;
    };
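
    // Usage sketch (illustrative): a JumpList commonly collects every failure branch of a
    // fast path so that all of them can be bound to one slow-path location. The register
    // names and condition values below are placeholders.
    //
    //     JumpList failureCases;
    //     failureCases.append(jit.branchTest32(MacroAssembler::Zero, valueReg));
    //     failureCases.append(jit.branch32(MacroAssembler::NotEqual, typeReg, TrustedImm32(expectedType)));
    //     // ... fast path ...
    //     failureCases.link(&jit); // bind every collected jump to the current output position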
    // Section 3: Misc admin methods
#if ENABLE(DFG_JIT)
    Label labelIgnoringWatchpoints()
    {
        Label result;
        result.m_label = m_assembler.labelIgnoringWatchpoints();
        return result;
    }
#else
    Label labelIgnoringWatchpoints()
    {
        return label();
    }
#endif

    Label label()
    {
        return Label(this);
    }

    void padBeforePatch()
    {
        // Rely on the fact that asking for a label already does the padding.
        label();
    }

    Label watchpointLabel()
    {
        Label result;
        result.m_label = m_assembler.labelForWatchpoint();
        return result;
    }

    Label align()
    {
        m_assembler.align(16);
        return Label(this);
    }
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    class RegisterAllocationOffset {
    public:
        RegisterAllocationOffset(unsigned offset)
            : m_offset(offset)
        {
        }

        void checkOffsets(unsigned low, unsigned high)
        {
            RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
        }

    private:
        unsigned m_offset;
    };

    void addRegisterAllocationAtOffset(unsigned offset)
    {
        m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
    }

    void clearRegisterAllocationOffsets()
    {
        m_registerAllocationForOffsets.clear();
    }

    void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
    {
        if (offset1 > offset2)
            std::swap(offset1, offset2);

        size_t size = m_registerAllocationForOffsets.size();
        for (size_t i = 0; i < size; ++i)
            m_registerAllocationForOffsets[i].checkOffsets(offset1, offset2);
    }
#endif
    template<typename T, typename U>
    static ptrdiff_t differenceBetween(T from, U to)
    {
        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    }

    static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
    {
        return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
    }
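
    // Usage sketch (illustrative): differenceBetween() reports the byte distance between two
    // already-planted items, e.g. to measure the size of an emitted sequence:
    //
    //     Label start = label();
    //     // ... emit instructions ...
    //     ptrdiff_t emittedBytes = differenceBetween(start, label());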
    unsigned debugOffset() { return m_assembler.debugOffset(); }

    ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
    {
        AssemblerType::cacheFlush(code, size);
    }
#if ENABLE(MASM_PROBE)

    struct CPUState {
        #define DECLARE_REGISTER(_type, _regName) \
            _type _regName;
        FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
        #undef DECLARE_REGISTER

        static const char* registerName(RegisterID regID)
        {
            switch (regID) {
                #define DECLARE_REGISTER(_type, _regName) \
                case RegisterID::_regName: \
                    return #_regName;
                FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
                #undef DECLARE_REGISTER
            }
            RELEASE_ASSERT_NOT_REACHED();
        }

        static const char* registerName(FPRegisterID regID)
        {
            switch (regID) {
                #define DECLARE_REGISTER(_type, _regName) \
                case FPRegisterID::_regName: \
                    return #_regName;
                FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
                #undef DECLARE_REGISTER
            }
            RELEASE_ASSERT_NOT_REACHED();
        }

        void* registerValue(RegisterID regID)
        {
            switch (regID) {
                #define DECLARE_REGISTER(_type, _regName) \
                case RegisterID::_regName: \
                    return _regName;
                FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
                #undef DECLARE_REGISTER
            }
            RELEASE_ASSERT_NOT_REACHED();
        }

        double registerValue(FPRegisterID regID)
        {
            switch (regID) {
                #define DECLARE_REGISTER(_type, _regName) \
                case FPRegisterID::_regName: \
                    return _regName;
                FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
                #undef DECLARE_REGISTER
            }
            RELEASE_ASSERT_NOT_REACHED();
        }
    };
    typedef void (*ProbeFunction)(struct ProbeContext*);

    struct ProbeContext {
        ProbeFunction probeFunction;
        void* arg1;
        void* arg2;
        CPUState cpu;

        void print(int indentation = 0)
        {
            #define INDENT MacroAssemblerType::printIndent(indentation)

            INDENT, dataLogF("ProbeContext %p {\n", this);
            INDENT, dataLogF("probeFunction: %p\n", probeFunction);
            INDENT, dataLogF("arg1: %p %llu\n", arg1, reinterpret_cast<int64_t>(arg1));
            INDENT, dataLogF("arg2: %p %llu\n", arg2, reinterpret_cast<int64_t>(arg2));
            MacroAssemblerType::printCPU(cpu, indentation);
            INDENT, dataLog("}\n");

            #undef INDENT
        }
    };
    static void printIndent(int indentation)
    {
        for (; indentation > 0; indentation--)
            dataLog("    ");
    }

    static void printCPU(CPUState& cpu, int indentation = 0)
    {
        #define INDENT printIndent(indentation)

        INDENT, dataLog("cpu: {\n");
        MacroAssemblerType::printCPURegisters(cpu, indentation + 1);
        INDENT, dataLog("}\n");

        #undef INDENT
    }
    // This is a marker type only used with print(). See print() below for details.
    struct AllRegisters { };

    // Emits code which will print debugging info at runtime. The type of values that
    // can be printed is encapsulated in the PrintArg struct below. Here are some
    // examples:
    //
    //     print("Hello world\n"); // Emits code to print the string.
    //
    //     CodeBlock* cb = ...;
    //     print(cb);              // Emits code to print the pointer value.
    //
    //     RegisterID regID = ...;
    //     print(regID);           // Emits code to print the register value (not the id).
    //
    //     // Emits code to print all registers. Unlike other items, this prints
    //     // multiple lines as follows:
    //     //     eax: 0x123456789
    //     //     ebx: 0x000000abc
    //     //     ...
    //     print(AllRegisters());
    //
    //     // Print multiple things at once. This incurs the probe overhead only once
    //     // to print all the items.
    //     print("cb:", cb, " regID:", regID, " cpu:\n", AllRegisters());

    template<typename... Arguments>
    void print(Arguments... args)
    {
        printInternal(static_cast<MacroAssemblerType*>(this), args...);
    }
    // This function will be called by printCPU() to print the contents of the
    // target specific registers which are saved away in the CPUState struct.
    // printCPURegisters() should make use of printIndentation() to print the
    // registers with the appropriate amount of indentation.
    //
    // Note: printCPURegisters() should be implemented by the target specific
    // MacroAssembler. This prototype is only provided here to document the
    // interface.
    static void printCPURegisters(CPUState&, int indentation = 0);

    // This function will be called by print() to print the contents of a
    // specific register (from the CPUState) in line with other items in the
    // print stream. Hence, no indentation is needed.
    //
    // Note: printRegister() should be implemented by the target specific
    // MacroAssembler. These prototypes are only provided here to document their
    // interface.
    static void printRegister(CPUState&, RegisterID);
    static void printRegister(CPUState&, FPRegisterID);
    // This function emits code to preserve the CPUState (e.g. registers),
    // call a user supplied probe function, and restore the CPUState before
    // continuing with other JIT generated code.
    //
    // The user supplied probe function will be called with a single pointer to
    // a ProbeContext struct (defined above) which contains, among other things,
    // the preserved CPUState. This allows the user probe function to inspect
    // the CPUState at that point in the JIT generated code.
    //
    // If the user probe function alters the register values in the ProbeContext,
    // the altered values will be loaded into the CPU registers when the probe
    // returns.
    //
    // The ProbeContext is stack allocated and is only valid for the duration
    // of the call to the user probe function.
    //
    // Note: probe() should be implemented by the target specific MacroAssembler.
    // This prototype is only provided here to document the interface.
    void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
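
    // Usage sketch (illustrative): a probe can be planted anywhere in generated code to
    // inspect live state when that point executes. The callback name below is a placeholder,
    // and CPUState's field names are target specific.
    //
    //     static void reportHit(ProbeContext* context)
    //     {
    //         dataLogF("probe hit, arg1 = %p\n", context->arg1);
    //     }
    //     ...
    //     jit.probe(reportHit, someInterestingPointer);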
#endif // ENABLE(MASM_PROBE)

    AssemblerType m_assembler;

protected:
    AbstractMacroAssembler()
        : m_randomSource(cryptographicallyRandomNumber())
    {
        invalidateAllTempRegisters();
    }

    uint32_t random()
    {
        return m_randomSource.getUint32();
    }

    WeakRandom m_randomSource;
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
#endif
    static bool haveScratchRegisterForBlinding()
    {
        return false;
    }
    static RegisterID scratchRegisterForBlinding()
    {
        UNREACHABLE_FOR_PLATFORM();
        return firstRegister();
    }
    static bool canBlind() { return false; }
    static bool shouldBlindForSpecificArch(uint32_t) { return false; }
    static bool shouldBlindForSpecificArch(uint64_t) { return false; }
    class CachedTempRegister {
        friend class DataLabelPtr;
        friend class DataLabel32;
        friend class DataLabelCompact;
        friend class Jump;
        friend class Label;

    public:
        CachedTempRegister(AbstractMacroAssemblerType* masm, RegisterID registerID)
            : m_masm(masm)
            , m_registerID(registerID)
            , m_value(0)
            , m_validBit(1 << static_cast<unsigned>(registerID))
        {
            ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8));
        }

        ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }

        ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }

        bool value(intptr_t& value)
        {
            value = m_value;
            return m_masm->isTempRegisterValid(m_validBit);
        }

        void setValue(intptr_t value)
        {
            m_value = value;
            m_masm->setTempRegisterValid(m_validBit);
        }

        ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }

    private:
        AbstractMacroAssemblerType* m_masm;
        RegisterID m_registerID;
        intptr_t m_value;
        unsigned m_validBit;
    };
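
    // Usage sketch (illustrative): a target MacroAssembler keeps one CachedTempRegister per
    // scratch register and consults it before re-materializing a constant. The member name
    // m_cachedMemoryTempRegister below is a placeholder for whatever the target defines.
    //
    //     intptr_t cached;
    //     if (m_cachedMemoryTempRegister.value(cached) && cached == desiredValue)
    //         return m_cachedMemoryTempRegister.registerIDNoInvalidate(); // reuse without reloading
    //     // otherwise emit a load of desiredValue into the register and remember it:
    //     m_cachedMemoryTempRegister.setValue(desiredValue);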
    ALWAYS_INLINE void invalidateAllTempRegisters()
    {
        m_tempRegistersValidBits = 0;
    }

    ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
    {
        return (m_tempRegistersValidBits & registerMask);
    }

    ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits &= ~registerMask;
    }

    ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits |= registerMask;
    }

    unsigned m_tempRegistersValidBits;
    friend class LinkBuffer;
    friend class RepatchBuffer;
    static void linkJump(void* code, Jump jump, CodeLocationLabel target)
    {
        AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
    }

    static void linkPointer(void* code, AssemblerLabel label, void* value)
    {
        AssemblerType::linkPointer(code, label, value);
    }

    static void* getLinkerAddress(void* code, AssemblerLabel label)
    {
        return AssemblerType::getRelocatedAddress(code, label);
    }

    static unsigned getLinkerCallReturnOffset(Call call)
    {
        return AssemblerType::getCallReturnOffset(call.m_label);
    }

    static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
    {
        AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
    }

    static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
    {
        AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
    {
        AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
    }

    static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
    {
        AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
    }

    static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
    {
        return AssemblerType::readPointer(dataLabelPtr.dataLocation());
    }

    static void replaceWithLoad(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithLoad(label.dataLocation());
    }

    static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithAddressComputation(label.dataLocation());
    }
private:
#if ENABLE(MASM_PROBE)

    struct PrintArg {
        enum class Type {
            AllRegisters,
            RegisterID,
            FPRegisterID,
            ConstCharPtr,
            ConstVoidPtr,
            IntptrValue,
            UintptrValue,
        };

        PrintArg(AllRegisters&)
            : type(Type::AllRegisters)
        {
        }

        PrintArg(RegisterID regID)
            : type(Type::RegisterID)
        {
            u.gpRegisterID = regID;
        }

        PrintArg(FPRegisterID regID)
            : type(Type::FPRegisterID)
        {
            u.fpRegisterID = regID;
        }

        PrintArg(const char* ptr)
            : type(Type::ConstCharPtr)
        {
            u.constCharPtr = ptr;
        }

        PrintArg(const void* ptr)
            : type(Type::ConstVoidPtr)
        {
            u.constVoidPtr = ptr;
        }

        PrintArg(int value)
            : type(Type::IntptrValue)
        {
            u.intptrValue = value;
        }

        PrintArg(unsigned value)
            : type(Type::UintptrValue)
        {
            u.intptrValue = value;
        }

        PrintArg(intptr_t value)
            : type(Type::IntptrValue)
        {
            u.intptrValue = value;
        }

        PrintArg(uintptr_t value)
            : type(Type::UintptrValue)
        {
            u.uintptrValue = value;
        }

        Type type;
        union {
            RegisterID gpRegisterID;
            FPRegisterID fpRegisterID;
            const char* constCharPtr;
            const void* constVoidPtr;
            intptr_t intptrValue;
            uintptr_t uintptrValue;
        } u;
    };
    typedef Vector<PrintArg> PrintArgsList;

    template<typename FirstArg, typename... Arguments>
    static void appendPrintArg(PrintArgsList* argsList, FirstArg& firstArg, Arguments... otherArgs)
    {
        argsList->append(PrintArg(firstArg));
        appendPrintArg(argsList, otherArgs...);
    }

    static void appendPrintArg(PrintArgsList*) { }
    template<typename... Arguments>
    static void printInternal(MacroAssemblerType* masm, Arguments... args)
    {
        auto argsList = std::make_unique<PrintArgsList>();
        appendPrintArg(argsList.get(), args...);
        masm->probe(printCallback, argsList.release());
    }
    static void printCallback(ProbeContext* context)
    {
        typedef PrintArg Arg;
        PrintArgsList& argsList = *reinterpret_cast<PrintArgsList*>(context->arg1);
        for (size_t i = 0; i < argsList.size(); i++) {
            auto& arg = argsList[i];
            switch (arg.type) {
            case Arg::Type::AllRegisters:
                MacroAssemblerType::printCPU(context->cpu);
                break;
            case Arg::Type::RegisterID:
                MacroAssemblerType::printRegister(context->cpu, arg.u.gpRegisterID);
                break;
            case Arg::Type::FPRegisterID:
                MacroAssemblerType::printRegister(context->cpu, arg.u.fpRegisterID);
                break;
            case Arg::Type::ConstCharPtr:
                dataLog(arg.u.constCharPtr);
                break;
            case Arg::Type::ConstVoidPtr:
                dataLogF("%p", arg.u.constVoidPtr);
                break;
            case Arg::Type::IntptrValue:
                dataLog(arg.u.intptrValue);
                break;
            case Arg::Type::UintptrValue:
                dataLog(arg.u.uintptrValue);
                break;
            }
        }
    }

#endif // ENABLE(MASM_PROBE)
}; // class AbstractMacroAssembler

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // AbstractMacroAssembler_h