/*
 * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef MacroAssembler_h
#define MacroAssembler_h

#if ENABLE(ASSEMBLER)

#if CPU(ARM_THUMB2)
#include "MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };

#elif CPU(ARM64)
#include "MacroAssemblerARM64.h"
namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; };

#elif CPU(ARM_TRADITIONAL)
#include "MacroAssemblerARM.h"
namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };

#elif CPU(MIPS)
#include "MacroAssemblerMIPS.h"
namespace JSC {
typedef MacroAssemblerMIPS MacroAssemblerBase;
};

#elif CPU(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };

#elif CPU(X86_64)
#include "MacroAssemblerX86_64.h"
namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };

#elif CPU(SH4)
#include "MacroAssemblerSH4.h"
namespace JSC {
typedef MacroAssemblerSH4 MacroAssemblerBase;
};

#else
#error "The MacroAssembler is not supported on this platform."
#endif
namespace JSC {

class MacroAssembler : public MacroAssemblerBase {
public:
    static RegisterID nextRegister(RegisterID reg)
    {
        return static_cast<RegisterID>(reg + 1);
    }

    static FPRegisterID nextFPRegister(FPRegisterID reg)
    {
        return static_cast<FPRegisterID>(reg + 1);
    }

    static unsigned numberOfRegisters()
    {
        return lastRegister() - firstRegister() + 1;
    }

    static unsigned registerIndex(RegisterID reg)
    {
        return reg - firstRegister();
    }

    static unsigned numberOfFPRegisters()
    {
        return lastFPRegister() - firstFPRegister() + 1;
    }

    static unsigned fpRegisterIndex(FPRegisterID reg)
    {
        return reg - firstFPRegister();
    }

    static unsigned registerIndex(FPRegisterID reg)
    {
        return fpRegisterIndex(reg) + numberOfRegisters();
    }

    static unsigned totalNumberOfRegisters()
    {
        return numberOfRegisters() + numberOfFPRegisters();
    }
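
    // registerIndex() and fpRegisterIndex() map RegisterID/FPRegisterID values onto a dense,
    // zero-based index space; FP registers are numbered after the GP registers, so combined
    // indices run from 0 to totalNumberOfRegisters() - 1.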
    using MacroAssemblerBase::pop;
    using MacroAssemblerBase::jump;
    using MacroAssemblerBase::branch32;
    using MacroAssemblerBase::move;
    using MacroAssemblerBase::add32;
    using MacroAssemblerBase::and32;
    using MacroAssemblerBase::branchAdd32;
    using MacroAssemblerBase::branchMul32;
#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(X86_64)
    using MacroAssemblerBase::branchPtr;
#endif
    using MacroAssemblerBase::branchSub32;
    using MacroAssemblerBase::lshift32;
    using MacroAssemblerBase::or32;
    using MacroAssemblerBase::rshift32;
    using MacroAssemblerBase::store32;
    using MacroAssemblerBase::sub32;
    using MacroAssemblerBase::urshift32;
    using MacroAssemblerBase::xor32;
    static bool isPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value == static_cast<int32_t>(value);
    }

    static const double twoToThe32; // This is super useful for some double code.
    // Utilities used by the DFG JIT.
    using MacroAssemblerBase::invert;

    static DoubleCondition invert(DoubleCondition cond)
    {
        switch (cond) {
        case DoubleEqual:
            return DoubleNotEqualOrUnordered;
        case DoubleNotEqual:
            return DoubleEqualOrUnordered;
        case DoubleGreaterThan:
            return DoubleLessThanOrEqualOrUnordered;
        case DoubleGreaterThanOrEqual:
            return DoubleLessThanOrUnordered;
        case DoubleLessThan:
            return DoubleGreaterThanOrEqualOrUnordered;
        case DoubleLessThanOrEqual:
            return DoubleGreaterThanOrUnordered;
        case DoubleEqualOrUnordered:
            return DoubleNotEqual;
        case DoubleNotEqualOrUnordered:
            return DoubleEqual;
        case DoubleGreaterThanOrUnordered:
            return DoubleLessThanOrEqual;
        case DoubleGreaterThanOrEqualOrUnordered:
            return DoubleLessThan;
        case DoubleLessThanOrUnordered:
            return DoubleGreaterThanOrEqual;
        case DoubleLessThanOrEqualOrUnordered:
            return DoubleGreaterThan;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return DoubleEqual; // make compiler happy
    }
    static bool isInvertible(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
        case NonZero:
            return true;
        default:
            return false;
        }
    }

    static ResultCondition invert(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
            return NonZero;
        case NonZero:
            return Zero;
        default:
            break;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return Zero; // Make compiler happy for release builds.
    }
    // Platform agnostic convenience functions,
    // described in terms of other macro assembly methods.
    void pop()
    {
        addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
    }
    void peek(RegisterID dest, int index = 0)
    {
        loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    Address addressForPoke(int index)
    {
        return Address(stackPointerRegister, (index * sizeof(void*)));
    }

    void poke(RegisterID src, int index = 0)
    {
        storePtr(src, addressForPoke(index));
    }

    void poke(TrustedImm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(TrustedImmPtr imm, int index = 0)
    {
        storePtr(imm, addressForPoke(index));
    }
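
    // Note: peek()/poke() address machine-word stack slots relative to the stack pointer,
    // e.g. poke(src, 2) stores src at [stackPointerRegister + 2 * sizeof(void*)] and a later
    // peek(dest, 2) reloads the same slot via addressForPoke().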
#if !CPU(ARM64)
    void pushToSave(RegisterID src)
    {
        push(src);
    }
    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
    {
        push(imm);
    }
    void popToRestore(RegisterID dest)
    {
        pop(dest);
    }
    void pushToSave(FPRegisterID src)
    {
        subPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
        storeDouble(src, stackPointerRegister);
    }
    void popToRestore(FPRegisterID dest)
    {
        loadDouble(stackPointerRegister, dest);
        addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
    }

    static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
#endif // !CPU(ARM64)
#if CPU(X86_64) || CPU(ARM64)
    void peek64(RegisterID dest, int index = 0)
    {
        load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    void poke(TrustedImm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }

    void poke64(RegisterID src, int index = 0)
    {
        store64(src, addressForPoke(index));
    }
#endif
#if CPU(MIPS)
    void poke(FPRegisterID src, int index = 0)
    {
        ASSERT(!(index & 1));
        storeDouble(src, addressForPoke(index));
    }
#endif
    // Immediate shifts only have 5 controllable bits
    // so we'll consider them safe for now.
    TrustedImm32 trustedImm32ForShift(Imm32 imm)
    {
        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
    }
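
    // For example, trustedImm32ForShift(Imm32(37)) yields TrustedImm32(5): only the low five
    // bits of a 32-bit shift amount are meaningful, so masking with 31 cannot change behaviour.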
    // Backwards branches, these are currently all implemented using existing forwards branch mechanisms.
    void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }
    void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
    {
        branch32(cond, op1, op2).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
    {
        branch32(cond, left, right).linkTo(target, this);
    }

    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
    {
        branchTestPtr(cond, reg).linkTo(target, this);
    }
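
    // Each overload above emits a forward branch and immediately links it to an already-bound
    // label, which is how a branch backwards to 'target' is expressed.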
#if !CPU(ARM_THUMB2) && !CPU(ARM64)
    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtr(cond, left, right));
    }

    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
    }

    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
    }

#if !CPU(ARM_TRADITIONAL)
    PatchableJump patchableJump()
    {
        return PatchableJump(jump());
    }

    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return PatchableJump(branchTest32(cond, reg, mask));
    }

    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        return PatchableJump(branch32(cond, reg, imm));
    }

    PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
    {
        return PatchableJump(branch32(cond, address, imm));
    }
#endif
#endif
    void jump(Label target)
    {
        jump().linkTo(target, this);
    }
    // Commute a relational condition, returns a new condition that will produce
    // the same results given the same inputs but with their positions exchanged.
    static RelationalCondition commute(RelationalCondition condition)
    {
        switch (condition) {
        case Above:
            return Below;
        case AboveOrEqual:
            return BelowOrEqual;
        case Below:
            return Above;
        case BelowOrEqual:
            return AboveOrEqual;
        case GreaterThan:
            return LessThan;
        case GreaterThanOrEqual:
            return LessThanOrEqual;
        case LessThan:
            return GreaterThan;
        case LessThanOrEqual:
            return GreaterThanOrEqual;
        default:
            break;
        }

        ASSERT(condition == Equal || condition == NotEqual);
        return condition;
    }
    static const unsigned BlindingModulus = 64;
    bool shouldConsiderBlinding()
    {
        return !(random() & (BlindingModulus - 1));
    }
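
    // random() & (BlindingModulus - 1) is zero roughly one time in 64, so constant blinding is
    // only applied probabilistically to otherwise-eligible constants, limiting its code-size cost.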
    // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
    // FIXME: should this use a test for 32-bitness instead of this specific exception?
#if !CPU(X86_64) && !CPU(ARM64)
    void addPtr(Address src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(AbsoluteAddress src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(RegisterID src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add32(imm, srcDest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add32(TrustedImm32(imm), dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add32(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add32(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and32(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and32(imm, srcDest);
    }

    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
    {
        and32(TrustedImm32(imm), srcDest);
    }
    void lshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        lshift32(trustedImm32ForShift(imm), srcDest);
    }

    void rshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        rshift32(trustedImm32ForShift(imm), srcDest);
    }

    void urshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        urshift32(trustedImm32ForShift(imm), srcDest);
    }

    void negPtr(RegisterID dest)
    {
        neg32(dest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or32(src, dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or32(op1, op2, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or32(TrustedImm32(imm), dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub32(TrustedImm32(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor32(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor32(imm, srcDest);
    }
    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load32(address, dest);
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithCompactAddressOffsetPatch(address, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        move(Imm32(imm.asTrustedImmPtr()), dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store32(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(ImmPtr imm, Address address)
    {
        store32(Imm32(imm.asTrustedImmPtr()), address);
    }

    void storePtr(TrustedImmPtr imm, void* address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(TrustedImm32 imm, ImplicitAddress address)
    {
        store32(imm, address);
    }

    void storePtr(TrustedImmPtr imm, BaseIndex address)
    {
        store32(TrustedImm32(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store32WithAddressOffsetPatch(src, address);
    }
    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, src, dest);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, imm, dest);
    }

    using MacroAssemblerBase::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
    }

#else // !CPU(X86_64)
    void addPtr(RegisterID src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(Address src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add64(imm, srcDest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add64(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, Address address)
    {
        add64(imm, address);
    }

    void addPtr(AbsoluteAddress src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add64(TrustedImm64(imm), dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add64(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and64(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and64(imm, srcDest);
    }

    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
    {
        and64(imm, srcDest);
    }

    void lshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        lshift64(trustedImm32ForShift(imm), srcDest);
    }

    void rshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        rshift64(trustedImm32ForShift(imm), srcDest);
    }

    void urshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        urshift64(trustedImm32ForShift(imm), srcDest);
    }
    void negPtr(RegisterID dest)
    {
        neg64(dest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or64(src, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or64(TrustedImm64(imm), dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or64(op1, op2, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        or64(imm, src, dest);
    }

    void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
    {
        rotateRight64(imm, srcDst);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub64(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub64(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub64(TrustedImm64(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor64(src, dest);
    }

    void xorPtr(RegisterID src, Address dest)
    {
        xor64(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor64(imm, srcDest);
    }
    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load64(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load64(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load64(address, dest);
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load64WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load64WithCompactAddressOffsetPatch(address, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store64(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store64(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store64(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store64(TrustedImm64(imm), address);
    }

    void storePtr(TrustedImmPtr imm, BaseIndex address)
    {
        store64(TrustedImm64(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store64WithAddressOffsetPatch(src, address);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare64(cond, left, right, dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        compare64(cond, left, right, dest);
    }

    void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        test64(cond, reg, mask, dest);
    }

    void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
    {
        test64(cond, reg, mask, dest);
    }
    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch64(cond, left, TrustedImm64(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch64(cond, left, TrustedImm64(right));
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest64(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
    {
        return branchTest64(cond, address, reg);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }
    Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd64(cond, imm, dest);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd64(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub64(cond, imm, dest);
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub64(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        return branchSub64(cond, src1, src2, dest);
    }

    using MacroAssemblerBase::and64;
    using MacroAssemblerBase::convertInt32ToDouble;
    using MacroAssemblerBase::store64;
    bool shouldBlindDouble(double value)
    {
        // Don't trust NaN or +/-Infinity
        if (!std::isfinite(value))
            return shouldConsiderBlinding();

        // Try to force normalisation, and check that there's no change
        // in the bit pattern
        if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
            return shouldConsiderBlinding();

        value = fabs(value);
        // Only allow a limited set of fractional components
        double scaledValue = value * 8;
        if (scaledValue / 8 != value)
            return shouldConsiderBlinding();
        double frac = scaledValue - floor(scaledValue);
        if (frac != 0.0)
            return shouldConsiderBlinding();

        return value > 0xff;
    }
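
    // In other words, a double is treated as safe to embed directly only if it is finite,
    // already normalised, a multiple of 1/8 (at most three fractional bits), and no larger
    // than 0xff in magnitude; anything else is a candidate for blinding.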
    bool shouldBlindPointerForSpecificArch(uintptr_t value)
    {
        if (sizeof(void*) == 4)
            return shouldBlindForSpecificArch(static_cast<uint32_t>(value));
        return shouldBlindForSpecificArch(static_cast<uint64_t>(value));
    }
    bool shouldBlind(ImmPtr imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#else
        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindPointerForSpecificArch(value);
#endif
    }
    struct RotatedImmPtr {
        RotatedImmPtr(uintptr_t v1, uint8_t v2)
            : value(reinterpret_cast<void*>(v1))
            , rotation(v2)
        {
        }
        TrustedImmPtr value;
        TrustedImm32 rotation;
    };

    RotatedImmPtr rotationBlindConstant(ImmPtr imm)
    {
        uint8_t rotation = random() % (sizeof(void*) * 8);
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
        return RotatedImmPtr(value, rotation);
    }
    void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRightPtr(constant.rotation, dest);
    }
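
    // The raw pointer bits are never materialised directly: the constant is stored pre-rotated
    // by a random amount, and loadRotationBlindedConstant() rotates it back into place in dest.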
    bool shouldBlind(Imm64 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#else
        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint64_t value = imm.asTrustedImm64().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;

            JSValue jsValue = JSValue::decode(value);
            if (jsValue.isInt32())
                return shouldBlind(Imm32(jsValue.asInt32()));
            if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
                return false;

            if (!shouldBlindDouble(bitwise_cast<double>(value)))
                return false;
        }
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
#endif
    }
    struct RotatedImm64 {
        RotatedImm64(uint64_t v1, uint8_t v2)
            : value(v1)
            , rotation(v2)
        {
        }
        TrustedImm64 value;
        TrustedImm32 rotation;
    };

    RotatedImm64 rotationBlindConstant(Imm64 imm)
    {
        uint8_t rotation = random() % (sizeof(int64_t) * 8);
        uint64_t value = imm.asTrustedImm64().m_value;
        value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
        return RotatedImm64(value, rotation);
    }

    void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRight64(constant.rotation, dest);
    }
    void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
            convertInt32ToDouble(scratchRegister, dest);
        } else
            convertInt32ToDouble(imm.asTrustedImm32(), dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImmPtr(), dest);
    }

    void move(Imm64 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm64(), dest);
    }

    void and64(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and64(key.value1, dest);
            and64(key.value2, dest);
        } else
            and64(imm.asTrustedImm32(), dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
            return branchPtr(cond, left, scratchRegister);
        }
        return branchPtr(cond, left, right.asTrustedImmPtr());
    }

    void storePtr(ImmPtr imm, Address dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            storePtr(scratchRegister, dest);
        } else
            storePtr(imm.asTrustedImmPtr(), dest);
    }

    void store64(Imm64 imm, Address dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            store64(scratchRegister, dest);
        } else
            store64(imm.asTrustedImm64(), dest);
    }

#endif // !CPU(X86_64)
    bool shouldBlind(Imm32 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#else // ENABLE(FORCED_JIT_BLINDING)

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint32_t value = imm.asTrustedImm32().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffff:
            return false;
        default:
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
#endif // ENABLE(FORCED_JIT_BLINDING)
    }
    struct BlindedImm32 {
        BlindedImm32(int32_t v1, int32_t v2)
            : value1(v1)
            , value2(v2)
        {
        }
        TrustedImm32 value1;
        TrustedImm32 value2;
    };
    uint32_t keyForConstant(uint32_t value, uint32_t& mask)
    {
        uint32_t key = random();
        if (value <= 0xff)
            mask = 0xff;
        else if (value <= 0xffff)
            mask = 0xffff;
        else if (value <= 0xffffff)
            mask = 0xffffff;
        else
            mask = 0xffffffff;
        return key & mask;
    }

    uint32_t keyForConstant(uint32_t value)
    {
        uint32_t mask = 0;
        return keyForConstant(value, mask);
    }
    BlindedImm32 xorBlindConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue);
        return BlindedImm32(baseValue ^ key, key);
    }
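
    // Example: for a base value v and random key k, the pair is (v ^ k, k); xoring the two
    // halves back together in loadXorBlindedConstant() recovers v without v ever appearing
    // as an immediate in the instruction stream.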
    BlindedImm32 additionBlindedConstant(Imm32 imm)
    {
        // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
        static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };

        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
        if (key > baseValue)
            key = key - baseValue;
        return BlindedImm32(baseValue - key, key);
    }
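
    // The two halves always sum back to the original: (baseValue - key) + key == baseValue.
    // Masking the key with maskTable[baseValue & 3] keeps both halves aligned the same way as
    // the original value, in case the constant is used as a pointer offset.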
    BlindedImm32 andBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
    }
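
    // Relies on the identity ((v & k) | ~k) & ((v & ~k) | k) == v: and-ing the two halves into
    // dest one after the other has the same effect as and-ing the original value (both halves
    // are additionally clipped to 'mask').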
    BlindedImm32 orBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
    }
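
    // Relies on the identity (v & k) | (v & ~k) == v: or-ing the two halves into dest one after
    // the other has the same effect as or-ing the original value.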
    void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
    {
        move(constant.value1, dest);
        xor32(constant.value2, dest);
    }
    void add32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            add32(key.value1, dest);
            add32(key.value2, dest);
        } else
            add32(imm.asTrustedImm32(), dest);
    }

    void addPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            addPtr(key.value1, dest);
            addPtr(key.value2, dest);
        } else
            addPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and32(key.value1, dest);
            and32(key.value2, dest);
        } else
            and32(imm.asTrustedImm32(), dest);
    }

    void andPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            andPtr(key.value1, dest);
            andPtr(key.value2, dest);
        } else
            andPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return and32(imm.asTrustedImm32(), dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            and32(src, dest);
        } else
            and32(imm.asTrustedImm32(), src, dest);
    }

    void move(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm32(), dest);
    }

    void or32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return or32(imm, dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            or32(src, dest);
        } else
            or32(imm.asTrustedImm32(), src, dest);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = orBlindedConstant(imm);
            or32(key.value1, dest);
            or32(key.value2, dest);
        } else
            or32(imm.asTrustedImm32(), dest);
    }

    void poke(Imm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(ImmPtr value, int index = 0)
    {
        storePtr(value, addressForPoke(index));
    }

#if CPU(X86_64) || CPU(ARM64)
    void poke(Imm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }
#endif // CPU(X86_64)
    void store32(Imm32 imm, Address dest)
    {
        if (shouldBlind(imm)) {
#if CPU(X86) || CPU(X86_64)
            BlindedImm32 blind = xorBlindConstant(imm);
            store32(blind.value1, dest);
            xor32(blind.value2, dest);
#else // CPU(X86) || CPU(X86_64)
            if (haveScratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegisterForBlinding());
                store32(scratchRegisterForBlinding(), dest);
            } else {
                // If we don't have a scratch register available for use, we'll just
                // place a random number of nops.
                uint32_t nopCount = random() & 3;
                while (nopCount--)
                    nop();
                store32(imm.asTrustedImm32(), dest);
            }
#endif // CPU(X86) || CPU(X86_64)
        } else
            store32(imm.asTrustedImm32(), dest);
    }
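
    // When no scratch register is available the immediate is stored unblinded, but a random
    // number of nops is emitted first so the constant does not land at a predictable offset.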
    void sub32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            sub32(key.value1, dest);
            sub32(key.value2, dest);
        } else
            sub32(imm.asTrustedImm32(), dest);
    }

    void subPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            subPtr(key.value1, dest);
            subPtr(key.value2, dest);
        } else
            subPtr(imm.asTrustedImm32(), dest);
    }

    void xor32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, src, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), src, dest);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), dest);
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
    {
        if (shouldBlind(right)) {
            if (haveScratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(right), scratchRegisterForBlinding());
                return branch32(cond, left, scratchRegisterForBlinding());
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
            return branch32(cond, left, right.asTrustedImm32());
        }

        return branch32(cond, left, right.asTrustedImm32());
    }
    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
    {
        if (src == dest)
            ASSERT(haveScratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            if (src == dest) {
                move(src, scratchRegisterForBlinding());
                src = scratchRegisterForBlinding();
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchAdd32(cond, src, dest);
        }
        return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
    }
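
    // When src == dest the blinded constant has to be built in dest, which would clobber src,
    // so src is first copied to the scratch register (hence the ASSERT above).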
    Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (src == dest)
            ASSERT(haveScratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            if (src == dest) {
                move(src, scratchRegisterForBlinding());
                src = scratchRegisterForBlinding();
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchMul32(cond, src, dest);
        }
        return branchMul32(cond, imm.asTrustedImm32(), src, dest);
    }

    // branchSub32 takes a scratch register as 32 bit platforms make use of this,
    // with src == dst, and on x86-32 we don't have a platform scratch register.
    Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
    {
        if (shouldBlind(imm)) {
            ASSERT(scratch != dest);
            ASSERT(scratch != src);
            loadXorBlindedConstant(xorBlindConstant(imm), scratch);
            return branchSub32(cond, src, scratch, dest);
        }
        return branchSub32(cond, src, imm.asTrustedImm32(), dest);
    }
    void lshift32(Imm32 imm, RegisterID dest)
    {
        lshift32(trustedImm32ForShift(imm), dest);
    }

    void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        lshift32(src, trustedImm32ForShift(amount), dest);
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        rshift32(trustedImm32ForShift(imm), dest);
    }

    void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        rshift32(src, trustedImm32ForShift(amount), dest);
    }

    void urshift32(Imm32 imm, RegisterID dest)
    {
        urshift32(trustedImm32ForShift(imm), dest);
    }

    void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        urshift32(src, trustedImm32ForShift(amount), dest);
    }
};

} // namespace JSC
#else // ENABLE(ASSEMBLER)

namespace JSC {

// If there is no assembler for this platform, at least allow code to make references to
// some of the things it would otherwise define, albeit without giving that code any way
// of doing anything useful.
class MacroAssembler {
private:
    MacroAssembler() { }

public:
    enum RegisterID { NoRegister };
    enum FPRegisterID { NoFPRegister };
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssembler_h