/*
 * Copyright (C) 2008, 2012, 2013, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef MacroAssembler_h
#define MacroAssembler_h

#if ENABLE(ASSEMBLER)

#if CPU(ARM_THUMB2)
#include "MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };

#elif CPU(ARM64)
#include "MacroAssemblerARM64.h"
namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; };

#elif CPU(ARM_TRADITIONAL)
#include "MacroAssemblerARM.h"
namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };

#elif CPU(MIPS)
#include "MacroAssemblerMIPS.h"
namespace JSC {
typedef MacroAssemblerMIPS MacroAssemblerBase;
};

#elif CPU(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };

#elif CPU(X86_64)
#include "MacroAssemblerX86_64.h"
namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };

#elif CPU(SH4)
#include "MacroAssemblerSH4.h"
namespace JSC {
typedef MacroAssemblerSH4 MacroAssemblerBase;
};

#else
#error "The MacroAssembler is not supported on this platform."
#endif
namespace JSC {

class MacroAssembler : public MacroAssemblerBase {
public:

    static RegisterID nextRegister(RegisterID reg)
    {
        return static_cast<RegisterID>(reg + 1);
    }

    static FPRegisterID nextFPRegister(FPRegisterID reg)
    {
        return static_cast<FPRegisterID>(reg + 1);
    }

    static unsigned numberOfRegisters()
    {
        return lastRegister() - firstRegister() + 1;
    }

    static unsigned registerIndex(RegisterID reg)
    {
        return reg - firstRegister();
    }

    static unsigned numberOfFPRegisters()
    {
        return lastFPRegister() - firstFPRegister() + 1;
    }

    static unsigned fpRegisterIndex(FPRegisterID reg)
    {
        return reg - firstFPRegister();
    }

    static unsigned registerIndex(FPRegisterID reg)
    {
        return fpRegisterIndex(reg) + numberOfRegisters();
    }

    static unsigned totalNumberOfRegisters()
    {
        return numberOfRegisters() + numberOfFPRegisters();
    }
    using MacroAssemblerBase::pop;
    using MacroAssemblerBase::jump;
    using MacroAssemblerBase::branch32;
    using MacroAssemblerBase::move;
    using MacroAssemblerBase::add32;
    using MacroAssemblerBase::and32;
    using MacroAssemblerBase::branchAdd32;
    using MacroAssemblerBase::branchMul32;
#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(X86_64)
    using MacroAssemblerBase::branchPtr;
#endif
    using MacroAssemblerBase::branchSub32;
    using MacroAssemblerBase::lshift32;
    using MacroAssemblerBase::or32;
    using MacroAssemblerBase::rshift32;
    using MacroAssemblerBase::store32;
    using MacroAssemblerBase::sub32;
    using MacroAssemblerBase::urshift32;
    using MacroAssemblerBase::xor32;
    static bool isPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value == static_cast<int32_t>(value);
    }

    static const double twoToThe32; // This is super useful for some double code.
    // Utilities used by the DFG JIT.
#if ENABLE(DFG_JIT)
    using MacroAssemblerBase::invert;

    static DoubleCondition invert(DoubleCondition cond)
    {
        switch (cond) {
        case DoubleEqual:
            return DoubleNotEqualOrUnordered;
        case DoubleNotEqual:
            return DoubleEqualOrUnordered;
        case DoubleGreaterThan:
            return DoubleLessThanOrEqualOrUnordered;
        case DoubleGreaterThanOrEqual:
            return DoubleLessThanOrUnordered;
        case DoubleLessThan:
            return DoubleGreaterThanOrEqualOrUnordered;
        case DoubleLessThanOrEqual:
            return DoubleGreaterThanOrUnordered;
        case DoubleEqualOrUnordered:
            return DoubleNotEqual;
        case DoubleNotEqualOrUnordered:
            return DoubleEqual;
        case DoubleGreaterThanOrUnordered:
            return DoubleLessThanOrEqual;
        case DoubleGreaterThanOrEqualOrUnordered:
            return DoubleLessThan;
        case DoubleLessThanOrUnordered:
            return DoubleGreaterThanOrEqual;
        case DoubleLessThanOrEqualOrUnordered:
            return DoubleGreaterThan;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return DoubleEqual; // make compiler happy
    }

    static bool isInvertible(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
        case NonZero:
            return true;
        default:
            return false;
        }
    }

    static ResultCondition invert(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
            return NonZero;
        case NonZero:
            return Zero;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            return Zero; // Make compiler happy for release builds.
        }
    }
#endif
    // Platform agnostic convenience functions,
    // described in terms of other macro assembly methods.
    void pop()
    {
        addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
    }

    void peek(RegisterID dest, int index = 0)
    {
        loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    Address addressForPoke(int index)
    {
        return Address(stackPointerRegister, (index * sizeof(void*)));
    }

    void poke(RegisterID src, int index = 0)
    {
        storePtr(src, addressForPoke(index));
    }

    void poke(TrustedImm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(TrustedImmPtr imm, int index = 0)
    {
        storePtr(imm, addressForPoke(index));
    }
#if !CPU(ARM64)
    void pushToSave(RegisterID src)
    {
        push(src);
    }
    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
    {
        push(imm);
    }
    void popToRestore(RegisterID dest)
    {
        pop(dest);
    }
    void pushToSave(FPRegisterID src)
    {
        subPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
        storeDouble(src, stackPointerRegister);
    }
    void popToRestore(FPRegisterID dest)
    {
        loadDouble(stackPointerRegister, dest);
        addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
    }

    static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
#endif // !CPU(ARM64)
#if CPU(X86_64) || CPU(ARM64)
    void peek64(RegisterID dest, int index = 0)
    {
        load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    void poke(TrustedImm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }

    void poke64(RegisterID src, int index = 0)
    {
        store64(src, addressForPoke(index));
    }
#endif
    void poke(FPRegisterID src, int index = 0)
    {
        ASSERT(!(index & 1));
        storeDouble(src, addressForPoke(index));
    }

    // Immediate shifts only have 5 controllable bits
    // so we'll consider them safe for now.
    TrustedImm32 trustedImm32ForShift(Imm32 imm)
    {
        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
    }
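    // For example, an untrusted shift amount of 35 is masked to 35 & 31 = 3, so an
    // attacker-supplied immediate can never select an out-of-range shift.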
    // Backwards branches, these are currently all implemented using existing forwards branch mechanisms.
    void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }
    void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
    {
        branch32(cond, op1, op2).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
    {
        branch32(cond, left, right).linkTo(target, this);
    }

    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
    {
        branchTestPtr(cond, reg).linkTo(target, this);
    }
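    // Typical use of the Label overloads above: record a Label at the top of a loop and
    // branch back to it once the loop body has been emitted, e.g.
    //     Label top = label();
    //     ...
    //     branch32(NotEqual, counterReg, TrustedImm32(0), top);
    // (counterReg is only an illustrative register name, not one defined here.)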
#if !CPU(ARM_THUMB2) && !CPU(ARM64)
    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtr(cond, left, right));
    }

    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
    }

    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
    {
        return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
    }
#endif

#if !CPU(ARM_TRADITIONAL)
    PatchableJump patchableJump()
    {
        return PatchableJump(jump());
    }

    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return PatchableJump(branchTest32(cond, reg, mask));
    }

    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        return PatchableJump(branch32(cond, reg, imm));
    }

    PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
    {
        return PatchableJump(branch32(cond, address, imm));
    }
#endif
    void jump(Label target)
    {
        jump().linkTo(target, this);
    }

    // Commute a relational condition, returns a new condition that will produce
    // the same results given the same inputs but with their positions exchanged.
    static RelationalCondition commute(RelationalCondition condition)
    {
        switch (condition) {
        case Above:
            return Below;
        case AboveOrEqual:
            return BelowOrEqual;
        case Below:
            return Above;
        case BelowOrEqual:
            return AboveOrEqual;
        case GreaterThan:
            return LessThan;
        case GreaterThanOrEqual:
            return LessThanOrEqual;
        case LessThan:
            return GreaterThan;
        case LessThanOrEqual:
            return GreaterThanOrEqual;
        default:
            break;
        }

        ASSERT(condition == Equal || condition == NotEqual);
        return condition;
    }
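    // For example, commute(LessThan) is GreaterThan: "imm < reg" holds exactly when
    // "reg > imm", which is how the branch32 overloads above move an immediate from the
    // left-hand operand position over to the right.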
    static const unsigned BlindingModulus = 64;
    bool shouldConsiderBlinding()
    {
        return !(random() & (BlindingModulus - 1));
    }
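    // Constant blinding re-encodes untrusted (Imm32/Imm64/ImmPtr) constants before they
    // are baked into executable code, as a defence against JIT-spraying style attacks
    // that rely on chosen bit patterns appearing verbatim in the instruction stream.
    // shouldConsiderBlinding() keeps the cost down: it returns true for roughly 1 in
    // BlindingModulus (64) calls, so most otherwise-innocuous constants skip the extra work.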
    // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
    // FIXME: should this use a test for 32-bitness instead of this specific exception?
#if !CPU(X86_64) && !CPU(ARM64)
    void addPtr(Address src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(AbsoluteAddress src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(RegisterID src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add32(imm, srcDest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add32(TrustedImm32(imm), dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add32(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add32(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and32(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and32(imm, srcDest);
    }

    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
    {
        and32(TrustedImm32(imm), srcDest);
    }

    void lshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        lshift32(trustedImm32ForShift(imm), srcDest);
    }

    void negPtr(RegisterID dest)
    {
        neg32(dest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or32(src, dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or32(op1, op2, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or32(TrustedImm32(imm), dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub32(TrustedImm32(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor32(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor32(imm, srcDest);
    }

    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load32(address, dest);
    }
    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithCompactAddressOffsetPatch(address, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        move(Imm32(imm.asTrustedImmPtr()), dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store32(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(ImmPtr imm, Address address)
    {
        store32(Imm32(imm.asTrustedImmPtr()), address);
    }

    void storePtr(TrustedImmPtr imm, void* address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(TrustedImm32 imm, ImplicitAddress address)
    {
        store32(imm, address);
    }

    void storePtr(TrustedImmPtr imm, BaseIndex address)
    {
        store32(TrustedImm32(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store32WithAddressOffsetPatch(src, address);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, src, dest);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, imm, dest);
    }
    using MacroAssemblerBase::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
    }
#else // !CPU(X86_64)

    void addPtr(RegisterID src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(Address src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add64(imm, srcDest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add64(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, Address address)
    {
        add64(imm, address);
    }

    void addPtr(AbsoluteAddress src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add64(TrustedImm64(imm), dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add64(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and64(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and64(imm, srcDest);
    }

    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
    {
        and64(imm, srcDest);
    }

    void lshiftPtr(Imm32 imm, RegisterID srcDest)
    {
        lshift64(trustedImm32ForShift(imm), srcDest);
    }

    void negPtr(RegisterID dest)
    {
        neg64(dest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or64(src, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or64(TrustedImm64(imm), dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or64(op1, op2, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        or64(imm, src, dest);
    }

    void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
    {
        rotateRight64(imm, srcDst);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub64(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub64(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub64(TrustedImm64(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor64(src, dest);
    }

    void xorPtr(RegisterID src, Address dest)
    {
        xor64(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor64(imm, srcDest);
    }
    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load64(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load64(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load64(address, dest);
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load64WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load64WithCompactAddressOffsetPatch(address, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store64(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store64(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store64(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store64(TrustedImm64(imm), address);
    }

    void storePtr(TrustedImmPtr imm, BaseIndex address)
    {
        store64(TrustedImm64(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store64WithAddressOffsetPatch(src, address);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare64(cond, left, right, dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        compare64(cond, left, right, dest);
    }

    void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        test64(cond, reg, mask, dest);
    }

    void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
    {
        test64(cond, reg, mask, dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch64(cond, left, TrustedImm64(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch64(cond, left, TrustedImm64(right));
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest64(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
    {
        return branchTest64(cond, address, reg);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd64(cond, imm, dest);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd64(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub64(cond, imm, dest);
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub64(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        return branchSub64(cond, src1, src2, dest);
    }
    using MacroAssemblerBase::and64;
    using MacroAssemblerBase::convertInt32ToDouble;
    using MacroAssemblerBase::store64;
    bool shouldBlindDouble(double value)
    {
        // Don't trust NaN or +/-Infinity
        if (!std::isfinite(value))
            return shouldConsiderBlinding();

        // Try to force normalisation, and check that there's no change
        // in the bit pattern
        if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
            return shouldConsiderBlinding();

        value = fabs(value);
        // Only allow a limited set of fractional components
        double scaledValue = value * 8;
        if (scaledValue / 8 != value)
            return shouldConsiderBlinding();
        double frac = scaledValue - floor(scaledValue);
        if (frac != 0.0)
            return shouldConsiderBlinding();

        return value > 0xff;
    }

    bool shouldBlindPointerForSpecificArch(uintptr_t value)
    {
        if (sizeof(void*) == 4)
            return shouldBlindForSpecificArch(static_cast<uint32_t>(value));
        return shouldBlindForSpecificArch(static_cast<uint64_t>(value));
    }
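    // The shouldBlind() predicates below decide, per immediate, whether emitting the
    // extra blinding sequence is worthwhile: a handful of common "safe" values (small
    // values, their complements, and all-ones masks) are left alone, and the rest are
    // only blinded when shouldConsiderBlinding() and the per-architecture check agree.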
    bool shouldBlind(ImmPtr imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindPointerForSpecificArch(value);
    }

    struct RotatedImmPtr {
        RotatedImmPtr(uintptr_t v1, uint8_t v2)
            : value(v1)
            , rotation(v2)
        {
        }
        TrustedImmPtr value;
        TrustedImm32 rotation;
    };

    RotatedImmPtr rotationBlindConstant(ImmPtr imm)
    {
        uint8_t rotation = random() % (sizeof(void*) * 8);
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
        return RotatedImmPtr(value, rotation);
    }

    void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRightPtr(constant.rotation, dest);
    }
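    // The rotation scheme never materialises the raw pointer bits as an immediate: the
    // constant is stored pre-rotated by a random amount, and loadRotationBlindedConstant()
    // moves that rotated value into the register and then rotates it back into place.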
    bool shouldBlind(Imm64 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint64_t value = imm.asTrustedImm64().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;

            JSValue jsValue = JSValue::decode(value);
            if (jsValue.isInt32())
                return shouldBlind(Imm32(jsValue.asInt32()));
            if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
                return false;

            if (!shouldBlindDouble(bitwise_cast<double>(value)))
                return false;
        }
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
    }

    struct RotatedImm64 {
        RotatedImm64(uint64_t v1, uint8_t v2)
            : value(v1)
            , rotation(v2)
        {
        }
        TrustedImm64 value;
        TrustedImm32 rotation;
    };

    RotatedImm64 rotationBlindConstant(Imm64 imm)
    {
        uint8_t rotation = random() % (sizeof(int64_t) * 8);
        uint64_t value = imm.asTrustedImm64().m_value;
        value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
        return RotatedImm64(value, rotation);
    }

    void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRight64(constant.rotation, dest);
    }

    void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
            convertInt32ToDouble(scratchRegister, dest);
        } else
            convertInt32ToDouble(imm.asTrustedImm32(), dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImmPtr(), dest);
    }

    void move(Imm64 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm64(), dest);
    }

    void and64(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and64(key.value1, dest);
            and64(key.value2, dest);
        } else
            and64(imm.asTrustedImm32(), dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
            return branchPtr(cond, left, scratchRegister);
        }
        return branchPtr(cond, left, right.asTrustedImmPtr());
    }

    void storePtr(ImmPtr imm, Address dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            storePtr(scratchRegister, dest);
        } else
            storePtr(imm.asTrustedImmPtr(), dest);
    }

    void store64(Imm64 imm, Address dest)
    {
        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            store64(scratchRegister, dest);
        } else
            store64(imm.asTrustedImm64(), dest);
    }

#endif // !CPU(X86_64)
    bool shouldBlind(Imm32 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#else // ENABLE(FORCED_JIT_BLINDING)

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint32_t value = imm.asTrustedImm32().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffff:
            return false;
        default:
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
#endif // ENABLE(FORCED_JIT_BLINDING)
    }

    struct BlindedImm32 {
        BlindedImm32(int32_t v1, int32_t v2)
            : value1(v1)
            , value2(v2)
        {
        }
        TrustedImm32 value1;
        TrustedImm32 value2;
    };

    uint32_t keyForConstant(uint32_t value, uint32_t& mask)
    {
        uint32_t key = random();
        if (value <= 0xff)
            mask = 0xff;
        else if (value <= 0xffff)
            mask = 0xffff;
        else if (value <= 0xffffff)
            mask = 0xffffff;
        else
            mask = 0xffffffff;
        return key & mask;
    }

    uint32_t keyForConstant(uint32_t value)
    {
        uint32_t mask = 0;
        return keyForConstant(value, mask);
    }

    BlindedImm32 xorBlindConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue);
        return BlindedImm32(baseValue ^ key, key);
    }
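    // xorBlindConstant() splits an immediate into (imm ^ key, key); loadXorBlindedConstant()
    // below rebuilds it as a move of the first half followed by an xor32 of the second, so
    // the original bit pattern is not normally emitted directly. keyForConstant() sizes the
    // key's mask to the magnitude of the constant, keeping the blinded halves in the same range.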
    BlindedImm32 additionBlindedConstant(Imm32 imm)
    {
        // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
        static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };

        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
        if (key > baseValue)
            key = key - baseValue;
        return BlindedImm32(baseValue - key, key);
    }
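    // value1 + value2 adds back up to the original immediate, and maskTable keeps the key's
    // low bits compatible with the immediate's alignment (a multiple-of-4 offset is split
    // into multiple-of-4 halves, an even offset into even halves), so intermediate sums used
    // as pointer offsets stay as aligned as the original offset was.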
    BlindedImm32 andBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
    }

    BlindedImm32 orBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
    }

    void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
    {
        move(constant.value1, dest);
        xor32(constant.value2, dest);
    }

    void add32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            add32(key.value1, dest);
            add32(key.value2, dest);
        } else
            add32(imm.asTrustedImm32(), dest);
    }

    void addPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            addPtr(key.value1, dest);
            addPtr(key.value2, dest);
        } else
            addPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and32(key.value1, dest);
            and32(key.value2, dest);
        } else
            and32(imm.asTrustedImm32(), dest);
    }

    void andPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            andPtr(key.value1, dest);
            andPtr(key.value2, dest);
        } else
            andPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return and32(imm.asTrustedImm32(), dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            and32(src, dest);
        } else
            and32(imm.asTrustedImm32(), src, dest);
    }

    void move(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm32(), dest);
    }

    void or32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return or32(imm, dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            or32(src, dest);
        } else
            or32(imm.asTrustedImm32(), src, dest);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = orBlindedConstant(imm);
            or32(key.value1, dest);
            or32(key.value2, dest);
        } else
            or32(imm.asTrustedImm32(), dest);
    }
    void poke(Imm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(ImmPtr value, int index = 0)
    {
        storePtr(value, addressForPoke(index));
    }

#if CPU(X86_64) || CPU(ARM64)
    void poke(Imm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }
#endif // CPU(X86_64)
    void store32(Imm32 imm, Address dest)
    {
        if (shouldBlind(imm)) {
#if CPU(X86) || CPU(X86_64)
            BlindedImm32 blind = xorBlindConstant(imm);
            store32(blind.value1, dest);
            xor32(blind.value2, dest);
#else // CPU(X86) || CPU(X86_64)
            if (haveScratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegisterForBlinding());
                store32(scratchRegisterForBlinding(), dest);
            } else {
                // If we don't have a scratch register available for use, we'll just
                // place a random number of nops.
                uint32_t nopCount = random() & 3;
                while (nopCount--)
                    nop();
                store32(imm.asTrustedImm32(), dest);
            }
#endif // CPU(X86) || CPU(X86_64)
        } else
            store32(imm.asTrustedImm32(), dest);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            sub32(key.value1, dest);
            sub32(key.value2, dest);
        } else
            sub32(imm.asTrustedImm32(), dest);
    }

    void subPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            subPtr(key.value1, dest);
            subPtr(key.value2, dest);
        } else
            subPtr(imm.asTrustedImm32(), dest);
    }

    void xor32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, src, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), src, dest);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), dest);
    }
    Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
    {
        if (shouldBlind(right)) {
            if (haveScratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(right), scratchRegisterForBlinding());
                return branch32(cond, left, scratchRegisterForBlinding());
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
            return branch32(cond, left, right.asTrustedImm32());
        }

        return branch32(cond, left, right.asTrustedImm32());
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
    {
        if (src == dest)
            ASSERT(haveScratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            if (src == dest) {
                move(src, scratchRegisterForBlinding());
                src = scratchRegisterForBlinding();
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchAdd32(cond, src, dest);
        }
        return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
    }

    Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (src == dest)
            ASSERT(haveScratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            if (src == dest) {
                move(src, scratchRegisterForBlinding());
                src = scratchRegisterForBlinding();
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchMul32(cond, src, dest);
        }
        return branchMul32(cond, imm.asTrustedImm32(), src, dest);
    }

    // branchSub32 takes a scratch register as 32 bit platforms make use of this,
    // with src == dst, and on x86-32 we don't have a platform scratch register.
    Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
    {
        if (shouldBlind(imm)) {
            ASSERT(scratch != dest);
            ASSERT(scratch != src);
            loadXorBlindedConstant(xorBlindConstant(imm), scratch);
            return branchSub32(cond, src, scratch, dest);
        }
        return branchSub32(cond, src, imm.asTrustedImm32(), dest);
    }
    void lshift32(Imm32 imm, RegisterID dest)
    {
        lshift32(trustedImm32ForShift(imm), dest);
    }

    void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        lshift32(src, trustedImm32ForShift(amount), dest);
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        rshift32(trustedImm32ForShift(imm), dest);
    }

    void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        rshift32(src, trustedImm32ForShift(amount), dest);
    }

    void urshift32(Imm32 imm, RegisterID dest)
    {
        urshift32(trustedImm32ForShift(imm), dest);
    }

    void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        urshift32(src, trustedImm32ForShift(amount), dest);
    }
};

} // namespace JSC
#else // ENABLE(ASSEMBLER)

namespace JSC {

// If there is no assembler for this platform, at least allow code to make references to
// some of the things it would otherwise define, albeit without giving that code any way
// of doing anything useful.
class MacroAssembler {
private:
    MacroAssembler() { }

public:
    enum RegisterID { NoRegister };
    enum FPRegisterID { NoFPRegister };
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssembler_h