/*
 * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef MacroAssembler_h
#define MacroAssembler_h

#include <wtf/Platform.h>

#if ENABLE(ASSEMBLER)
#if CPU(ARM_THUMB2)
#include "MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };

#elif CPU(ARM64)
#include "MacroAssemblerARM64.h"
namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; };

#elif CPU(ARM_TRADITIONAL)
#include "MacroAssemblerARM.h"
namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };

#elif CPU(MIPS)
#include "MacroAssemblerMIPS.h"
namespace JSC {
typedef MacroAssemblerMIPS MacroAssemblerBase;
};

#elif CPU(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };

#elif CPU(X86_64)
#include "MacroAssemblerX86_64.h"
namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };

#elif CPU(SH4)
#include "MacroAssemblerSH4.h"
namespace JSC {
typedef MacroAssemblerSH4 MacroAssemblerBase;
};

#else
#error "The MacroAssembler is not supported on this platform."
#endif
namespace JSC {

class MacroAssembler : public MacroAssemblerBase {
public:
    using MacroAssemblerBase::pop;
    using MacroAssemblerBase::jump;
    using MacroAssemblerBase::branch32;
    using MacroAssemblerBase::move;

#if ENABLE(JIT_CONSTANT_BLINDING)
    using MacroAssemblerBase::add32;
    using MacroAssemblerBase::and32;
    using MacroAssemblerBase::branchAdd32;
    using MacroAssemblerBase::branchMul32;
    using MacroAssemblerBase::branchSub32;
    using MacroAssemblerBase::lshift32;
    using MacroAssemblerBase::or32;
    using MacroAssemblerBase::rshift32;
    using MacroAssemblerBase::store32;
    using MacroAssemblerBase::sub32;
    using MacroAssemblerBase::urshift32;
    using MacroAssemblerBase::xor32;
#endif
    static bool isPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value == (int32_t)value;
    }
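
    // For example, an offset of 0x7fffffff round-trips through int32_t and is
    // accepted, while 0x100000000 on a 64-bit target truncates to 0 and is
    // rejected (illustrative values).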
    static const double twoToThe32; // This is super useful for some double code.
    // Utilities used by the DFG JIT.
#if ENABLE(DFG_JIT)
    using MacroAssemblerBase::invert;

    static DoubleCondition invert(DoubleCondition cond)
    {
        switch (cond) {
        case DoubleEqual:
            return DoubleNotEqualOrUnordered;
        case DoubleNotEqual:
            return DoubleEqualOrUnordered;
        case DoubleGreaterThan:
            return DoubleLessThanOrEqualOrUnordered;
        case DoubleGreaterThanOrEqual:
            return DoubleLessThanOrUnordered;
        case DoubleLessThan:
            return DoubleGreaterThanOrEqualOrUnordered;
        case DoubleLessThanOrEqual:
            return DoubleGreaterThanOrUnordered;
        case DoubleEqualOrUnordered:
            return DoubleNotEqual;
        case DoubleNotEqualOrUnordered:
            return DoubleEqual;
        case DoubleGreaterThanOrUnordered:
            return DoubleLessThanOrEqual;
        case DoubleGreaterThanOrEqualOrUnordered:
            return DoubleLessThan;
        case DoubleLessThanOrUnordered:
            return DoubleGreaterThanOrEqual;
        case DoubleLessThanOrEqualOrUnordered:
            return DoubleGreaterThan;
        }
        RELEASE_ASSERT_NOT_REACHED();
        return DoubleEqual; // make compiler happy
    }
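
    // Note that inversion must account for unordered (NaN) comparisons: for
    // example, invert(DoubleGreaterThan) is DoubleLessThanOrEqualOrUnordered,
    // since !(a > b) also holds when either operand is NaN.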
    static bool isInvertible(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
        case NonZero:
            return true;
        default:
            return false;
        }
    }

    static ResultCondition invert(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
            return NonZero;
        case NonZero:
            return Zero;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            return Zero; // Make compiler happy for release builds.
        }
    }
#endif
    // Platform agnostic convenience functions,
    // described in terms of other macro assembly methods.
    void pop()
    {
        addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
    }
    void peek(RegisterID dest, int index = 0)
    {
        loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    Address addressForPoke(int index)
    {
        return Address(stackPointerRegister, (index * sizeof(void*)));
    }

    void poke(RegisterID src, int index = 0)
    {
        storePtr(src, addressForPoke(index));
    }

    void poke(TrustedImm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(TrustedImmPtr imm, int index = 0)
    {
        storePtr(imm, addressForPoke(index));
    }
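
    // Illustrative usage (not part of this header): poke(reg, 2) stores "reg"
    // at [stackPointerRegister + 2 * sizeof(void*)], and peek(reg, 2) loads it
    // back; "reg" here is any caller-chosen RegisterID.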
#if !CPU(ARM64)
    void pushToSave(RegisterID src)
    {
        push(src);
    }

    void popToRestore(RegisterID dest)
    {
        pop(dest);
    }

    void pushToSave(FPRegisterID src)
    {
        subPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
        storeDouble(src, stackPointerRegister);
    }

    void popToRestore(FPRegisterID dest)
    {
        loadDouble(stackPointerRegister, dest);
        addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
    }
#endif // !CPU(ARM64)
#if CPU(X86_64) || CPU(ARM64)
    void peek64(RegisterID dest, int index = 0)
    {
        load64(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    void poke(TrustedImm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }

    void poke64(RegisterID src, int index = 0)
    {
        store64(src, addressForPoke(index));
    }
#endif
#if CPU(MIPS)
    void poke(FPRegisterID src, int index = 0)
    {
        ASSERT(!(index & 1));
        storeDouble(src, addressForPoke(index));
    }
#endif
    // Backward branches; these are currently all implemented using existing
    // forward branch mechanisms.
    void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }

    void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
    {
        branch32(cond, op1, op2).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
    {
        branch32(cond, left, right).linkTo(target, this);
    }

    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
    {
        branchTestPtr(cond, reg).linkTo(target, this);
    }
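
    // Illustrative usage (not part of this header): the Label overloads allow a
    // backward-branching loop such as
    //     Label top = label();
    //     sub32(TrustedImm32(1), counterReg);
    //     branch32(NotEqual, counterReg, TrustedImm32(0), top);
    // where "counterReg" is a hypothetical RegisterID.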
#if !CPU(ARM_THUMB2) && !CPU(ARM64)
    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtr(cond, left, right));
    }

    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
    }

    PatchableJump patchableJump()
    {
        return PatchableJump(jump());
    }

    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return PatchableJump(branchTest32(cond, reg, mask));
    }

    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        return PatchableJump(branch32(cond, reg, imm));
    }
#endif // !CPU(ARM_THUMB2) && !CPU(ARM64)
    void jump(Label target)
    {
        jump().linkTo(target, this);
    }
    // Commute a relational condition: returns a new condition that will produce
    // the same results given the same inputs, but with their positions exchanged.
    static RelationalCondition commute(RelationalCondition condition)
    {
        switch (condition) {
        case Above:
            return Below;
        case AboveOrEqual:
            return BelowOrEqual;
        case Below:
            return Above;
        case BelowOrEqual:
            return AboveOrEqual;
        case GreaterThan:
            return LessThan;
        case GreaterThanOrEqual:
            return LessThanOrEqual;
        case LessThan:
            return GreaterThan;
        case LessThanOrEqual:
            return GreaterThanOrEqual;
        default:
            break;
        }

        ASSERT(condition == Equal || condition == NotEqual);
        return condition;
    }
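
    // For example, branch32(GreaterThan, left, right) produces the same result
    // as branch32(commute(GreaterThan), right, left), i.e. LessThan with the
    // operands exchanged; Equal and NotEqual are symmetric and commute to
    // themselves.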
    static const unsigned BlindingModulus = 64;
    bool shouldConsiderBlinding()
    {
        return !(random() & (BlindingModulus - 1));
    }
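
    // With BlindingModulus == 64 this returns true only when the low six bits
    // of random() are all zero, so call sites that consult it end up blinding
    // roughly one in every 64 eligible constants.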
    // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
    // FIXME: should this use a test for 32-bitness instead of this specific exception?
#if !CPU(X86_64) && !CPU(ARM64)
    void addPtr(Address src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(AbsoluteAddress src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(RegisterID src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add32(imm, srcDest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add32(TrustedImm32(imm), dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add32(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add32(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and32(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and32(imm, srcDest);
    }

    void negPtr(RegisterID dest)
    {
        neg32(dest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or32(src, dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or32(op1, op2, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or32(TrustedImm32(imm), dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub32(TrustedImm32(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor32(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor32(imm, srcDest);
    }
    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load32(address, dest);
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithCompactAddressOffsetPatch(address, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        move(Imm32(imm.asTrustedImmPtr()), dest);
    }
    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store32(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(ImmPtr imm, Address address)
    {
        store32(Imm32(imm.asTrustedImmPtr()), address);
    }

    void storePtr(TrustedImmPtr imm, void* address)
    {
        store32(TrustedImm32(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store32WithAddressOffsetPatch(src, address);
    }
    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, src, dest);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, imm, dest);
    }
    using MacroAssemblerBase::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
    }

#else // CPU(X86_64) || CPU(ARM64)
    void addPtr(RegisterID src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(Address src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add64(imm, srcDest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add64(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, Address address)
    {
        add64(imm, address);
    }

    void addPtr(AbsoluteAddress src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add64(TrustedImm64(imm), dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add64(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and64(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and64(imm, srcDest);
    }

    void negPtr(RegisterID dest)
    {
        neg64(dest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or64(src, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or64(TrustedImm64(imm), dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or64(op1, op2, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        or64(imm, src, dest);
    }

    void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
    {
        rotateRight64(imm, srcDst);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub64(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub64(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub64(TrustedImm64(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor64(src, dest);
    }

    void xorPtr(RegisterID src, Address dest)
    {
        xor64(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor64(imm, srcDest);
    }
    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load64(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load64(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load64(address, dest);
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load64WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load64WithCompactAddressOffsetPatch(address, dest);
    }
    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store64(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store64(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store64(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store64(TrustedImm64(imm), address);
    }

    void storePtr(TrustedImmPtr imm, BaseIndex address)
    {
        store64(TrustedImm64(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store64WithAddressOffsetPatch(src, address);
    }
    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare64(cond, left, right, dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        compare64(cond, left, right, dest);
    }

    void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        test64(cond, reg, mask, dest);
    }

    void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
    {
        test64(cond, reg, mask, dest);
    }
    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch64(cond, left, TrustedImm64(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch64(cond, left, TrustedImm64(right));
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest64(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
    {
        return branchTest64(cond, address, reg);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd64(cond, imm, dest);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd64(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub64(cond, imm, dest);
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub64(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        return branchSub64(cond, src1, src2, dest);
    }
#if ENABLE(JIT_CONSTANT_BLINDING)
    using MacroAssemblerBase::and64;
    using MacroAssemblerBase::convertInt32ToDouble;
    using MacroAssemblerBase::store64;

    bool shouldBlindDouble(double value)
    {
        // Don't trust NaN or +/-Infinity.
        if (!std::isfinite(value))
            return shouldConsiderBlinding();

        // Try to force normalisation, and check that there's no change
        // in the bit pattern.
        if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
            return shouldConsiderBlinding();

        value = abs(value);
        // Only allow a limited set of fractional components.
        double scaledValue = value * 8;
        if (scaledValue / 8 != value)
            return shouldConsiderBlinding();
        double frac = scaledValue - floor(scaledValue);
        if (frac != 0.0)
            return shouldConsiderBlinding();

        return value > 0xff;
    }
    bool shouldBlind(ImmPtr imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug builds always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much.
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
    }
    struct RotatedImmPtr {
        RotatedImmPtr(uintptr_t v1, uint8_t v2)
            : value(reinterpret_cast<void*>(v1))
            , rotation(v2)
        {
        }
        TrustedImmPtr value;
        TrustedImm32 rotation;
    };

    RotatedImmPtr rotationBlindConstant(ImmPtr imm)
    {
        uint8_t rotation = random() % (sizeof(void*) * 8);
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
        return RotatedImmPtr(value, rotation);
    }

    void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRightPtr(constant.rotation, dest);
    }
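
    // The raw pointer value never appears as an instruction immediate: the code
    // materialises the value rotated left by a random amount, then rotates it
    // back into place. E.g. with rotation == 8, 0x1122334455667788 is emitted
    // as 0x2233445566778811 and restored by rotateRightPtr (illustrative values).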
    bool shouldBlind(Imm64 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug builds always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much.
        uint64_t value = imm.asTrustedImm64().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;

            JSValue jsValue = JSValue::decode(value);
            if (jsValue.isInt32())
                return shouldBlind(Imm32(jsValue.asInt32()));
            if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
                return false;

            if (!shouldBlindDouble(bitwise_cast<double>(value)))
                return false;
        }
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
    }
    struct RotatedImm64 {
        RotatedImm64(uint64_t v1, uint8_t v2)
            : value(v1)
            , rotation(v2)
        {
        }
        TrustedImm64 value;
        TrustedImm32 rotation;
    };

    RotatedImm64 rotationBlindConstant(Imm64 imm)
    {
        uint8_t rotation = random() % (sizeof(int64_t) * 8);
        uint64_t value = imm.asTrustedImm64().m_value;
        value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
        return RotatedImm64(value, rotation);
    }

    void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRight64(constant.rotation, dest);
    }
    void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
    {
        if (shouldBlind(imm)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
            convertInt32ToDouble(scratchRegister, dest);
        } else
            convertInt32ToDouble(imm.asTrustedImm32(), dest);
    }
    void move(ImmPtr imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImmPtr(), dest);
    }

    void move(Imm64 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm64(), dest);
    }
    void and64(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and64(key.value1, dest);
            and64(key.value2, dest);
        } else
            and64(imm.asTrustedImm32(), dest);
    }
    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        if (shouldBlind(right)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
            return branchPtr(cond, left, scratchRegister);
        }
        return branchPtr(cond, left, right.asTrustedImmPtr());
    }
    void storePtr(ImmPtr imm, Address dest)
    {
        if (shouldBlind(imm)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            storePtr(scratchRegister, dest);
        } else
            storePtr(imm.asTrustedImmPtr(), dest);
    }

    void store64(Imm64 imm, Address dest)
    {
        if (shouldBlind(imm)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            store64(scratchRegister, dest);
        } else
            store64(imm.asTrustedImm64(), dest);
    }

#endif // ENABLE(JIT_CONSTANT_BLINDING)
#endif // !CPU(X86_64)
#if ENABLE(JIT_CONSTANT_BLINDING)
    bool shouldBlind(Imm32 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug builds always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#else

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much.
        uint32_t value = imm.asTrustedImm32().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffff:
            return false;
        default:
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
#endif
    }
    struct BlindedImm32 {
        BlindedImm32(int32_t v1, int32_t v2)
            : value1(v1)
            , value2(v2)
        {
        }
        TrustedImm32 value1;
        TrustedImm32 value2;
    };
    uint32_t keyForConstant(uint32_t value, uint32_t& mask)
    {
        uint32_t key = random();

        if (value <= 0xff)
            mask = 0xff;
        else if (value <= 0xffff)
            mask = 0xffff;
        else if (value <= 0xffffff)
            mask = 0xffffff;
        else
            mask = 0xffffffff;
        return key & mask;
    }

    uint32_t keyForConstant(uint32_t value)
    {
        uint32_t mask = 0;
        return keyForConstant(value, mask);
    }
    BlindedImm32 xorBlindConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue);
        return BlindedImm32(baseValue ^ key, key);
    }
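
    // The emitted sequence is a move of value1 followed by an xor with value2,
    // and since (baseValue ^ key) ^ key == baseValue the register ends up
    // holding the original constant without it ever appearing as an
    // instruction immediate.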
    BlindedImm32 additionBlindedConstant(Imm32 imm)
    {
        // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
        static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };

        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
        if (key > baseValue)
            key = key - baseValue;
        return BlindedImm32(baseValue - key, key);
    }
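
    // The two halves satisfy value1 + value2 == baseValue (modulo 2^32), and
    // the mask table keeps both halves as aligned as the original constant, so
    // a blinded pointer offset applied in two additions preserves alignment.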
    BlindedImm32 andBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
    }
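
    // Bit by bit: where key is 1, value1 carries the original bit and value2 is
    // 1; where key is 0 the roles swap. Hence value1 & value2 == baseValue
    // within the mask chosen by keyForConstant().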
    BlindedImm32 orBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
    }
    void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
    {
        move(constant.value1, dest);
        xor32(constant.value2, dest);
    }
    void add32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            add32(key.value1, dest);
            add32(key.value2, dest);
        } else
            add32(imm.asTrustedImm32(), dest);
    }
    void addPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            addPtr(key.value1, dest);
            addPtr(key.value2, dest);
        } else
            addPtr(imm.asTrustedImm32(), dest);
    }
    void and32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and32(key.value1, dest);
            and32(key.value2, dest);
        } else
            and32(imm.asTrustedImm32(), dest);
    }
    void andPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            andPtr(key.value1, dest);
            andPtr(key.value2, dest);
        } else
            andPtr(imm.asTrustedImm32(), dest);
    }
    void and32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return and32(imm.asTrustedImm32(), dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            and32(src, dest);
        } else
            and32(imm.asTrustedImm32(), src, dest);
    }
    void move(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm32(), dest);
    }
    void or32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return or32(imm, dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            or32(src, dest);
        } else
            or32(imm.asTrustedImm32(), src, dest);
    }
    void or32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = orBlindedConstant(imm);
            or32(key.value1, dest);
            or32(key.value2, dest);
        } else
            or32(imm.asTrustedImm32(), dest);
    }
    void poke(Imm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(ImmPtr value, int index = 0)
    {
        storePtr(value, addressForPoke(index));
    }
#if CPU(X86_64) || CPU(ARM64)
    void poke(Imm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }
#endif // CPU(X86_64) || CPU(ARM64)
    void store32(Imm32 imm, Address dest)
    {
        if (shouldBlind(imm)) {
#if CPU(X86) || CPU(X86_64)
            BlindedImm32 blind = xorBlindConstant(imm);
            store32(blind.value1, dest);
            xor32(blind.value2, dest);
#else
            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
                store32(scratchRegister, dest);
            } else {
                // If we don't have a scratch register available for use, we'll just
                // place a random number of nops.
                uint32_t nopCount = random() & 3;
                while (nopCount--)
                    nop();
                store32(imm.asTrustedImm32(), dest);
            }
#endif
        } else
            store32(imm.asTrustedImm32(), dest);
    }
    void sub32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            sub32(key.value1, dest);
            sub32(key.value2, dest);
        } else
            sub32(imm.asTrustedImm32(), dest);
    }
    void subPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            subPtr(key.value1, dest);
            subPtr(key.value2, dest);
        } else
            subPtr(imm.asTrustedImm32(), dest);
    }
    void xor32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, src, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), src, dest);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), dest);
    }
    Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
    {
        if (shouldBlind(right)) {
            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(right), scratchRegister);
                return branch32(cond, left, scratchRegister);
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
            return branch32(cond, left, right.asTrustedImm32());
        }
        return branch32(cond, left, right.asTrustedImm32());
    }
    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
    {
        if (src == dest)
            ASSERT(scratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            if (src == dest) {
                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                    move(src, scratchRegister);
                    src = scratchRegister;
                }
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchAdd32(cond, src, dest);
        }
        return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
    }
    Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (src == dest)
            ASSERT(scratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            if (src == dest) {
                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                    move(src, scratchRegister);
                    src = scratchRegister;
                }
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchMul32(cond, src, dest);
        }
        return branchMul32(cond, imm.asTrustedImm32(), src, dest);
    }
    // branchSub32 takes a scratch register because 32-bit platforms make use of
    // this with src == dest, and on x86-32 we don't have a platform scratch register.
    Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
    {
        if (shouldBlind(imm)) {
            ASSERT(scratch != dest);
            ASSERT(scratch != src);
            loadXorBlindedConstant(xorBlindConstant(imm), scratch);
            return branchSub32(cond, src, scratch, dest);
        }
        return branchSub32(cond, src, imm.asTrustedImm32(), dest);
    }
    // Immediate shifts only have 5 controllable bits,
    // so we'll consider them safe for now.
    TrustedImm32 trustedImm32ForShift(Imm32 imm)
    {
        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
    }
    void lshift32(Imm32 imm, RegisterID dest)
    {
        lshift32(trustedImm32ForShift(imm), dest);
    }

    void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        lshift32(src, trustedImm32ForShift(amount), dest);
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        rshift32(trustedImm32ForShift(imm), dest);
    }

    void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        rshift32(src, trustedImm32ForShift(amount), dest);
    }

    void urshift32(Imm32 imm, RegisterID dest)
    {
        urshift32(trustedImm32ForShift(imm), dest);
    }

    void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        urshift32(src, trustedImm32ForShift(amount), dest);
    }
#endif // ENABLE(JIT_CONSTANT_BLINDING)
};

} // namespace JSC
#else // ENABLE(ASSEMBLER)

// If there is no assembler for this platform, at least allow code to make references to
// some of the things it would otherwise define, albeit without giving that code any way
// of doing anything useful.
class MacroAssembler {
private:
    MacroAssembler() { }

public:
    enum RegisterID { NoRegister };
    enum FPRegisterID { NoFPRegister };
};

#endif // ENABLE(ASSEMBLER)
#endif // MacroAssembler_h