/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef MacroAssembler_h
#define MacroAssembler_h

#if ENABLE(ASSEMBLER)

#if CPU(ARM_THUMB2)
#include "MacroAssemblerARMv7.h"
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };

#elif CPU(ARM_TRADITIONAL)
#include "MacroAssemblerARM.h"
namespace JSC { typedef MacroAssemblerARM MacroAssemblerBase; };

#elif CPU(MIPS)
#include "MacroAssemblerMIPS.h"
namespace JSC {
typedef MacroAssemblerMIPS MacroAssemblerBase;
};

#elif CPU(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };

#elif CPU(X86_64)
#include "MacroAssemblerX86_64.h"
namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };

#elif CPU(SH4)
#include "MacroAssemblerSH4.h"
namespace JSC {
typedef MacroAssemblerSH4 MacroAssemblerBase;
};

#else
#error "The MacroAssembler is not supported on this platform."
#endif
namespace JSC {

class MacroAssembler : public MacroAssemblerBase {
public:

    using MacroAssemblerBase::pop;
    using MacroAssemblerBase::jump;
    using MacroAssemblerBase::branch32;
#if CPU(X86_64)
    using MacroAssemblerBase::branchPtr;
    using MacroAssemblerBase::branchTestPtr;
#endif
    using MacroAssemblerBase::move;

#if ENABLE(JIT_CONSTANT_BLINDING)
    using MacroAssemblerBase::add32;
    using MacroAssemblerBase::and32;
    using MacroAssemblerBase::branchAdd32;
    using MacroAssemblerBase::branchMul32;
    using MacroAssemblerBase::branchSub32;
    using MacroAssemblerBase::lshift32;
    using MacroAssemblerBase::or32;
    using MacroAssemblerBase::rshift32;
    using MacroAssemblerBase::store32;
    using MacroAssemblerBase::sub32;
    using MacroAssemblerBase::urshift32;
    using MacroAssemblerBase::xor32;
#endif
    // Utilities used by the DFG JIT.
    using MacroAssemblerBase::invert;

    static DoubleCondition invert(DoubleCondition cond)
    {
        switch (cond) {
        case DoubleEqual:
            return DoubleNotEqualOrUnordered;
        case DoubleNotEqual:
            return DoubleEqualOrUnordered;
        case DoubleGreaterThan:
            return DoubleLessThanOrEqualOrUnordered;
        case DoubleGreaterThanOrEqual:
            return DoubleLessThanOrUnordered;
        case DoubleLessThan:
            return DoubleGreaterThanOrEqualOrUnordered;
        case DoubleLessThanOrEqual:
            return DoubleGreaterThanOrUnordered;
        case DoubleEqualOrUnordered:
            return DoubleNotEqual;
        case DoubleNotEqualOrUnordered:
            return DoubleEqual;
        case DoubleGreaterThanOrUnordered:
            return DoubleLessThanOrEqual;
        case DoubleGreaterThanOrEqualOrUnordered:
            return DoubleLessThan;
        case DoubleLessThanOrUnordered:
            return DoubleGreaterThanOrEqual;
        case DoubleLessThanOrEqualOrUnordered:
            return DoubleGreaterThan;
        default:
            ASSERT_NOT_REACHED();
            return DoubleEqual; // make compiler happy
        }
    }
    static bool isInvertible(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
        case NonZero:
            return true;
        default:
            return false;
        }
    }

    static ResultCondition invert(ResultCondition cond)
    {
        switch (cond) {
        case Zero:
            return NonZero;
        case NonZero:
            return Zero;
        default:
            ASSERT_NOT_REACHED();
            return Zero; // Make compiler happy for release builds.
        }
    }
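
    // Illustrative sketch only: a caller that wants to fall through when a double
    // comparison holds can branch away on the inverted condition, e.g.
    //     Jump notEqual = branchDouble(invert(DoubleEqual), fpr0, fpr1);
    // ("fpr0"/"fpr1" are hypothetical FPRegisterID values, not defined in this header.)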
    // Platform agnostic convenience functions,
    // described in terms of other macro assembly methods.
    void pop()
    {
        addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister);
    }

    void peek(RegisterID dest, int index = 0)
    {
        loadPtr(Address(stackPointerRegister, (index * sizeof(void*))), dest);
    }

    Address addressForPoke(int index)
    {
        return Address(stackPointerRegister, (index * sizeof(void*)));
    }

    void poke(RegisterID src, int index = 0)
    {
        storePtr(src, addressForPoke(index));
    }

    void poke(TrustedImm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(TrustedImmPtr imm, int index = 0)
    {
        storePtr(imm, addressForPoke(index));
    }
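
    // Illustrative sketch only: spill a register to stack slot 2 and reload it later.
    //     poke(regT0, 2); // store regT0 at stackPointerRegister + 2 * sizeof(void*)
    //     peek(regT1, 2); // reload the same slot into regT1
    // ("regT0"/"regT1" are hypothetical RegisterID names, not defined in this header.)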
    // Backwards branches, these are currently all implemented using existing forwards branch mechanisms.
    void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }
    void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
    {
        branch32(cond, op1, op2).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
    {
        branch32(cond, left, right).linkTo(target, this);
    }

    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
    {
        branchTestPtr(cond, reg).linkTo(target, this);
    }

    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
    }

    PatchableJump patchableJump()
    {
        return PatchableJump(jump());
    }

    void jump(Label target)
    {
        jump().linkTo(target, this);
    }
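
    // Illustrative sketch only: a counted loop built from the backwards-branch helpers above.
    //     Label loopHead = label();
    //     ...loop body...
    //     sub32(TrustedImm32(1), counterReg);
    //     branch32(NotEqual, counterReg, TrustedImm32(0), loopHead);
    // ("counterReg" is a hypothetical RegisterID, not defined in this header.)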
    // Commute a relational condition; returns a new condition that will produce
    // the same results given the same inputs but with their positions exchanged.
    static RelationalCondition commute(RelationalCondition condition)
    {
        switch (condition) {
        case Above:
            return Below;
        case AboveOrEqual:
            return BelowOrEqual;
        case Below:
            return Above;
        case BelowOrEqual:
            return AboveOrEqual;
        case GreaterThan:
            return LessThan;
        case GreaterThanOrEqual:
            return LessThanOrEqual;
        case LessThan:
            return GreaterThan;
        case LessThanOrEqual:
            return GreaterThanOrEqual;
        default:
            break;
        }

        ASSERT(condition == Equal || condition == NotEqual);
        return condition;
    }
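
    // For example, branch32(LessThan, TrustedImm32(10), reg) above lowers to
    // branch32(commute(LessThan), reg, TrustedImm32(10)), i.e. GreaterThan, which
    // branches under exactly the same inputs ("10 < reg" is "reg > 10").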
#if !CPU(X86_64)

    // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
    // FIXME: should this use a test for 32-bitness instead of this specific exception?

    void addPtr(RegisterID src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add32(imm, srcDest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add32(TrustedImm32(imm), dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add32(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add32(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and32(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and32(imm, srcDest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or32(src, dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or32(op1, op2, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or32(TrustedImm32(imm), dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub32(TrustedImm32(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor32(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor32(imm, srcDest);
    }

    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load32(address, dest);
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithCompactAddressOffsetPatch(address, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        move(Imm32(imm.asTrustedImmPtr()), dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store32(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(ImmPtr imm, Address address)
    {
        store32(Imm32(imm.asTrustedImmPtr()), address);
    }

    void storePtr(TrustedImmPtr imm, void* address)
    {
        store32(TrustedImm32(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store32WithAddressOffsetPatch(src, address);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, imm, dest);
    }

    using MacroAssemblerBase::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
    }
#else // !CPU(X86_64)

#if ENABLE(JIT_CONSTANT_BLINDING)
    using MacroAssemblerBase::addPtr;
    using MacroAssemblerBase::andPtr;
    using MacroAssemblerBase::branchSubPtr;
    using MacroAssemblerBase::convertInt32ToDouble;
    using MacroAssemblerBase::storePtr;
    using MacroAssemblerBase::subPtr;
    using MacroAssemblerBase::xorPtr;

    bool shouldBlindDouble(double value)
    {
        // Don't trust NaN or +/-Infinity
        if (!isfinite(value))
            return true;

        // Try to force normalisation, and check that there's no change
        // in the bit pattern
        if (bitwise_cast<uintptr_t>(value * 1.0) != bitwise_cast<uintptr_t>(value))
            return true;

        // Only allow a limited set of fractional components
        double scaledValue = value * 8;
        if (scaledValue / 8 != value)
            return true;
        double frac = scaledValue - floor(scaledValue);
        if (frac != 0)
            return true;

        return false;
    }
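
    // In other words (assuming the reconstruction above): a double is only treated as safe
    // to embed unblinded if it is finite, already normalised, and its fractional part is an
    // exact multiple of 1/8 (e.g. 2.0 or 3.5 pass; 0.1 does not survive the *8 check).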
    bool shouldBlind(ImmPtr imm)
    {
#if !defined(NDEBUG)
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;

            JSValue jsValue = JSValue::decode(reinterpret_cast<void*>(value));
            if (jsValue.isInt32())
                return shouldBlind(Imm32(jsValue.asInt32()));
            if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
                return false;

            if (!shouldBlindDouble(bitwise_cast<double>(value)))
                return false;
        }
        }

        return shouldBlindForSpecificArch(value);
    }
    struct RotatedImmPtr {
        RotatedImmPtr(uintptr_t v1, uint8_t v2)
            : value(reinterpret_cast<void*>(v1))
            , rotation(v2)
        {
        }
        TrustedImmPtr value;
        TrustedImm32 rotation;
    };

    RotatedImmPtr rotationBlindConstant(ImmPtr imm)
    {
        uint8_t rotation = random() % (sizeof(void*) * 8);
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
        return RotatedImmPtr(value, rotation);
    }

    void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRightPtr(constant.rotation, dest);
    }
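
    // Why this round-trips: rotationBlindConstant emits the value rotated left by a random
    // amount r, and loadRotationBlindedConstant rotates the register right by the same r,
    // so the original pointer never appears as an immediate in the instruction stream but
    // is recovered exactly in dest.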
    void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
    {
        if (shouldBlind(imm)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
            convertInt32ToDouble(scratchRegister, dest);
        } else
            convertInt32ToDouble(imm.asTrustedImm32(), dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImmPtr(), dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        if (shouldBlind(right)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
            return branchPtr(cond, left, scratchRegister);
        }
        return branchPtr(cond, left, right.asTrustedImmPtr());
    }

    void storePtr(ImmPtr imm, Address dest)
    {
        if (shouldBlind(imm)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            storePtr(scratchRegister, dest);
        } else
            storePtr(imm.asTrustedImmPtr(), dest);
    }

#endif // ENABLE(JIT_CONSTANT_BLINDING)

#endif // !CPU(X86_64)
#if ENABLE(JIT_CONSTANT_BLINDING)
    bool shouldBlind(Imm32 imm)
    {
#if !defined(NDEBUG)
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint32_t value = imm.asTrustedImm32().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffff:
            return false;
        default:
            if (value <= 0xff)
                return false;
        }

        return shouldBlindForSpecificArch(value);
    }
    struct BlindedImm32 {
        BlindedImm32(int32_t v1, int32_t v2)
            : value1(v1)
            , value2(v2)
        {
        }
        TrustedImm32 value1;
        TrustedImm32 value2;
    };

    uint32_t keyForConstant(uint32_t value, uint32_t& mask)
    {
        uint32_t key = random();
        if (value <= 0xff)
            mask = 0xff;
        else if (value <= 0xffff)
            mask = 0xffff;
        else if (value <= 0xffffff)
            mask = 0xffffff;
        else
            mask = 0xffffffff;
        return key & mask;
    }

    uint32_t keyForConstant(uint32_t value)
    {
        uint32_t mask = 0;
        return keyForConstant(value, mask);
    }
    BlindedImm32 xorBlindConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue);
        return BlindedImm32(baseValue ^ key, key);
    }
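
    // Why this round-trips: value1 = baseValue ^ key and value2 = key, so applying
    // xor32(value2) after loading value1 yields (baseValue ^ key) ^ key == baseValue,
    // while neither emitted immediate equals the attacker-chosen constant (see
    // loadXorBlindedConstant below).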
    BlindedImm32 additionBlindedConstant(Imm32 imm)
    {
        // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
        static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };

        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
        if (key > baseValue)
            key = key - baseValue;
        return BlindedImm32(baseValue - key, key);
    }
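
    // Why this round-trips: value1 + value2 == (baseValue - key) + key == baseValue (mod 2^32).
    // The maskTable lookup clears the key's low bits when baseValue is 4- or 2-byte aligned,
    // so both halves of a blinded pointer offset preserve the original alignment.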
    BlindedImm32 andBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
    }
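
    // Why this round-trips: per bit, where key is 1 the first constant carries the base bit
    // and the second is 1; where key is 0 the first is 1 and the second carries the base bit.
    // ANDing the two therefore reproduces baseValue (the ASSERT guarantees the final "& mask"
    // does not drop any bits of baseValue).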
    BlindedImm32 orBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
    }
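
    // Why this round-trips: the two constants partition baseValue's set bits by the random
    // key, so ORing them together ((baseValue & key) | (baseValue & ~key)) reproduces baseValue.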
    void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
    {
        move(constant.value1, dest);
        xor32(constant.value2, dest);
    }

    void add32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            add32(key.value1, dest);
            add32(key.value2, dest);
        } else
            add32(imm.asTrustedImm32(), dest);
    }

    void addPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            addPtr(key.value1, dest);
            addPtr(key.value2, dest);
        } else
            addPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and32(key.value1, dest);
            and32(key.value2, dest);
        } else
            and32(imm.asTrustedImm32(), dest);
    }

    void andPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            andPtr(key.value1, dest);
            andPtr(key.value2, dest);
        } else
            andPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return and32(imm.asTrustedImm32(), dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            and32(src, dest);
        } else
            and32(imm.asTrustedImm32(), src, dest);
    }
    void move(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm32(), dest);
    }

    void or32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return or32(imm, dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            or32(src, dest);
        } else
            or32(imm.asTrustedImm32(), src, dest);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = orBlindedConstant(imm);
            or32(key.value1, dest);
            or32(key.value2, dest);
        } else
            or32(imm.asTrustedImm32(), dest);
    }

    void poke(Imm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(ImmPtr value, int index = 0)
    {
        storePtr(value, addressForPoke(index));
    }

    void store32(Imm32 imm, Address dest)
    {
        if (shouldBlind(imm)) {
#if CPU(X86) || CPU(X86_64)
            BlindedImm32 blind = xorBlindConstant(imm);
            store32(blind.value1, dest);
            xor32(blind.value2, dest);
#else
            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
                store32(scratchRegister, dest);
            } else {
                // If we don't have a scratch register available for use, we'll just
                // place a random number of nops.
                uint32_t nopCount = random() & 3;
                while (nopCount--)
                    nop();
                store32(imm.asTrustedImm32(), dest);
            }
#endif
        } else
            store32(imm.asTrustedImm32(), dest);
    }
    void sub32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            sub32(key.value1, dest);
            sub32(key.value2, dest);
        } else
            sub32(imm.asTrustedImm32(), dest);
    }

    void subPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            subPtr(key.value1, dest);
            subPtr(key.value2, dest);
        } else
            subPtr(imm.asTrustedImm32(), dest);
    }

    void xor32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, src, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), src, dest);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), dest);
    }
    Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
    {
        if (shouldBlind(right)) {
            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(right), scratchRegister);
                return branch32(cond, left, scratchRegister);
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
            return branch32(cond, left, right.asTrustedImm32());
        }

        return branch32(cond, left, right.asTrustedImm32());
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
    {
        if (src == dest) {
            if (!scratchRegisterForBlinding()) {
                // Release mode ASSERT, if this fails we will perform incorrect codegen.
                CRASH();
            }
        }
        if (shouldBlind(imm)) {
            if (src == dest) {
                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                    move(src, scratchRegister);
                    src = scratchRegister;
                }
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchAdd32(cond, src, dest);
        }
        return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
    }

    Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (src == dest) {
            if (!scratchRegisterForBlinding()) {
                // Release mode ASSERT, if this fails we will perform incorrect codegen.
                CRASH();
            }
        }
        if (shouldBlind(imm)) {
            if (src == dest) {
                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                    move(src, scratchRegister);
                    src = scratchRegister;
                }
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchMul32(cond, src, dest);
        }
        return branchMul32(cond, imm.asTrustedImm32(), src, dest);
    }
    // branchSub32 takes a scratch register as 32 bit platforms make use of this,
    // with src == dst, and on x86-32 we don't have a platform scratch register.
    Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
    {
        if (shouldBlind(imm)) {
            ASSERT(scratch != dest);
            ASSERT(scratch != src);
            loadXorBlindedConstant(xorBlindConstant(imm), scratch);
            return branchSub32(cond, src, scratch, dest);
        }
        return branchSub32(cond, src, imm.asTrustedImm32(), dest);
    }

    // Immediate shifts only have 5 controllable bits
    // so we'll consider them safe for now.
    TrustedImm32 trustedImm32ForShift(Imm32 imm)
    {
        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
    }
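
    // Rationale: masking with 31 means at most 5 bits of the shift amount reach the encoded
    // instruction, so an attacker cannot smuggle an arbitrary 32-bit pattern through a shift
    // immediate; the wrappers below therefore skip blinding for shift amounts.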
    void lshift32(Imm32 imm, RegisterID dest)
    {
        lshift32(trustedImm32ForShift(imm), dest);
    }

    void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        lshift32(src, trustedImm32ForShift(amount), dest);
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        rshift32(trustedImm32ForShift(imm), dest);
    }

    void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        rshift32(src, trustedImm32ForShift(amount), dest);
    }

    void urshift32(Imm32 imm, RegisterID dest)
    {
        urshift32(trustedImm32ForShift(imm), dest);
    }

    void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        urshift32(src, trustedImm32ForShift(amount), dest);
    }
#endif // ENABLE(JIT_CONSTANT_BLINDING)
};

} // namespace JSC
#else // ENABLE(ASSEMBLER)

// If there is no assembler for this platform, at least allow code to make references to
// some of the things it would otherwise define, albeit without giving that code any way
// of doing anything useful.
class MacroAssembler {
private:
    MacroAssembler() { }

public:
    enum RegisterID { NoRegister };
    enum FPRegisterID { NoFPRegister };
};

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssembler_h