/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
protected:
#if CPU(X86_64)
    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
#endif

    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::FPRegisterID FPRegisterID;
    typedef X86Assembler::XMMRegisterID XMMRegisterID;

    static const int MaximumCompactPtrAlignedAddressOffset = 127;
    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };
    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
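
    // An informal note on the encoding above (see branchDouble() below for the
    // authoritative logic): DoubleConditionBitInvert requests that the ucomisd
    // operands be compared in swapped order, and DoubleConditionBitSpecial
    // requests the extra NaN handling needed by DoubleEqual and
    // DoubleNotEqualOrUnordered; both bits are stripped before the raw
    // X86Assembler::Condition is emitted.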
    static const RegisterID stackPointerRegister = X86Registers::esp;
#if ENABLE(JIT_CONSTANT_BLINDING)
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
#if CPU(X86_64)
    static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
#endif
#endif
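
    // A hedged illustration of the threshold: small immediates such as offsets
    // and indices are left unblinded, while wide values that could smuggle
    // attacker-chosen instruction bytes are blinded, e.g.
    //     shouldBlindForSpecificArch(0x00001000u) // false - small value
    //     shouldBlindForSpecificArch(0xdeadbeefu) // true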
    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst)
    // For many operations the source may be a TrustedImm32, the srcDst operand
    // may often be a memory location (explicitly described using an Address object).
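
    // A minimal usage sketch (hypothetical client code; real clients use the
    // MacroAssembler subclasses rather than this class directly):
    //     masm.add32(TrustedImm32(4), X86Registers::eax);               // eax += 4
    //     masm.add32(Address(X86Registers::ebx, 8), X86Registers::eax); // eax += *(ebx + 8)
    //     masm.sub32(X86Registers::ecx, Address(X86Registers::ebx));    // *ebx -= ecx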
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }
    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            and32(op2, dest);
        else {
            move(op2, dest);
            and32(op1, dest);
        }
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        and32(imm, dest);
    }
    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        lshift32(shift_amount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        lshift32(imm, dest);
    }
    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }
    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            move(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or32(imm, dest);
    }
    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        rshift32(shift_amount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }
    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        urshift32(shift_amount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift32(imm, dest);
    }
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }
    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(TrustedImm32 imm, Address dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_m(dest.offset, dest.base);
        else
            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_r(dest);
        else
            m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor32(op2, dest);
        else {
            move(op2, dest);
            xor32(op1, dest);
        }
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        xor32(imm, dest);
    }
    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

    void absDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(&negativeZeroConstant, dst);
        m_assembler.andnpd_rr(src, dst);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(&negativeZeroConstant, dst);
        m_assembler.xorpd_rr(src, dst);
    }
    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32. Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.
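
    // A minimal sketch of the addressing forms (hypothetical client code,
    // where 'base', 'index' and 'dest' name RegisterIDs):
    //     masm.load32(Address(base, 4), dest);                  // dest = *(base + 4)
    //     masm.load32(BaseIndex(base, index, TimesFour), dest); // dest = *(base + index * 4)
    //     masm.store32(TrustedImm32(0), Address(base, 4));      // *(base + 4) = 0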
    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        ASSERT(value >= 0);
        ASSERT(value < MaximumCompactPtrAlignedAddressOffset);
        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }
    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, dest);
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8Signed(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, dest);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16Signed(Address address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, dest);
    }
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }
    void store8(TrustedImm32 imm, Address address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
    }

    void store8(TrustedImm32 imm, BaseIndex address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }
    void store8(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp;
            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
                temp = X86Registers::eax;
            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
                temp = X86Registers::ebx;
            else {
                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
                temp = X86Registers::ecx;
            }

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
        } else
#endif
        m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
    }
    void store16(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp;
            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
                temp = X86Registers::eax;
            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
                temp = X86Registers::ebx;
            else {
                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
                temp = X86Registers::ecx;
            }

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
        } else
#endif
        m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
    }
    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.
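
    // A minimal sketch (hypothetical client code), assuming fpr0 names an
    // FPRegisterID: accumulate a double from memory, then test for NaN
    // (an unordered self-comparison is only true for NaN).
    //     masm.addDouble(Address(base, 8), fpr0); // fpr0 += *(double*)(base + 8)
    //     Jump isNaN = masm.branchDouble(DoubleNotEqualOrUnordered, fpr0, fpr0);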
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movsd_rr(src, dest);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
#if CPU(X86)
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address, dest);
#else
        move(TrustedImmPtr(address), scratchRegister);
        loadDouble(scratchRegister, dest);
#endif
    }
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
    }
    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
    }
    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsd2ss_rr(src, dst);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtss2sd_rr(src, dst);
    }
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        // B := A / B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        divDouble(op2, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        // B := A - B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        subDouble(op2, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            mulDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            mulDouble(op1, dest);
        }
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }
    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }
    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? GreaterThanOrEqual : LessThan, dest, TrustedImm32(0));
    }

    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
    }

#if CPU(X86_64)
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2siq_rr(src, dest);
    }
#endif
    // Convert 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }
    void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psllq_i8r(imm.m_value, reg);
    }

    void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psrlq_i8r(imm.m_value, reg);
    }

    void orPacked(XMMRegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.por_rr(src, dst);
    }

    void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    void movePackedToInt32(XMMRegisterID src, RegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }
    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.
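
    // A minimal sketch (hypothetical client code): preserve a register across
    // a clobbering sequence using the stack.
    //     masm.push(X86Registers::eax);
    //     ... code that clobbers eax ...
    //     masm.pop(X86Registers::eax);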
    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }
    // Register move operations:
    //
    // Move values in registers.
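
    // A minimal sketch (hypothetical client code):
    //     masm.move(TrustedImm32(42), X86Registers::eax);  // eax = 42 (0 is emitted as xor, see below)
    //     masm.move(X86Registers::eax, X86Registers::edx); // edx = eax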
    void move(TrustedImm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the TrustedImm32 value is zero extended into the register;
        // it may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif
    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forwards jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively, for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
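
    // A minimal sketch of the Jump workflow (hypothetical client code):
    //     Jump slowCase = masm.branch32(NotEqual, X86Registers::eax, TrustedImm32(5));
    //     ... emit the fast path, taken when eax == 5 ...
    //     slowCase.link(&masm); // bind the branch to the current code location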
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }
    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // if we are only interested in the low seven bits, this can be tested with a testb
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFFFF00));

        m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }
    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.
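
    // A minimal sketch (hypothetical client code): a checked integer add, as a
    // JIT would emit before falling back to a slow path on overflow.
    //     Jump overflowed = masm.branchAdd32(Overflow, X86Registers::edx, X86Registers::eax);
    //     ... non-overflow path continues here; link 'overflowed' to the slow path ...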
    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd32(cond, src2, dest);
        move(src2, dest);
        return branchAdd32(cond, src1, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(src, dest);
        return branchAdd32(cond, imm, dest);
    }
    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        mul32(imm, src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul32(cond, src2, dest);
        move(src2, dest);
        return branchMul32(cond, src1, dest);
    }
    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(src1 == dest || src2 != dest);

        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }
    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
    // Miscellaneous operations:

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }
    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        set32(x86Condition(cond), dest);
    }
    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }
    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }
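
    // This works because the x86 condition encodings pair each predicate with
    // its negation in adjacent values, so flipping the low bit negates the test;
    // e.g. invert(Equal) == NotEqual and invert(Below) == AboveOrEqual.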
protected:
    X86Assembler::Condition x86Condition(RelationalCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    X86Assembler::Condition x86Condition(ResultCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }
    void set32(X86Assembler::Condition cond, RegisterID dest)
    {
#if CPU(X86)
        // On 32-bit x86 we can only set the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (dest >= 4) {
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            m_assembler.setCC_r(cond, X86Registers::eax);
            m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            return;
        }
#endif
        m_assembler.setCC_r(cond, dest);
        m_assembler.movzbl_rr(dest, dest);
    }
private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                "movl $0x1, %%eax;"
                "pushl %%ebx;"
                "cpuid;"
                "popl %%ebx;"
                "movl %%edx, %0;"
                : "=g" (flags)
                :
                : "%eax", "%ecx", "%edx"
                );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but in debug builds this method is defined to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h