/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
#if CPU(X86_64)
    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
#endif

protected:
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::XMMRegisterID XMMRegisterID;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value >= -128 && value <= 127;
    }

    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        PositiveOrZero = X86Assembler::ConditionNS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);

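    // A brief illustration of the encoding (a sketch, not part of the interface):
    // branchDouble(DoubleEqual, a, b) is only taken when a == b and neither
    // operand is NaN, while branchDouble(DoubleEqualOrUnordered, a, b) is also
    // taken when either operand is NaN. The Invert bit asks branchDouble() to
    // swap the ucomisd operand order, and the Special bit asks it to emit the
    // extra parity-flag checks that give (un)ordered equality these semantics.
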
    static const RegisterID stackPointerRegister = X86Registers::esp;
    static const RegisterID framePointerRegister = X86Registers::ebp;

    static bool canBlind() { return true; }
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst)
    // For many operations the source may be a TrustedImm32, the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).
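    //
    // A minimal usage sketch (illustrative only; `masm` stands for a concrete
    // MacroAssembler instance, which is not defined in this header):
    //
    //     masm.add32(TrustedImm32(8), X86Registers::eax);               // eax += 8
    //     masm.and32(X86Registers::ebx, X86Registers::eax);             // eax &= ebx
    //     masm.sub32(Address(X86Registers::esp, 4), X86Registers::eax); // eax -= *(esp + 4)
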
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.inc_r(dest);
        else
            m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            and32(op2, dest);
        else {
            move(op2, dest);
            and32(op1, dest);
        }
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        and32(imm, dest);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        lshift32(shift_amount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        lshift32(imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            move(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or32(imm, dest);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        rshift32(shift_amount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        urshift32(shift_amount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift32(imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.dec_r(dest);
        else
            m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(TrustedImm32 imm, Address dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_m(dest.offset, dest.base);
        else
            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_r(dest);
        else
            m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor32(op2, dest);
        else {
            move(op2, dest);
            xor32(op1, dest);
        }
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        xor32(imm, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

    void absDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
        m_assembler.andnpd_rr(src, dst);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
        m_assembler.xorpd_rr(src, dst);
    }

    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32. Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.
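    //
    // An illustrative sketch (again assuming a hypothetical instance `masm`):
    //
    //     masm.load32(Address(X86Registers::ebx, 12), X86Registers::eax);        // eax = *(ebx + 12)
    //     masm.load32(BaseIndex(X86Registers::ebx, X86Registers::ecx, TimesFour),
    //         X86Registers::eax);                                                // eax = *(ebx + ecx * 4)
    //     masm.store32(TrustedImm32(0), Address(X86Registers::ebx, 12));         // *(ebx + 12) = 0
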
    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(value));
        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, dest);
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8Signed(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, dest);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16Signed(Address address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void store8(TrustedImm32 imm, Address address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
    }

    void store8(TrustedImm32 imm, BaseIndex address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    static ALWAYS_INLINE RegisterID getUnusedRegister(BaseIndex address)
    {
        if (address.base != X86Registers::eax && address.index != X86Registers::eax)
            return X86Registers::eax;

        if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
            return X86Registers::ebx;

        ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
        return X86Registers::ecx;
    }

    static ALWAYS_INLINE RegisterID getUnusedRegister(Address address)
    {
        if (address.base != X86Registers::eax)
            return X86Registers::eax;

        ASSERT(address.base != X86Registers::edx);
        return X86Registers::edx;
    }

    void store8(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store8(RegisterID src, Address address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base);
    }

    void store16(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
    }

    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.
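    //
    // An illustrative sketch (assuming a hypothetical instance `masm` and that
    // isSSE2Present() holds):
    //
    //     masm.loadDouble(Address(X86Registers::eax, 0), X86Registers::xmm0); // xmm0 = *(double*)(eax)
    //     masm.addDouble(X86Registers::xmm1, X86Registers::xmm0);             // xmm0 += xmm1
    //     masm.storeDouble(X86Registers::xmm0, Address(X86Registers::eax, 0));
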
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movsd_rr(src, dest);
    }

    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
    {
#if CPU(X86)
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.m_value, dest);
#else
        move(address, scratchRegister);
        loadDouble(scratchRegister, dest);
#endif
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsd2ss_rr(src, dst);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtss2sd_rr(src, dst);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A / B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        divDouble(op2, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        subDouble(op2, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            mulDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            mulDouble(op1, dest);
        }
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            if (left == right)
                return Jump(m_assembler.jnp());
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            if (left == right)
                return Jump(m_assembler.jp());
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }

    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }

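    // cvttsd2si yields 0x80000000 both for INT_MIN and for unconvertible inputs
    // (NaN, out-of-range values), which is why that value is treated as failure.
    // An illustrative sketch (`masm`, `fpReg` and `gpReg` are hypothetical names):
    //
    //     Jump failed = masm.branchTruncateDoubleToInt32(fpReg, gpReg);
    //     // ... fast path using the integer in gpReg ...
    //     failed.link(&masm); // slow path: handle the unconvertible case
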
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
    }

#if CPU(X86_64)
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2siq_rr(src, dest);
    }
#endif

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

    void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psllq_i8r(imm.m_value, reg);
    }

    void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psrlq_i8r(imm.m_value, reg);
    }

    void orPacked(XMMRegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.por_rr(src, dst);
    }

    void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    void movePackedToInt32(XMMRegisterID src, RegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.
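    //
    // An illustrative sketch (assuming a hypothetical instance `masm`):
    //
    //     masm.push(X86Registers::eax);  // stack pointer moves down one word
    //     masm.push(TrustedImm32(42));
    //     masm.pop(X86Registers::ecx);   // ecx = 42
    //     masm.pop(X86Registers::eax);
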
    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }

    // Register move operations:
    //
    // Move values in registers.
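    //
    // An illustrative sketch (assuming a hypothetical instance `masm`):
    //
    //     masm.move(TrustedImm32(0), X86Registers::eax);   // emitted as xorl eax, eax
    //     masm.move(X86Registers::eax, X86Registers::edx); // edx = eax
    //     masm.swap(X86Registers::eax, X86Registers::edx);
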
    void move(TrustedImm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the TrustedImm32 value is zero extended into the register, it
        // may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void move(TrustedImm64 imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.m_value, dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif

    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations returns a Jump
    // object which may be linked at a later point, allowing forwards jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
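    //
    // An illustrative sketch (assuming a hypothetical instance `masm`):
    //
    //     Jump isSmall = masm.branch32(LessThanOrEqual, X86Registers::eax, TrustedImm32(5));
    //     // ... code emitted here runs when eax > 5 ...
    //     isSmall.link(&masm); // the forward branch lands here
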
public:
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void test32(ResultCondition, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else if (!(mask.m_value & ~0xff) && reg < X86Registers::esp) { // Using esp and greater as a byte register yields the upper half of the 16 bit registers ax, cx, dx and bx, e.g. esp, register 4, is actually ah.
            if (mask.m_value == 0xff)
                m_assembler.testb_rr(reg, reg);
            else
                m_assembler.testb_i8r(mask.m_value, reg);
        } else
            m_assembler.testl_i32r(mask.m_value, reg);
    }

    Jump branch(ResultCondition cond)
    {
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        test32(cond, reg, mask);
        return branch(cond);
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        generateTest32(address, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFFFF00));

        m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }

    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.
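    //
    // An illustrative sketch (assuming a hypothetical instance `masm`):
    //
    //     // eax += ebx, branching away if the signed addition overflowed.
    //     Jump overflowed = masm.branchAdd32(Overflow, X86Registers::ebx, X86Registers::eax);
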
    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd32(cond, src2, dest);
        move(src2, dest);
        return branchAdd32(cond, src1, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(src, dest);
        return branchAdd32(cond, imm, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        mul32(imm, src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul32(cond, src2, dest);
        move(src2, dest);
        return branchMul32(cond, src1, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(src1 == dest || src2 != dest);

        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Miscellaneous operations:

    void breakpoint()
    {
        m_assembler.int3();
    }

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void ret()
    {
        m_assembler.ret();
    }

    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        set32(x86Condition(cond), dest);
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        generateTest32(address, mask);
        set32(x86Condition(cond), dest);
    }

    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }

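    // This works because the x86 condition codes come in true/false pairs that
    // differ only in the low bit, so flipping bit 0 negates the condition, e.g.
    // invert(Equal) == NotEqual and invert(LessThan) == GreaterThanOrEqual.
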
    void nop()
    {
        m_assembler.nop();
    }

    void memoryFence()
    {
        m_assembler.mfence();
    }

    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
    }

    static ptrdiff_t maxJumpReplacementSize()
    {
        return X86Assembler::maxJumpReplacementSize();
    }

#if USE(MASM_PROBE)
    struct CPUState {
        #define DECLARE_REGISTER(_type, _regName) \
            _type _regName;
        FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
        #undef DECLARE_REGISTER
    };

    struct ProbeContext;
    typedef void (*ProbeFunction)(struct ProbeContext*);

    struct ProbeContext {
        ProbeFunction probeFunction;
        void* arg1;
        void* arg2;
        CPUState cpu;

        void dump(const char* indentation = 0);
    private:
        void dumpCPURegisters(const char* indentation);
    };
#endif // USE(MASM_PROBE)

protected:
    X86Assembler::Condition x86Condition(RelationalCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    X86Assembler::Condition x86Condition(ResultCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    void set32(X86Assembler::Condition cond, RegisterID dest)
    {
#if CPU(X86)
        // On 32-bit x86 we can only set the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (dest >= 4) {
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            m_assembler.setCC_r(cond, X86Registers::eax);
            m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            return;
        }
#endif
        m_assembler.setCC_r(cond, dest);
        m_assembler.movzbl_rr(dest, dest);
    }

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

    ALWAYS_INLINE void generateTest32(Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else if (!(mask.m_value & ~0xff))
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        else if (!(mask.m_value & ~0xff00))
            m_assembler.testb_im(mask.m_value >> 8, address.offset + 1, address.base);
        else if (!(mask.m_value & ~0xff0000))
            m_assembler.testb_im(mask.m_value >> 16, address.offset + 2, address.base);
        else if (!(mask.m_value & ~0xff000000))
            m_assembler.testb_im(mask.m_value >> 24, address.offset + 3, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
    }

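    // Illustrative examples of the byte-narrowing above:
    //     mask 0x000000ff emits testb mask,      (address)     (single-byte test)
    //     mask 0x0000ff00 emits testb mask >> 8, (address + 1)
    //     mask 0x12345678 emits testl mask,      (address)     (mask spans bytes; no narrowing)
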
#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                "movl $0x1, %%eax;"
                "pushl %%ebx;"
                "cpuid;"
                "popl %%ebx;"
                "movl %%edx, %0;"
                : "=g" (flags)
                :
                : "%eax", "%ecx", "%edx"
                );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but in debug builds we add this method to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h