/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {

protected:
#if CPU(X86_64)
    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
#endif

    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::FPRegisterID FPRegisterID;
    typedef X86Assembler::XMMRegisterID XMMRegisterID;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value >= -128 && value <= 127;
    }

    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        PositiveOrZero = X86Assembler::ConditionNS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);

    static const RegisterID stackPointerRegister = X86Registers::esp;

#if ENABLE(JIT_CONSTANT_BLINDING)
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
#if CPU(X86_64)
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
#if OS(DARWIN) // On 64-bit systems other than DARWIN, uint64_t and uintptr_t are the same type, so the overload is prohibited.
    static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
#endif
#endif
#endif

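    // Illustrative note (not part of the original header; the caller shown is
    // hypothetical): these predicates opt small immediates out of constant
    // blinding, on the assumption that values below 0x00ffffff give an
    // attacker too little controllable data for JIT spraying:
    //
    //     if (shouldBlindForSpecificArch(static_cast<uint32_t>(value)))
    //         ... // emit the blinded (e.g. xor-masked) form instead of the raw constant
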
    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst)
    // For many operations the source may be a TrustedImm32, the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }

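    // Usage sketch (illustrative, not part of this header; 'masm' is an
    // assumed MacroAssemblerX86Common instance): the three-operand form above
    // is non-destructive because it lowers to LEA rather than ADD, e.g.
    // computing edx = ecx + 8 while leaving ecx intact:
    //
    //     masm.add32(TrustedImm32(8), X86Registers::ecx, X86Registers::edx);
    //     // emits: leal 8(%ecx), %edx
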
    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            and32(op2, dest);
        else {
            move(op2, dest);
            and32(op1, dest);
        }
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        and32(imm, dest);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        lshift32(shift_amount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        lshift32(imm, dest);
    }

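    // Illustrative note (not part of the original header; 'masm' is an
    // assumed instance): variable shifts on x86 take their count in cl only,
    // which is why the code above swaps the requested count into ecx around
    // the shift. A sketch of what a non-ecx shift emits, with the count in edx:
    //
    //     masm.lshift32(X86Registers::edx, X86Registers::eax);
    //     // emits: xchgl %edx, %ecx; shll %cl, %eax; xchgl %edx, %ecx
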
    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            move(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or32(imm, dest);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        rshift32(shift_amount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        urshift32(shift_amount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift32(imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(TrustedImm32 imm, Address dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_m(dest.offset, dest.base);
        else
            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_r(dest);
        else
            m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor32(op2, dest);
        else {
            move(op2, dest);
            xor32(op1, dest);
        }
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        xor32(imm, dest);
    }

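    // Illustrative note (not part of the original header; 'masm' is an
    // assumed instance): xor with an all-ones immediate is peephole-optimized
    // into a one's-complement NOT above, and xor of a value with itself is
    // always zero, which is why xor32(op1, op2, dest) special-cases op1 == op2:
    //
    //     masm.xor32(TrustedImm32(-1), X86Registers::eax); // emits: notl %eax
    //     masm.xor32(X86Registers::eax, X86Registers::eax, X86Registers::eax);
    //     // lowers to moving the constant zero into eax
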
    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

    void absDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(&negativeZeroConstant, dst);
        m_assembler.andnpd_rr(src, dst);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(&negativeZeroConstant, dst);
        m_assembler.xorpd_rr(src, dst);
    }

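    // Illustrative note (not part of the original header): both routines
    // above exploit the IEEE 754 sign bit. -0.0 is a mask with only the sign
    // bit set (0x8000000000000000), so:
    //   abs:    dst = ~mask & src   (andnpd clears the sign bit)
    //   negate: dst = mask ^ src    (xorpd flips the sign bit)
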
    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32. Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(value));
        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, dest);
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8Signed(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, dest);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16Signed(Address address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void store8(TrustedImm32 imm, Address address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
    }

    void store8(TrustedImm32 imm, BaseIndex address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void store8(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp;
            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
                temp = X86Registers::eax;
            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
                temp = X86Registers::ebx;
            else {
                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
                temp = X86Registers::ecx;
            }

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store16(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp;
            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
                temp = X86Registers::eax;
            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
                temp = X86Registers::ebx;
            else {
                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
                temp = X86Registers::ecx;
            }

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
    }

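    // Illustrative note (not part of the original header; 'masm' is an
    // assumed instance): without a REX prefix, byte operands naming
    // esp/ebp/esi/edi select ah/ch/dh/bh rather than a low byte, so the
    // helpers above shuffle the value through eax/ebx/ecx first. For example,
    // storing the byte held in edi:
    //
    //     masm.store8(X86Registers::edi, BaseIndex(X86Registers::eax, X86Registers::edx, TimesOne));
    //     // emits: xchgl %edi, %ebx; movb %bl, (%eax,%edx); xchgl %edi, %ebx
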
    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.

    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movsd_rr(src, dest);
    }

    void loadDouble(const void* address, FPRegisterID dest)
    {
#if CPU(X86)
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address, dest);
#else
        move(TrustedImmPtr(address), scratchRegister);
        loadDouble(scratchRegister, dest);
#endif
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsd2ss_rr(src, dst);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtss2sd_rr(src, dst);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A / B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        divDouble(op2, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        subDouble(op2, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            mulDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            mulDouble(op1, dest);
        }
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            if (left == right)
                return Jump(m_assembler.jnp());
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            if (left == right)
                return Jump(m_assembler.jp());
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }

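    // Illustrative note (not part of the original header; 'masm', 'fpRegT0'
    // and 'fpRegT1' are assumed names): ucomisd sets ZF/PF/CF rather than the
    // signed flags, so ordered comparisons map onto the unsigned condition
    // codes (a/ae/b/be), DoubleConditionBitInvert swaps the operand order,
    // and DoubleConditionBitSpecial requests the extra parity check that
    // separates "equal" from "unordered" (NaN):
    //
    //     Jump taken = masm.branchDouble(DoubleEqual, fpRegT0, fpRegT1);
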
    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }

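    // Illustrative note (not part of the original header): cvttsd2si reports
    // any failed truncation (NaN, overflow) by producing the "integer
    // indefinite" value 0x80000000, so comparing dest against that single
    // sentinel detects failure - at the cost of also treating a genuine
    // INT_MIN result as a failure, as the comment above warns.
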
    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? GreaterThanOrEqual : LessThan, dest, TrustedImm32(0));
    }

    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
    }

#if CPU(X86_64)
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2siq_rr(src, dest);
    }
#endif

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }

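    // Illustrative note (not part of the original header): the round-trip
    // check above catches doubles with a fractional part (1.5 truncates to 1,
    // which converts back to 1.0 != 1.5), while the zero test is needed
    // because -0.0 truncates to 0 and 0 converts back to +0.0, which ucomisd
    // considers equal to -0.0.
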
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

    void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psllq_i8r(imm.m_value, reg);
    }

    void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psrlq_i8r(imm.m_value, reg);
    }

    void orPacked(XMMRegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.por_rr(src, dst);
    }

    void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    void movePackedToInt32(XMMRegisterID src, RegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }

    // Register move operations:
    //
    // Move values in registers.

    void move(TrustedImm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the TrustedImm32 value is zero extended into the register, it
        // may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

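    // Illustrative note (not part of the original header; 'masm' is an
    // assumed instance): zeroing via xorl dest, dest is shorter than
    // movl $0, dest (2 bytes vs. 5) and is recognized as a zeroing idiom by
    // the CPU, so the immediate move is reserved for non-zero values:
    //
    //     masm.move(TrustedImm32(0), X86Registers::eax); // xorl %eax, %eax
    //     masm.move(TrustedImm32(7), X86Registers::eax); // movl $7, %eax
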
#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void move(TrustedImm64 imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.m_value, dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }

#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif

    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forward jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.

    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

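    // Illustrative note (not part of the original header; 'masm' is an
    // assumed instance): for (in)equality against zero, testl left, left sets
    // ZF exactly as the compare would, with a shorter encoding:
    //
    //     Jump isZero = masm.branch32(Equal, X86Registers::eax, TrustedImm32(0));
    //     // emits: testl %eax, %eax; je <target>
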
    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // If we are only interested in the low seven bits, this can be tested with a testb.
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFFFF00));

        m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }

    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd32(cond, src2, dest);
        move(src2, dest);
        return branchAdd32(cond, src1, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(src, dest);
        return branchAdd32(cond, imm, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        mul32(imm, src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul32(cond, src2, dest);
        move(src2, dest);
        return branchMul32(cond, src1, dest);
    }

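    // Illustrative note (not part of the original header): imul sets OF/CF on
    // overflow but leaves ZF undefined, so for Zero/NonZero/Signed conditions
    // the result must be re-tested explicitly with testl before the jCC.
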
    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(src1 == dest || src2 != dest);

        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Miscellaneous operations:

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        set32(x86Condition(cond), dest);
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }

    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }

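    // Illustrative note (not part of the original header): x86 condition
    // codes are laid out so that each predicate and its negation differ only
    // in the lowest encoding bit (e.g. ConditionE/ConditionNE,
    // ConditionL/ConditionGE), which is why flipping bit 0 inverts the condition.
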
    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
    }

    static ptrdiff_t maxJumpReplacementSize()
    {
        return X86Assembler::maxJumpReplacementSize();
    }

protected:
    X86Assembler::Condition x86Condition(RelationalCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    X86Assembler::Condition x86Condition(ResultCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    void set32(X86Assembler::Condition cond, RegisterID dest)
    {
#if CPU(X86)
        // On 32-bit x86 we can only set the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (dest >= 4) {
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            m_assembler.setCC_r(cond, X86Registers::eax);
            m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            return;
        }
#endif
        m_assembler.setCC_r(cond, dest);
        m_assembler.movzbl_rr(dest, dest);
    }

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                "movl $0x1, %%eax;"
                "pushl %%ebx;"
                "cpuid;"
                "popl %%ebx;"
                "movl %%edx, %0;"
                : "=g" (flags)
                :
                : "%eax", "%ecx", "%edx"
                );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)

#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but in debug builds this method is provided to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h