/*
 * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {
class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler, MacroAssemblerX86Common> {
public:
#if CPU(X86_64)
    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
#endif

protected:
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::XMMRegisterID XMMRegisterID;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        return value >= -128 && value <= 127;
    }
    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        PositiveOrZero = X86Assembler::ConditionNS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
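    // To illustrate the encoding: DoubleLessThan is ConditionA | DoubleConditionBitInvert,
    // so branchDouble() below emits the compare with its operands swapped and then branches
    // on the plain 'above' condition once DoubleConditionBits have been masked off:
    //
    //     a < b  ==>  ucomisd a, b / ja    (reversed compare, unsigned 'above' test)
    //
    // The COMPILE_ASSERT guarantees that the two marker bits can never collide with a
    // genuine X86Assembler::Condition value.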
    static const RegisterID stackPointerRegister = X86Registers::esp;
    static const RegisterID framePointerRegister = X86Registers::ebp;

    static bool canBlind() { return true; }
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst)
    // For many operations the source may be a TrustedImm32, the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).
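    //
    // For example, a client holding a macro assembler could emit (masm, regT0 and
    // regT1 are illustrative names only):
    //
    //     masm.add32(TrustedImm32(1), regT0);      // regT0 += 1 (encoded as inc)
    //     masm.add32(Address(regT1, 8), regT0);    // regT0 += *(int32_t*)(regT1 + 8)
    //     masm.sub32(regT0, Address(regT1, 0));    // *(int32_t*)regT1 -= regT0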
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.inc_r(dest);
        else
            m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.leal_mr(imm.m_value, src, dest);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            and32(op2, dest);
        else {
            move(op2, dest);
            and32(op1, dest);
        }
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        and32(imm, dest);
    }
    void countLeadingZeros32(RegisterID src, RegisterID dst)
    {
        m_assembler.bsr_rr(src, dst);
        Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero));
        move(TrustedImm32(32), dst);

        Jump skipNonZeroCase = jump();
        srcIsNonZero.link(this);
        xor32(TrustedImm32(0x1f), dst);
        skipNonZeroCase.link(this);
    }
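    // (The xor32 above relies on 31 - n == n ^ 0x1f for 0 <= n <= 31: bsr yields the
    // index of the highest set bit, so src == 0x00008000 gives bsr == 15 and
    // 15 ^ 0x1f == 16 leading zeros; src == 0 is handled by materializing 32 directly.)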
    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        lshift32(shift_amount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        lshift32(imm, dest);
    }
    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }
    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            move(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or32(imm, dest);
    }
    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        rshift32(shift_amount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }
    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        urshift32(shift_amount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift32(imm, dest);
    }
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == 1)
            m_assembler.dec_r(dest);
        else
            m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }
    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(TrustedImm32 imm, Address dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_m(dest.offset, dest.base);
        else
            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.notl_r(dest);
        else
            m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor32(op2, dest);
        else {
            move(op2, dest);
            xor32(op1, dest);
        }
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        xor32(imm, dest);
    }
    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

    void absDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
        m_assembler.andnpd_rr(src, dst);
    }

    void negateDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(src != dst);
        static const double negativeZeroConstant = -0.0;
        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
        m_assembler.xorpd_rr(src, dst);
    }
    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32. Address
    // operand objects to loads and store will be implicitly constructed if a
    // register is passed.
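    //
    // For example (illustrative register names only):
    //
    //     masm.load32(Address(regT0, 4), regT1);                   // regT1 = *(int32_t*)(regT0 + 4)
    //     masm.load32(BaseIndex(regT0, regT1, TimesFour), regT2);  // regT2 = ((int32_t*)regT0)[regT1]
    //     masm.store32(TrustedImm32(0), Address(regT0, 8));        // *(int32_t*)(regT0 + 8) = 0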
    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(value));
        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
    }
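    // A compact load reserves only an 8-bit displacement, which is why repatchCompact
    // asserts isCompactPtrAlignedAddressOffset(value): a later patch to offset 120 is
    // representable, but one to 130 would need the 32-bit form emitted by
    // load32WithAddressOffsetPatch above.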
    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }
    void load8(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movzbl_mr(address.offset, address.base, dest);
    }

    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movsbl_mr(address.offset, address.base, dest);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16SignedExtendTo32(Address address, RegisterID dest)
    {
        m_assembler.movswl_mr(address.offset, address.base, dest);
    }
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        padBeforePatch();
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }

    void store8(TrustedImm32 imm, Address address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
    }

    void store8(TrustedImm32 imm, BaseIndex address)
    {
        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
        m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
    }
    static ALWAYS_INLINE RegisterID getUnusedRegister(BaseIndex address)
    {
        if (address.base != X86Registers::eax && address.index != X86Registers::eax)
            return X86Registers::eax;

        if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
            return X86Registers::ebx;

        ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
        return X86Registers::ecx;
    }

    static ALWAYS_INLINE RegisterID getUnusedRegister(Address address)
    {
        if (address.base != X86Registers::eax)
            return X86Registers::eax;

        ASSERT(address.base != X86Registers::edx);
        return X86Registers::edx;
    }
    void store8(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store8(RegisterID src, Address address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movb_rm(temp, address.offset, address.base);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movb_rm(src, address.offset, address.base);
    }

    void store16(RegisterID src, BaseIndex address)
    {
#if CPU(X86)
        // On 32-bit x86 we can only store from the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (src >= 4) {
            // Pick a temporary register.
            RegisterID temp = getUnusedRegister(address);

            // Swap to the temporary register to perform the store.
            swap(src, temp);
            m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
            swap(src, temp);
            return;
        }
#endif
        m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
    }
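    // Background on the "first 4 registers" restriction: without a REX prefix the byte
    // forms of x86 instructions name registers 4..7 as ah, ch, dh and bh rather than
    // the low bytes of esp..edi, so storing from, say, register 4 would actually store
    // ah. Swapping the value into eax..ebx first, as above, sidesteps this; the REX
    // prefix on x86-64 removes the problem entirely.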
    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.
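    //
    // For example (illustrative names only):
    //
    //     masm.loadDouble(Address(regT0, 0), fpRegT0);   // fpRegT0 = *(double*)regT0
    //     masm.addDouble(fpRegT1, fpRegT0);              // fpRegT0 += fpRegT1 (addsd)
    //     masm.storeDouble(fpRegT0, Address(regT0, 0));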
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movsd_rr(src, dest);
    }

    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
    {
#if CPU(X86)
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.m_value, dest);
#else
        move(address, scratchRegister);
        loadDouble(scratchRegister, dest);
#endif
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
    }
    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsd2ss_rr(src, dst);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtss2sd_rr(src, dst);
    }
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A / B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        divDouble(op2, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        subDouble(op2, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            mulDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            mulDouble(op1, dest);
        }
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            if (left == right)
                return Jump(m_assembler.jnp());
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            if (left == right)
                return Jump(m_assembler.jp());
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }
    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }
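    // cvttsd2si produces 0x80000000 both for a genuine INT_MIN input and for anything
    // unrepresentable (NaN, out-of-range values), so comparing against that sentinel is
    // a conservative failure test: 3.7 truncates to 3 and passes, while 3e10 yields
    // 0x80000000 and takes the failure branch - as does a legitimate INT_MIN.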
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
    }

#if CPU(X86_64)
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2siq_rr(src, dest);
    }
#endif
    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
#if CPU(X86_64)
        if (negZeroCheck) {
            Jump valueIsNonZero = branchTest32(NonZero, dest);
            m_assembler.movmskpd_rr(src, scratchRegister);
            failureCases.append(branchTest32(NonZero, scratchRegister, TrustedImm32(1)));
            valueIsNonZero.link(this);
        }
#else
        if (negZeroCheck)
            failureCases.append(branchTest32(Zero, dest));
#endif

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }
    void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psllq_i8r(imm.m_value, reg);
    }

    void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
    {
        ASSERT(isSSE2Present());
        m_assembler.psrlq_i8r(imm.m_value, reg);
    }

    void orPacked(XMMRegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.por_rr(src, dst);
    }

    void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }

    void movePackedToInt32(XMMRegisterID src, RegisterID dst)
    {
        ASSERT(isSSE2Present());
        m_assembler.movd_rr(src, dst);
    }
    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.
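    //
    // For example, a conventional prologue/epilogue pair could be emitted as
    // (illustrative only):
    //
    //     masm.push(framePointerRegister);
    //     masm.move(stackPointerRegister, framePointerRegister);
    //     ...
    //     masm.pop(framePointerRegister);
    //     masm.ret();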
    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }
    // Register move operations:
    //
    // Move values in registers.
    void move(TrustedImm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the TrustedImm32 value is zero extended into the register, it
        // may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void move(TrustedImm64 imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.m_value, dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
#endif
    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forwards jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively; for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
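    //
    // For example (illustrative only), a forward branch used as a bounds check:
    //
    //     Jump outOfBounds = masm.branch32(AboveOrEqual, indexReg, lengthReg);
    //     ... // in-bounds fast path
    //     outOfBounds.link(&masm); // or retain the Jump and link it to a slow path later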
    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }
    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    void test32(ResultCondition, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else if (!(mask.m_value & ~0xff) && reg < X86Registers::esp) { // Using esp and greater as a byte register yields the upper half of the 16 bit registers ax, cx, dx and bx, e.g. esp, register 4, is actually ah.
            if (mask.m_value == 0xff)
                m_assembler.testb_rr(reg, reg);
            else
                m_assembler.testb_i8r(mask.m_value, reg);
        } else
            m_assembler.testl_i32r(mask.m_value, reg);
    }

    Jump branch(ResultCondition cond)
    {
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        test32(cond, reg, mask);
        return branch(cond);
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        generateTest32(address, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFFFF00));

        m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }
    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.
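    //
    // For example (illustrative only), a speculative integer add that bails to a slow
    // path on signed overflow:
    //
    //     JumpList slowCases;
    //     slowCases.append(masm.branchAdd32(Overflow, regT1, regT0)); // regT0 += regT1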
    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd32(cond, src2, dest);
        move(src2, dest);
        return branchAdd32(cond, src1, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(src, dest);
        return branchAdd32(cond, imm, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        mul32(imm, src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul32(cond, src2, dest);
        move(src2, dest);
        return branchMul32(cond, src1, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(src1 == dest || src2 != dest);

        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }
    // Miscellaneous operations:

    void breakpoint()
    {
        m_assembler.int3();
    }

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void ret()
    {
        m_assembler.ret();
    }
    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        set32(x86Condition(cond), dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        set32(x86Condition(cond), dest);
    }
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        set32(x86Condition(cond), dest);
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        generateTest32(address, mask);
        set32(x86Condition(cond), dest);
    }
    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }
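    // This relies on x86 condition codes coming in complementary even/odd pairs
    // (ConditionE/ConditionNE, ConditionL/ConditionGE, ...), so flipping the low bit
    // negates the test: invert(Equal) == NotEqual, invert(LessThan) == GreaterThanOrEqual.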
    void memoryFence()
    {
        m_assembler.mfence();
    }

    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
    }

    static ptrdiff_t maxJumpReplacementSize()
    {
        return X86Assembler::maxJumpReplacementSize();
    }
#if ENABLE(MASM_PROBE)
    // Methods required by the MASM_PROBE mechanism as defined in
    // AbstractMacroAssembler.h.
    static void printCPURegisters(CPUState&, int indentation = 0);
    static void printRegister(CPUState&, RegisterID);
    static void printRegister(CPUState&, FPRegisterID);
    void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
#endif // ENABLE(MASM_PROBE)
protected:
    X86Assembler::Condition x86Condition(RelationalCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    X86Assembler::Condition x86Condition(ResultCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }
    void set32(X86Assembler::Condition cond, RegisterID dest)
    {
#if CPU(X86)
        // On 32-bit x86 we can only set the first 4 registers;
        // esp..edi are mapped to the 'h' registers!
        if (dest >= 4) {
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            m_assembler.setCC_r(cond, X86Registers::eax);
            m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
            m_assembler.xchgl_rr(dest, X86Registers::eax);
            return;
        }
#endif
        m_assembler.setCC_r(cond, dest);
        m_assembler.movzbl_rr(dest, dest);
    }
private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;
    ALWAYS_INLINE void generateTest32(Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else if (!(mask.m_value & ~0xff))
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        else if (!(mask.m_value & ~0xff00))
            m_assembler.testb_im(mask.m_value >> 8, address.offset + 1, address.base);
        else if (!(mask.m_value & ~0xff0000))
            m_assembler.testb_im(mask.m_value >> 16, address.offset + 2, address.base);
        else if (!(mask.m_value & ~0xff000000))
            m_assembler.testb_im(mask.m_value >> 24, address.offset + 3, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
    }
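    // For example, generateTest32(address, TrustedImm32(0xff00)) emits a single byte
    // test of (mask >> 8) against offset + 1 - equivalent on this little-endian target
    // to the full 32-bit test, but with a shorter encoding.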
#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                "movl $0x1, %%eax;"
                "pushl %%ebx;"
                "cpuid;"
                "popl %%ebx;"
                "movl %%edx, %0;"
                : "=g" (flags)
                :
                : "%eax", "%ecx", "%edx"
            );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but non-debug builds add this method to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h