/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerX86Common_h
#define MacroAssemblerX86Common_h

#if ENABLE(ASSEMBLER)

#include "X86Assembler.h"
#include "AbstractMacroAssembler.h"

namespace JSC {

class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
    static const int DoubleConditionBitInvert = 0x10;
    static const int DoubleConditionBitSpecial = 0x20;
    static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;

public:
    typedef X86Assembler::FPRegisterID FPRegisterID;

    static const int MaximumCompactPtrAlignedAddressOffset = 127;

    enum RelationalCondition {
        Equal = X86Assembler::ConditionE,
        NotEqual = X86Assembler::ConditionNE,
        Above = X86Assembler::ConditionA,
        AboveOrEqual = X86Assembler::ConditionAE,
        Below = X86Assembler::ConditionB,
        BelowOrEqual = X86Assembler::ConditionBE,
        GreaterThan = X86Assembler::ConditionG,
        GreaterThanOrEqual = X86Assembler::ConditionGE,
        LessThan = X86Assembler::ConditionL,
        LessThanOrEqual = X86Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = X86Assembler::ConditionO,
        Signed = X86Assembler::ConditionS,
        Zero = X86Assembler::ConditionE,
        NonZero = X86Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = X86Assembler::ConditionE | DoubleConditionBitSpecial,
        DoubleNotEqual = X86Assembler::ConditionNE,
        DoubleGreaterThan = X86Assembler::ConditionA,
        DoubleGreaterThanOrEqual = X86Assembler::ConditionAE,
        DoubleLessThan = X86Assembler::ConditionA | DoubleConditionBitInvert,
        DoubleLessThanOrEqual = X86Assembler::ConditionAE | DoubleConditionBitInvert,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = X86Assembler::ConditionE,
        DoubleNotEqualOrUnordered = X86Assembler::ConditionNE | DoubleConditionBitSpecial,
        DoubleGreaterThanOrUnordered = X86Assembler::ConditionB | DoubleConditionBitInvert,
        DoubleGreaterThanOrEqualOrUnordered = X86Assembler::ConditionBE | DoubleConditionBitInvert,
        DoubleLessThanOrUnordered = X86Assembler::ConditionB,
        DoubleLessThanOrEqualOrUnordered = X86Assembler::ConditionBE,
    };
    COMPILE_ASSERT(
        !((X86Assembler::ConditionE | X86Assembler::ConditionNE | X86Assembler::ConditionA | X86Assembler::ConditionAE | X86Assembler::ConditionB | X86Assembler::ConditionBE) & DoubleConditionBits),
        DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
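
    // The two extra bits piggy-back information on top of the raw x86 condition code
    // consumed by branchDouble() below: DoubleConditionBitInvert asks for the ucomisd
    // operands to be swapped, and DoubleConditionBitSpecial selects the multi-branch
    // NaN handling. For example, DoubleLessThan is (ConditionA | DoubleConditionBitInvert):
    // "a < b" is tested as "b > a" with the operands exchanged, which keeps unordered
    // (NaN) comparisons from matching.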

    static const RegisterID stackPointerRegister = X86Registers::esp;

    // Integer arithmetic operations:
    //
    // Operations are typically two operand - operation(source, srcDst)
    // For many operations the source may be a TrustedImm32, the srcDst operand
    // may often be a memory location (explicitly described using an Address
    // object).
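    //
    // For example, add32(TrustedImm32(1), Address(regT0, 4)) emits an "addl $1, 4(%reg)"
    // against the memory word at offset 4 from the register held in regT0 (regT0 here
    // is an illustrative register name, not one defined in this file).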

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.addl_rr(src, dest);
    }

    void add32(TrustedImm32 imm, Address address)
    {
        m_assembler.addl_im(imm.m_value, address.offset, address.base);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.addl_ir(imm.m_value, dest);
    }

    void add32(Address src, RegisterID dest)
    {
        m_assembler.addl_mr(src.offset, src.base, dest);
    }

    void add32(RegisterID src, Address dest)
    {
        m_assembler.addl_rm(src, dest.offset, dest.base);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        m_assembler.andl_rr(src, dest);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.andl_ir(imm.m_value, dest);
    }

    void and32(RegisterID src, Address dest)
    {
        m_assembler.andl_rm(src, dest.offset, dest.base);
    }

    void and32(Address src, RegisterID dest)
    {
        m_assembler.andl_mr(src.offset, src.base, dest);
    }

    void and32(TrustedImm32 imm, Address address)
    {
        m_assembler.andl_im(imm.m_value, address.offset, address.base);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            and32(op2, dest);
        else {
            move(op2, dest);
            and32(op1, dest);
        }
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        and32(imm, dest);
    }

    void lshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shll_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shll_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }
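
    // As a concrete illustration of the swap trick above: with shift_amount in edx and
    // dest in eax, the emitted sequence is "xchgl %ecx, %edx; shll %cl, %eax;
    // xchgl %ecx, %edx" - the shift count passes through ecx, and both registers end
    // up back in their original homes afterwards.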

    void lshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        lshift32(shift_amount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shll_i8r(imm.m_value, dest);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        lshift32(imm, dest);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.imull_rr(src, dest);
    }

    void mul32(Address src, RegisterID dest)
    {
        m_assembler.imull_mr(src.offset, src.base, dest);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        m_assembler.imull_i32r(src, imm.m_value, dest);
    }

    void neg32(RegisterID srcDest)
    {
        m_assembler.negl_r(srcDest);
    }

    void neg32(Address srcDest)
    {
        m_assembler.negl_m(srcDest.offset, srcDest.base);
    }

    void not32(RegisterID srcDest)
    {
        m_assembler.notl_r(srcDest);
    }

    void not32(Address srcDest)
    {
        m_assembler.notl_m(srcDest.offset, srcDest.base);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orl_rr(src, dest);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.orl_ir(imm.m_value, dest);
    }

    void or32(RegisterID src, Address dest)
    {
        m_assembler.orl_rm(src, dest.offset, dest.base);
    }

    void or32(Address src, RegisterID dest)
    {
        m_assembler.orl_mr(src.offset, src.base, dest);
    }

    void or32(TrustedImm32 imm, Address address)
    {
        m_assembler.orl_im(imm.m_value, address.offset, address.base);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            zeroExtend32ToPtr(op1, dest);
        else if (op1 == dest)
            or32(op2, dest);
        else {
            move(op2, dest);
            or32(op1, dest);
        }
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        or32(imm, dest);
    }

    void rshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.sarl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.sarl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void rshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        rshift32(shift_amount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.sarl_i8r(imm.m_value, dest);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        rshift32(imm, dest);
    }

    void urshift32(RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (shift_amount == X86Registers::ecx)
            m_assembler.shrl_CLr(dest);
        else {
            // On x86 we can only shift by ecx; if asked to shift by another register we'll
            // need to rejig the shift amount into ecx first, and restore the registers afterwards.
            // If dest is ecx, then shift the swapped register!
            swap(shift_amount, X86Registers::ecx);
            m_assembler.shrl_CLr(dest == X86Registers::ecx ? shift_amount : dest);
            swap(shift_amount, X86Registers::ecx);
        }
    }

    void urshift32(RegisterID src, RegisterID shift_amount, RegisterID dest)
    {
        ASSERT(shift_amount != dest);

        if (src != dest)
            move(src, dest);
        urshift32(shift_amount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.shrl_i8r(imm.m_value, dest);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        if (src != dest)
            move(src, dest);
        urshift32(imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.subl_rr(src, dest);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.subl_ir(imm.m_value, dest);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        m_assembler.subl_im(imm.m_value, address.offset, address.base);
    }

    void sub32(Address src, RegisterID dest)
    {
        m_assembler.subl_mr(src.offset, src.base, dest);
    }

    void sub32(RegisterID src, Address dest)
    {
        m_assembler.subl_rm(src, dest.offset, dest.base);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        m_assembler.xorl_rr(src, dest);
    }

    void xor32(TrustedImm32 imm, Address dest)
    {
        m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.xorl_ir(imm.m_value, dest);
    }

    void xor32(RegisterID src, Address dest)
    {
        m_assembler.xorl_rm(src, dest.offset, dest.base);
    }

    void xor32(Address src, RegisterID dest)
    {
        m_assembler.xorl_mr(src.offset, src.base, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        if (op1 == op2)
            move(TrustedImm32(0), dest);
        else if (op1 == dest)
            xor32(op2, dest);
        else {
            move(op2, dest);
            xor32(op1, dest);
        }
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(src, dest);
        xor32(imm, dest);
    }

    void sqrtDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.sqrtsd_rr(src, dst);
    }

    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32. Address
    // operand objects to loads and stores will be implicitly constructed if a
    // register is passed.
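    //
    // For example, load32(Address(base, 8), dest) emits "movl 8(%base), %dest", and
    // store32(TrustedImm32(0), Address(base, 8)) emits "movl $0, 8(%base)"; passing a
    // bare RegisterID where an address is expected wraps it as an offset-0 address.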

    void load32(ImplicitAddress address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp32(address.offset, address.base, dest);
        return DataLabel32(this);
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        ASSERT(value < MaximumCompactPtrAlignedAddressOffset);
        AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        m_assembler.movl_mr_disp8(address.offset, address.base, dest);
        return DataLabelCompact(this);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
    }

    void load16(Address address, RegisterID dest)
    {
        m_assembler.movzwl_mr(address.offset, address.base, dest);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        m_assembler.movl_rm_disp32(src, address.offset, address.base);
        return DataLabel32(this);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        m_assembler.movl_rm(src, address.offset, address.base);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
    }

    // Floating-point operations:
    //
    // Presently only supports SSE, not x87 floating point.
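    //
    // A typical sequence (illustrative register names, not defined here):
    // loadDouble(Address(base, 0), fpRegT0); addDouble(fpRegT1, fpRegT0);
    // storeDouble(fpRegT0, Address(base, 0)) - an SSE2 read-modify-write of a
    // double-width memory slot via movsd/addsd/movsd.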

    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (src != dest)
            m_assembler.movsd_rr(src, dest);
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_mr(address.offset, address.base, dest);
    }

    void storeDouble(FPRegisterID src, ImplicitAddress address)
    {
        ASSERT(isSSE2Present());
        m_assembler.movsd_rm(src, address.offset, address.base);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_rr(src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            addDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            addDouble(op1, dest);
        }
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.addsd_mr(src.offset, src.base, dest);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_rr(src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A / B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        divDouble(op2, dest);
    }

    void divDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.divsd_mr(src.offset, src.base, dest);
    }

    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_rr(src, dest);
    }

    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(op1 == dest || op2 != dest);

        moveDouble(op1, dest);
        subDouble(op2, dest);
    }

    void subDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.subsd_mr(src.offset, src.base, dest);
    }

    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_rr(src, dest);
    }

    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        if (op1 == dest)
            mulDouble(op2, dest);
        else {
            moveDouble(op2, dest);
            mulDouble(op1, dest);
        }
    }

    void mulDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.mulsd_mr(src.offset, src.base, dest);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_rr(src, dest);
    }

    void convertInt32ToDouble(Address src, FPRegisterID dest)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
    }

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        ASSERT(isSSE2Present());

        if (cond & DoubleConditionBitInvert)
            m_assembler.ucomisd_rr(left, right);
        else
            m_assembler.ucomisd_rr(right, left);

        if (cond == DoubleEqual) {
            Jump isUnordered(m_assembler.jp());
            Jump result = Jump(m_assembler.je());
            isUnordered.link(this);
            return result;
        } else if (cond == DoubleNotEqualOrUnordered) {
            Jump isUnordered(m_assembler.jp());
            Jump isEqual(m_assembler.je());
            isUnordered.link(this);
            Jump result = jump();
            isEqual.link(this);
            return result;
        }

        ASSERT(!(cond & DoubleConditionBitSpecial));
        return Jump(m_assembler.jCC(static_cast<X86Assembler::Condition>(cond & ~DoubleConditionBits)));
    }

    // Truncates 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, INT_MIN).
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);
        return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
    }
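
    // cvttsd2si reports failure by producing the "integer indefinite" value 0x80000000
    // (INT_MIN), so comparing dest against that sentinel distinguishes success from
    // failure; this is also why a genuine INT_MIN input is treated as a failure above.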

    // Converts 'src' to an integer, and places the result in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
    {
        ASSERT(isSSE2Present());
        m_assembler.cvttsd2si_rr(src, dest);

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        failureCases.append(branchTest32(Zero, dest));

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        convertInt32ToDouble(dest, fpTemp);
        m_assembler.ucomisd_rr(fpTemp, src);
        failureCases.append(m_assembler.jp());
        failureCases.append(m_assembler.jne());
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleNotEqual, reg, scratch);
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch)
    {
        ASSERT(isSSE2Present());
        m_assembler.xorpd_rr(scratch, scratch);
        return branchDouble(DoubleEqualOrUnordered, reg, scratch);
    }

    // Stack manipulation operations:
    //
    // The ABI is assumed to provide a stack abstraction to memory,
    // containing machine word sized units of data. Push and pop
    // operations add and remove a single register sized unit of data
    // to or from the stack. Peek and poke operations read or write
    // values on the stack, without moving the current stack position.
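    //
    // For example, "push(regT0); ... pop(regT0);" saves and restores a register around
    // a clobbering sequence, moving stackPointerRegister down and then back up by one
    // machine word (regT0 again being an illustrative register name).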

    void pop(RegisterID dest)
    {
        m_assembler.pop_r(dest);
    }

    void push(RegisterID src)
    {
        m_assembler.push_r(src);
    }

    void push(Address address)
    {
        m_assembler.push_m(address.offset, address.base);
    }

    void push(TrustedImm32 imm)
    {
        m_assembler.push_i32(imm.m_value);
    }

    // Register move operations:
    //
    // Move values in registers.

    void move(TrustedImm32 imm, RegisterID dest)
    {
        // Note: on 64-bit the TrustedImm32 value is zero extended into the register;
        // it may be useful to have a separate version that sign extends the value?
        if (!imm.m_value)
            m_assembler.xorl_rr(dest, dest);
        else
            m_assembler.movl_i32r(imm.m_value, dest);
    }

#if CPU(X86_64)
    void move(RegisterID src, RegisterID dest)
    {
        // Note: on 64-bit this is a full register move; perhaps it would be
        // useful to have separate move32 & movePtr, with move32 zero extending?
        if (src != dest)
            m_assembler.movq_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movq_i64r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgq_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movsxd_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        m_assembler.movl_rr(src, dest);
    }
#else
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void move(TrustedImmPtr imm, RegisterID dest)
    {
        m_assembler.movl_i32r(imm.asIntptr(), dest);
    }

    void swap(RegisterID reg1, RegisterID reg2)
    {
        if (reg1 != reg2)
            m_assembler.xchgl_rr(reg1, reg2);
    }

    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.movl_rr(src, dest);
    }
#endif

    // Forwards / external control flow operations:
    //
    // This set of jump and conditional branch operations return a Jump
    // object which may be linked at a later point, allowing forward jumps,
    // or jumps that will require external linkage (after the code has been
    // relocated).
    //
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively, for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32-bit value, is less than or equal to 5.
    //
    // jz and jnz test whether the first operand is equal to zero, and take
    // an optional second operand of a mask under which to perform the test.
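    //
    // For example, "Jump notFive = branch32(NotEqual, regT0, TrustedImm32(5));" emits a
    // cmpl followed by a jne whose target is filled in later, e.g. via
    // notFive.link(this) (regT0 is an illustrative register name).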

    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmpl_rr(right, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !left.m_value)
            m_assembler.testl_rr(right, right);
        else
            m_assembler.cmpl_ir(left.m_value, right);
        return Jump(m_assembler.jCC(x86Condition(commute(cond))));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        m_assembler.cmpl_mr(right.offset, right.base, left);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        m_assembler.cmpl_rm(right, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        return branch32(cond, left, right);
    }

    Jump branch16(RelationalCondition cond, BaseIndex left, RegisterID right)
    {
        m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branch16(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(right.m_value & 0xFFFF0000));

        m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.testl_rr(reg, mask);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // If we are only interested in the low seven bits, this can be tested with a testb.
        if (mask.m_value == -1)
            m_assembler.testl_rr(reg, reg);
        else if ((mask.m_value & ~0x7f) == 0)
            m_assembler.testb_i8r(mask.m_value, reg);
        else
            m_assembler.testl_i32r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.testb_rr(reg, reg);
        else
            m_assembler.testb_i8r(mask.m_value, reg);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base, address.index, address.scale);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base, address.index, address.scale);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump jump()
    {
        return Jump(m_assembler.jmp());
    }

    void jump(RegisterID target)
    {
        m_assembler.jmp_r(target);
    }

    // Address is a memory location containing the address to jump to.
    void jump(Address address)
    {
        m_assembler.jmp_m(address.offset, address.base);
    }
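
    // This emits an indirect "jmp *offset(%base)" - the memory word at that address,
    // not the address itself, supplies the jump target, making it suitable for
    // dispatch through a pointer slot that is patched at runtime.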

    // Arithmetic control flow operations:
    //
    // This set of conditional branch operations branch based
    // on the result of an arithmetic operation. The operation
    // is performed as normal, storing the result.
    //
    // * jz operations branch if the result is zero.
    // * jo operations branch if the (signed) arithmetic
    //   operation caused an overflow to occur.
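    //
    // For example, "Jump overflowed = branchAdd32(Overflow, regT1, regT0);" performs
    // regT0 += regT1 and branches if the addition overflowed as a signed 32-bit
    // operation (the jo path); using Zero instead branches when the sum is zero
    // (register names illustrative).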

    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, Address dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
    {
        add32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchAdd32(cond, src2, dest);
        move(src2, dest);
        return branchAdd32(cond, src1, dest);
    }

    Jump branchAdd32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        move(src, dest);
        return branchAdd32(cond, imm, dest);
    }

    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, Address src, RegisterID dest)
    {
        mul32(src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        mul32(imm, src, dest);
        if (cond != Overflow)
            m_assembler.testl_rr(dest, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        if (src1 == dest)
            return branchMul32(cond, src2, dest);
        move(src2, dest);
        return branchMul32(cond, src1, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, Address dest)
    {
        sub32(imm, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src, Address dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, Address src, RegisterID dest)
    {
        sub32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        // B := A - B is invalid.
        ASSERT(src1 == dest || src2 != dest);

        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchSub32(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        move(src1, dest);
        return branchSub32(cond, src2, dest);
    }

    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        neg32(srcDest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        or32(src, dest);
        return Jump(m_assembler.jCC(x86Condition(cond)));
    }

    // Miscellaneous operations:

    Call nearCall()
    {
        return Call(m_assembler.call(), Call::LinkableNear);
    }

    Call call(RegisterID target)
    {
        return Call(m_assembler.call(target), Call::None);
    }

    void call(Address address)
    {
        m_assembler.call_m(address.offset, address.base);
    }

    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmpl_rr(right, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
            m_assembler.testl_rr(left, left);
        else
            m_assembler.cmpl_ir(right.m_value, left);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    // FIXME:
    // The mask should be optional... perhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().

    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpb_im(0, address.offset, address.base);
        else
            m_assembler.testb_im(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        if (mask.m_value == -1)
            m_assembler.cmpl_im(0, address.offset, address.base);
        else
            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
        m_assembler.setCC_r(x86Condition(cond), dest);
        m_assembler.movzbl_rr(dest, dest);
    }

    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }
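
    // This works because the x86 condition codes come in even/odd pairs that differ
    // only in their low bit: ConditionE ^ 1 == ConditionNE, ConditionL ^ 1 == ConditionGE,
    // and so on, so flipping bit 0 inverts the predicate.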

    // Commute a relational condition, returns a new condition that will produce
    // the same results given the same inputs but with their positions exchanged.
    static RelationalCondition commute(RelationalCondition cond)
    {
        // Equality is commutative!
        if (cond == Equal || cond == NotEqual)
            return cond;

        // Based on the values of x86 condition codes, remap > with < and >= with <=
        if (cond >= LessThan) {
            ASSERT(cond == LessThan || cond == LessThanOrEqual || cond == GreaterThan || cond == GreaterThanOrEqual);
            return static_cast<RelationalCondition>(X86Assembler::ConditionL + X86Assembler::ConditionG - cond);
        }

        // As above, for unsigned conditions.
        ASSERT(cond == Below || cond == BelowOrEqual || cond == Above || cond == AboveOrEqual);
        return static_cast<RelationalCondition>(X86Assembler::ConditionB + X86Assembler::ConditionA - cond);
    }
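
    // For example, commute(GreaterThan) yields LessThan: ConditionL + ConditionG - ConditionG
    // == ConditionL, matching the identity "a > b" <=> "b < a". The same arithmetic pairs
    // BelowOrEqual with AboveOrEqual on the unsigned side.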

protected:
    X86Assembler::Condition x86Condition(RelationalCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

    X86Assembler::Condition x86Condition(ResultCondition cond)
    {
        return static_cast<X86Assembler::Condition>(cond);
    }

private:
    // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
    // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
    friend class MacroAssemblerX86;

#if CPU(X86)
#if OS(MAC_OS_X)

    // All X86 Macs are guaranteed to support at least SSE2.
    static bool isSSE2Present()
    {
        return true;
    }

#else // OS(MAC_OS_X)

    enum SSE2CheckState {
        NotCheckedSSE2,
        HasSSE2,
        NoSSE2
    };

    static bool isSSE2Present()
    {
        if (s_sse2CheckState == NotCheckedSSE2) {
            // Default the flags value to zero; if the compiler is
            // not MSVC or GCC we will read this as SSE2 not present.
            int flags = 0;
#if COMPILER(MSVC)
            _asm {
                mov eax, 1 // cpuid function 1 gives us the standard feature set
                cpuid;
                mov flags, edx;
            }
#elif COMPILER(GCC)
            asm (
                 "movl $0x1, %%eax;"
                 "pushl %%ebx;"
                 "cpuid;"
                 "popl %%ebx;"
                 "movl %%edx, %0;"
                 : "=g" (flags)
                 :
                 : "%eax", "%ecx", "%edx"
                 );
#endif
            static const int SSE2FeatureBit = 1 << 26;
            s_sse2CheckState = (flags & SSE2FeatureBit) ? HasSSE2 : NoSSE2;
        }
        // Only check once.
        ASSERT(s_sse2CheckState != NotCheckedSSE2);

        return s_sse2CheckState == HasSSE2;
    }

    static SSE2CheckState s_sse2CheckState;

#endif // OS(MAC_OS_X)
#elif !defined(NDEBUG) // CPU(X86)

    // On x86-64 we should never be checking for SSE2 in a non-debug build,
    // but in debug builds we provide this method to keep the asserts above happy.
    static bool isSSE2Present()
    {
        return true;
    }

#endif
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssemblerX86Common_h