2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef MacroAssemblerX86Common_h
27 #define MacroAssemblerX86Common_h
29 #include <wtf/Platform.h>
33 #include "X86Assembler.h"
34 #include "AbstractMacroAssembler.h"
38 class MacroAssemblerX86Common
: public AbstractMacroAssembler
<X86Assembler
> {
// Flag bits ORed into DoubleCondition values alongside the raw x86 condition
// code; they are masked back out (via DoubleConditionBits) before emitting jCC.
static const int DoubleConditionBitInvert = 0x10;
static const int DoubleConditionBitSpecial = 0x20;
static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
46 Equal
= X86Assembler::ConditionE
,
47 NotEqual
= X86Assembler::ConditionNE
,
48 Above
= X86Assembler::ConditionA
,
49 AboveOrEqual
= X86Assembler::ConditionAE
,
50 Below
= X86Assembler::ConditionB
,
51 BelowOrEqual
= X86Assembler::ConditionBE
,
52 GreaterThan
= X86Assembler::ConditionG
,
53 GreaterThanOrEqual
= X86Assembler::ConditionGE
,
54 LessThan
= X86Assembler::ConditionL
,
55 LessThanOrEqual
= X86Assembler::ConditionLE
,
56 Overflow
= X86Assembler::ConditionO
,
57 Signed
= X86Assembler::ConditionS
,
58 Zero
= X86Assembler::ConditionE
,
59 NonZero
= X86Assembler::ConditionNE
62 enum DoubleCondition
{
63 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
64 DoubleEqual
= X86Assembler::ConditionE
| DoubleConditionBitSpecial
,
65 DoubleNotEqual
= X86Assembler::ConditionNE
,
66 DoubleGreaterThan
= X86Assembler::ConditionA
,
67 DoubleGreaterThanOrEqual
= X86Assembler::ConditionAE
,
68 DoubleLessThan
= X86Assembler::ConditionA
| DoubleConditionBitInvert
,
69 DoubleLessThanOrEqual
= X86Assembler::ConditionAE
| DoubleConditionBitInvert
,
70 // If either operand is NaN, these conditions always evaluate to true.
71 DoubleEqualOrUnordered
= X86Assembler::ConditionE
,
72 DoubleNotEqualOrUnordered
= X86Assembler::ConditionNE
| DoubleConditionBitSpecial
,
73 DoubleGreaterThanOrUnordered
= X86Assembler::ConditionB
| DoubleConditionBitInvert
,
74 DoubleGreaterThanOrEqualOrUnordered
= X86Assembler::ConditionBE
| DoubleConditionBitInvert
,
75 DoubleLessThanOrUnordered
= X86Assembler::ConditionB
,
76 DoubleLessThanOrEqualOrUnordered
= X86Assembler::ConditionBE
,
79 !((X86Assembler::ConditionE
| X86Assembler::ConditionNE
| X86Assembler::ConditionA
| X86Assembler::ConditionAE
| X86Assembler::ConditionB
| X86Assembler::ConditionBE
) & DoubleConditionBits
),
80 DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes
);
// ABI stack pointer for this target.
static const RegisterID stackPointerRegister = X86Registers::esp;
84 // Integer arithmetic operations:
86 // Operations are typically two operand - operation(source, srcDst)
87 // For many operations the source may be an Imm32, the srcDst operand
88 // may often be a memory location (explictly described using an Address
// dest += src (32-bit).
void add32(RegisterID src, RegisterID dest)
{
    m_assembler.addl_rr(src, dest);
}
// [address] += imm (32-bit).
void add32(Imm32 imm, Address address)
{
    m_assembler.addl_im(imm.m_value, address.offset, address.base);
}
// dest += imm (32-bit).
void add32(Imm32 imm, RegisterID dest)
{
    m_assembler.addl_ir(imm.m_value, dest);
}
// dest += [src] (32-bit).
void add32(Address src, RegisterID dest)
{
    m_assembler.addl_mr(src.offset, src.base, dest);
}
// [dest] += src (32-bit).
void add32(RegisterID src, Address dest)
{
    m_assembler.addl_rm(src, dest.offset, dest.base);
}
// dest &= src (32-bit).
void and32(RegisterID src, RegisterID dest)
{
    m_assembler.andl_rr(src, dest);
}
// dest &= imm (32-bit).
void and32(Imm32 imm, RegisterID dest)
{
    m_assembler.andl_ir(imm.m_value, dest);
}
// [dest] &= src (32-bit).
void and32(RegisterID src, Address dest)
{
    m_assembler.andl_rm(src, dest.offset, dest.base);
}
// dest &= [src] (32-bit).
void and32(Address src, RegisterID dest)
{
    m_assembler.andl_mr(src.offset, src.base, dest);
}
// [address] &= imm (32-bit).
void and32(Imm32 imm, Address address)
{
    m_assembler.andl_im(imm.m_value, address.offset, address.base);
}
// dest <<= imm (32-bit, immediate shift count).
void lshift32(Imm32 imm, RegisterID dest)
{
    m_assembler.shll_i8r(imm.m_value, dest);
}
146 void lshift32(RegisterID shift_amount
, RegisterID dest
)
148 // On x86 we can only shift by ecx; if asked to shift by another register we'll
149 // need rejig the shift amount into ecx first, and restore the registers afterwards.
150 if (shift_amount
!= X86Registers::ecx
) {
151 swap(shift_amount
, X86Registers::ecx
);
153 // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
154 if (dest
== shift_amount
)
155 m_assembler
.shll_CLr(X86Registers::ecx
);
156 // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
157 else if (dest
== X86Registers::ecx
)
158 m_assembler
.shll_CLr(shift_amount
);
159 // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
161 m_assembler
.shll_CLr(dest
);
163 swap(shift_amount
, X86Registers::ecx
);
165 m_assembler
.shll_CLr(dest
);
// dest *= src (signed 32-bit).
void mul32(RegisterID src, RegisterID dest)
{
    m_assembler.imull_rr(src, dest);
}
// dest *= [src] (signed 32-bit).
void mul32(Address src, RegisterID dest)
{
    m_assembler.imull_mr(src.offset, src.base, dest);
}
// dest = src * imm (signed 32-bit, three-operand imul).
void mul32(Imm32 imm, RegisterID src, RegisterID dest)
{
    m_assembler.imull_i32r(src, imm.m_value, dest);
}
// srcDest = -srcDest (32-bit two's-complement negate).
void neg32(RegisterID srcDest)
{
    m_assembler.negl_r(srcDest);
}
// [srcDest] = -[srcDest] (32-bit).
void neg32(Address srcDest)
{
    m_assembler.negl_m(srcDest.offset, srcDest.base);
}
// srcDest = ~srcDest (32-bit bitwise not).
void not32(RegisterID srcDest)
{
    m_assembler.notl_r(srcDest);
}
// [srcDest] = ~[srcDest] (32-bit).
void not32(Address srcDest)
{
    m_assembler.notl_m(srcDest.offset, srcDest.base);
}
// dest |= src (32-bit).
void or32(RegisterID src, RegisterID dest)
{
    m_assembler.orl_rr(src, dest);
}
// dest |= imm (32-bit).
void or32(Imm32 imm, RegisterID dest)
{
    m_assembler.orl_ir(imm.m_value, dest);
}
// [dest] |= src (32-bit).
void or32(RegisterID src, Address dest)
{
    m_assembler.orl_rm(src, dest.offset, dest.base);
}
// dest |= [src] (32-bit).
void or32(Address src, RegisterID dest)
{
    m_assembler.orl_mr(src.offset, src.base, dest);
}
// [address] |= imm (32-bit).
void or32(Imm32 imm, Address address)
{
    m_assembler.orl_im(imm.m_value, address.offset, address.base);
}
228 void rshift32(RegisterID shift_amount
, RegisterID dest
)
230 // On x86 we can only shift by ecx; if asked to shift by another register we'll
231 // need rejig the shift amount into ecx first, and restore the registers afterwards.
232 if (shift_amount
!= X86Registers::ecx
) {
233 swap(shift_amount
, X86Registers::ecx
);
235 // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
236 if (dest
== shift_amount
)
237 m_assembler
.sarl_CLr(X86Registers::ecx
);
238 // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
239 else if (dest
== X86Registers::ecx
)
240 m_assembler
.sarl_CLr(shift_amount
);
241 // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
243 m_assembler
.sarl_CLr(dest
);
245 swap(shift_amount
, X86Registers::ecx
);
247 m_assembler
.sarl_CLr(dest
);
// dest >>= imm (arithmetic/signed 32-bit, immediate shift count).
void rshift32(Imm32 imm, RegisterID dest)
{
    m_assembler.sarl_i8r(imm.m_value, dest);
}
// dest -= src (32-bit).
void sub32(RegisterID src, RegisterID dest)
{
    m_assembler.subl_rr(src, dest);
}
// dest -= imm (32-bit).
void sub32(Imm32 imm, RegisterID dest)
{
    m_assembler.subl_ir(imm.m_value, dest);
}
// [address] -= imm (32-bit).
void sub32(Imm32 imm, Address address)
{
    m_assembler.subl_im(imm.m_value, address.offset, address.base);
}
// dest -= [src] (32-bit).
void sub32(Address src, RegisterID dest)
{
    m_assembler.subl_mr(src.offset, src.base, dest);
}
// [dest] -= src (32-bit).
void sub32(RegisterID src, Address dest)
{
    m_assembler.subl_rm(src, dest.offset, dest.base);
}
// dest ^= src (32-bit).
void xor32(RegisterID src, RegisterID dest)
{
    m_assembler.xorl_rr(src, dest);
}
// [dest] ^= imm (32-bit).
void xor32(Imm32 imm, Address dest)
{
    m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
}
// dest ^= imm (32-bit).
void xor32(Imm32 imm, RegisterID dest)
{
    m_assembler.xorl_ir(imm.m_value, dest);
}
// [dest] ^= src (32-bit).
void xor32(RegisterID src, Address dest)
{
    m_assembler.xorl_rm(src, dest.offset, dest.base);
}
// dest ^= [src] (32-bit).
void xor32(Address src, RegisterID dest)
{
    m_assembler.xorl_mr(src.offset, src.base, dest);
}
307 // Memory access operations:
309 // Loads are of the form load(address, destination) and stores of the form
310 // store(source, address). The source for a store may be an Imm32. Address
311 // operand objects to loads and store will be implicitly constructed if a
312 // register is passed.
// dest = [address] (32-bit load).
void load32(ImplicitAddress address, RegisterID dest)
{
    m_assembler.movl_mr(address.offset, address.base, dest);
}
// dest = [base + index*scale + offset] (32-bit load).
void load32(BaseIndex address, RegisterID dest)
{
    m_assembler.movl_mr(address.offset, address.base, address.index, address.scale, dest);
}
// x86 tolerates unaligned 32-bit loads, so this is just a plain load32.
void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
{
    load32(address, dest);
}
// 32-bit load with a full 32-bit displacement, returning a label so the
// displacement can be repatched later.
DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
{
    m_assembler.movl_mr_disp32(address.offset, address.base, dest);
    return DataLabel32(this);
}
// dest = zero-extended 16-bit value at [base + index*scale + offset].
void load16(BaseIndex address, RegisterID dest)
{
    m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
}
// 32-bit store with a full 32-bit displacement, returning a label so the
// displacement can be repatched later.
DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
{
    m_assembler.movl_rm_disp32(src, address.offset, address.base);
    return DataLabel32(this);
}
// [address] = src (32-bit store).
void store32(RegisterID src, ImplicitAddress address)
{
    m_assembler.movl_rm(src, address.offset, address.base);
}
// [base + index*scale + offset] = src (32-bit store).
void store32(RegisterID src, BaseIndex address)
{
    m_assembler.movl_rm(src, address.offset, address.base, address.index, address.scale);
}
// [address] = imm (32-bit immediate store).
void store32(Imm32 imm, ImplicitAddress address)
{
    m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
}
362 // Floating-point operation:
364 // Presently only supports SSE, not x87 floating point.
// dest = double at [address] (SSE2 movsd).
void loadDouble(ImplicitAddress address, FPRegisterID dest)
{
    ASSERT(isSSE2Present());
    m_assembler.movsd_mr(address.offset, address.base, dest);
}
// [address] = src (SSE2 movsd store).
void storeDouble(FPRegisterID src, ImplicitAddress address)
{
    ASSERT(isSSE2Present());
    m_assembler.movsd_rm(src, address.offset, address.base);
}
// dest += src (double, SSE2 addsd).
void addDouble(FPRegisterID src, FPRegisterID dest)
{
    ASSERT(isSSE2Present());
    m_assembler.addsd_rr(src, dest);
}
// dest += double at [src] (SSE2 addsd).
void addDouble(Address src, FPRegisterID dest)
{
    ASSERT(isSSE2Present());
    m_assembler.addsd_mr(src.offset, src.base, dest);
}
// dest /= src (double, SSE2 divsd).
void divDouble(FPRegisterID src, FPRegisterID dest)
{
    ASSERT(isSSE2Present());
    m_assembler.divsd_rr(src, dest);
}
// dest /= double at [src] (SSE2 divsd).
void divDouble(Address src, FPRegisterID dest)
{
    ASSERT(isSSE2Present());
    m_assembler.divsd_mr(src.offset, src.base, dest);
}
// dest -= src (double, SSE2 subsd).
void subDouble(FPRegisterID src, FPRegisterID dest)
{
    ASSERT(isSSE2Present());
    m_assembler.subsd_rr(src, dest);
}
// dest -= double at [src] (SSE2 subsd).
void subDouble(Address src, FPRegisterID dest)
{
    ASSERT(isSSE2Present());
    m_assembler.subsd_mr(src.offset, src.base, dest);
}
// dest *= src (double, SSE2 mulsd).
void mulDouble(FPRegisterID src, FPRegisterID dest)
{
    ASSERT(isSSE2Present());
    m_assembler.mulsd_rr(src, dest);
}
// dest *= double at [src] (SSE2 mulsd).
void mulDouble(Address src, FPRegisterID dest)
{
    ASSERT(isSSE2Present());
    m_assembler.mulsd_mr(src.offset, src.base, dest);
}
// dest = (double)src (SSE2 cvtsi2sd).
void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
{
    ASSERT(isSSE2Present());
    m_assembler.cvtsi2sd_rr(src, dest);
}
// dest = (double) 32-bit int at [src] (SSE2 cvtsi2sd).
void convertInt32ToDouble(Address src, FPRegisterID dest)
{
    ASSERT(isSSE2Present());
    m_assembler.cvtsi2sd_mr(src.offset, src.base, dest);
}
438 Jump
branchDouble(DoubleCondition cond
, FPRegisterID left
, FPRegisterID right
)
440 ASSERT(isSSE2Present());
442 if (cond
& DoubleConditionBitInvert
)
443 m_assembler
.ucomisd_rr(left
, right
);
445 m_assembler
.ucomisd_rr(right
, left
);
447 if (cond
== DoubleEqual
) {
448 Jump
isUnordered(m_assembler
.jp());
449 Jump result
= Jump(m_assembler
.je());
450 isUnordered
.link(this);
452 } else if (cond
== DoubleNotEqualOrUnordered
) {
453 Jump
isUnordered(m_assembler
.jp());
454 Jump
isEqual(m_assembler
.je());
455 isUnordered
.link(this);
456 Jump result
= jump();
461 ASSERT(!(cond
& DoubleConditionBitSpecial
));
462 return Jump(m_assembler
.jCC(static_cast<X86Assembler::Condition
>(cond
& ~DoubleConditionBits
)));
465 // Truncates 'src' to an integer, and places the resulting 'dest'.
466 // If the result is not representable as a 32 bit value, branch.
467 // May also branch for some values that are representable in 32 bits
468 // (specifically, in this case, INT_MIN).
// Truncate 'src' to an integer in 'dest'; the returned Jump is taken when the
// value was not representable (cvttsd2si produces 0x80000000 on overflow/NaN,
// so a genuine INT_MIN also branches — callers must tolerate that).
Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest)
{
    ASSERT(isSSE2Present());
    m_assembler.cvttsd2si_rr(src, dest);
    return branch32(Equal, dest, Imm32(0x80000000));
}
476 // Convert 'src' to an integer, and places the resulting 'dest'.
477 // If the result is not representable as a 32 bit value, branch.
478 // May also branch for some values that are representable in 32 bits
479 // (specifically, in this case, 0).
// Convert 'src' to an integer in 'dest', appending to failureCases whenever
// the conversion is not exact (fraction lost, NaN, out of range, or -0.0).
// fpTemp is clobbered as scratch for the round-trip comparison.
void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
{
    ASSERT(isSSE2Present());
    m_assembler.cvttsd2si_rr(src, dest);

    // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
    failureCases.append(branchTest32(Zero, dest));

    // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
    convertInt32ToDouble(dest, fpTemp);
    m_assembler.ucomisd_rr(fpTemp, src);
    failureCases.append(m_assembler.jp());
    failureCases.append(m_assembler.jne());
}
// srcDest = +0.0 (xorpd of a register with itself).
void zeroDouble(FPRegisterID srcDest)
{
    ASSERT(isSSE2Present());
    m_assembler.xorpd_rr(srcDest, srcDest);
}
502 // Stack manipulation operations:
504 // The ABI is assumed to provide a stack abstraction to memory,
505 // containing machine word sized units of data. Push and pop
506 // operations add and remove a single register sized unit of data
507 // to or from the stack. Peek and poke operations read or write
508 // values on the stack, without moving the current stack position.
// Pop one machine word from the stack into dest.
void pop(RegisterID dest)
{
    m_assembler.pop_r(dest);
}
// Push src onto the stack.
void push(RegisterID src)
{
    m_assembler.push_r(src);
}
// Push the word at [address] onto the stack.
void push(Address address)
{
    m_assembler.push_m(address.offset, address.base);
}
527 m_assembler
.push_i32(imm
.m_value
);
531 // Register move operations:
533 // Move values in registers.
535 void move(Imm32 imm
, RegisterID dest
)
537 // Note: on 64-bit the Imm32 value is zero extended into the register, it
538 // may be useful to have a separate version that sign extends the value?
540 m_assembler
.xorl_rr(dest
, dest
);
542 m_assembler
.movl_i32r(imm
.m_value
, dest
);
// dest = src (full 64-bit register move).
// NOTE(review): this movq variant is the x86_64 implementation; the
// surrounding #if CPU(X86_64) guard appears lost in this chunk — confirm.
void move(RegisterID src, RegisterID dest)
{
    // Note: on 64-bit this is is a full register move; perhaps it would be
    // useful to have separate move32 & movePtr, with move32 zero extending?
    m_assembler.movq_rr(src, dest);
}
// dest = pointer-sized immediate (64-bit movabs).
// NOTE(review): x86_64 variant; #if CPU(X86_64) guard lost in chunk — confirm.
void move(ImmPtr imm, RegisterID dest)
{
    m_assembler.movq_i64r(imm.asIntptr(), dest);
}
// Exchange the contents of reg1 and reg2 (64-bit xchg).
// NOTE(review): x86_64 variant; #if CPU(X86_64) guard lost in chunk — confirm.
void swap(RegisterID reg1, RegisterID reg2)
{
    m_assembler.xchgq_rr(reg1, reg2);
}
// dest = sign-extended 32-bit value of src (movsxd).
// NOTE(review): x86_64 variant; #if CPU(X86_64) guard lost in chunk — confirm.
void signExtend32ToPtr(RegisterID src, RegisterID dest)
{
    m_assembler.movsxd_rr(src, dest);
}
// dest = zero-extended 32-bit value of src (32-bit mov clears upper bits).
// NOTE(review): x86_64 variant; #if CPU(X86_64) guard lost in chunk — confirm.
void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
{
    m_assembler.movl_rr(src, dest);
}
// dest = src (32-bit register move).
// NOTE(review): x86 (32-bit) variant; the #else of a lost #if CPU(X86_64) —
// confirm against upstream.
void move(RegisterID src, RegisterID dest)
{
    m_assembler.movl_rr(src, dest);
}
// dest = pointer-sized immediate (pointers are 32-bit here).
// NOTE(review): x86 (32-bit) variant of a lost #if/#else pair — confirm.
void move(ImmPtr imm, RegisterID dest)
{
    m_assembler.movl_i32r(imm.asIntptr(), dest);
}
// Exchange the contents of reg1 and reg2 (32-bit xchg).
// NOTE(review): x86 (32-bit) variant of a lost #if/#else pair — confirm.
void swap(RegisterID reg1, RegisterID reg2)
{
    m_assembler.xchgl_rr(reg1, reg2);
}
592 void signExtend32ToPtr(RegisterID src
, RegisterID dest
)
597 void zeroExtend32ToPtr(RegisterID src
, RegisterID dest
)
604 // Forwards / external control flow operations:
606 // This set of jump and conditional branch operations return a Jump
607 // object which may linked at a later point, allow forwards jump,
608 // or jumps that will require external linkage (after the code has been
611 // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
612 // respecitvely, for unsigned comparisons the names b, a, be, and ae are
613 // used (representing the names 'below' and 'above').
615 // Operands to the comparision are provided in the expected order, e.g.
616 // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
617 // treated as a signed 32bit value, is less than or equal to 5.
619 // jz and jnz test whether the first operand is equal to zero, and take
620 // an optional second operand of a mask under which to perform the test.
// Branch if (left <cond> right), register vs register.
Jump branch32(Condition cond, RegisterID left, RegisterID right)
{
    m_assembler.cmpl_rr(right, left);
    return Jump(m_assembler.jCC(x86Condition(cond)));
}
629 Jump
branch32(Condition cond
, RegisterID left
, Imm32 right
)
631 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
632 m_assembler
.testl_rr(left
, left
);
634 m_assembler
.cmpl_ir(right
.m_value
, left
);
635 return Jump(m_assembler
.jCC(x86Condition(cond
)));
// Branch if (left <cond> [right]), register vs memory.
Jump branch32(Condition cond, RegisterID left, Address right)
{
    m_assembler.cmpl_mr(right.offset, right.base, left);
    return Jump(m_assembler.jCC(x86Condition(cond)));
}
// Branch if ([left] <cond> right), memory vs register.
Jump branch32(Condition cond, Address left, RegisterID right)
{
    m_assembler.cmpl_rm(right, left.offset, left.base);
    return Jump(m_assembler.jCC(x86Condition(cond)));
}
// Branch if ([left] <cond> right), memory vs immediate.
Jump branch32(Condition cond, Address left, Imm32 right)
{
    m_assembler.cmpl_im(right.m_value, left.offset, left.base);
    return Jump(m_assembler.jCC(x86Condition(cond)));
}
// Branch if ([base + index*scale + offset] <cond> right).
Jump branch32(Condition cond, BaseIndex left, Imm32 right)
{
    m_assembler.cmpl_im(right.m_value, left.offset, left.base, left.index, left.scale);
    return Jump(m_assembler.jCC(x86Condition(cond)));
}
// x86 tolerates unaligned accesses, so this is just a plain branch32.
Jump branch32WithUnalignedHalfWords(Condition cond, BaseIndex left, Imm32 right)
{
    return branch32(cond, left, right);
}
// Branch on a 16-bit compare of [base + index*scale + offset] vs right.
Jump branch16(Condition cond, BaseIndex left, RegisterID right)
{
    m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
    return Jump(m_assembler.jCC(x86Condition(cond)));
}
// Branch on a 16-bit compare of [base + index*scale + offset] vs immediate;
// the immediate must fit in 16 bits.
Jump branch16(Condition cond, BaseIndex left, Imm32 right)
{
    ASSERT(!(right.m_value & 0xFFFF0000));

    m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
    return Jump(m_assembler.jCC(x86Condition(cond)));
}
// Branch if (reg & mask) is zero / non-zero.
Jump branchTest32(Condition cond, RegisterID reg, RegisterID mask)
{
    ASSERT((cond == Zero) || (cond == NonZero));
    m_assembler.testl_rr(reg, mask);
    return Jump(m_assembler.jCC(x86Condition(cond)));
}
688 Jump
branchTest32(Condition cond
, RegisterID reg
, Imm32 mask
= Imm32(-1))
690 ASSERT((cond
== Zero
) || (cond
== NonZero
));
691 // if we are only interested in the low seven bits, this can be tested with a testb
692 if (mask
.m_value
== -1)
693 m_assembler
.testl_rr(reg
, reg
);
694 else if ((mask
.m_value
& ~0x7f) == 0)
695 m_assembler
.testb_i8r(mask
.m_value
, reg
);
697 m_assembler
.testl_i32r(mask
.m_value
, reg
);
698 return Jump(m_assembler
.jCC(x86Condition(cond
)));
701 Jump
branchTest32(Condition cond
, Address address
, Imm32 mask
= Imm32(-1))
703 ASSERT((cond
== Zero
) || (cond
== NonZero
));
704 if (mask
.m_value
== -1)
705 m_assembler
.cmpl_im(0, address
.offset
, address
.base
);
707 m_assembler
.testl_i32m(mask
.m_value
, address
.offset
, address
.base
);
708 return Jump(m_assembler
.jCC(x86Condition(cond
)));
711 Jump
branchTest32(Condition cond
, BaseIndex address
, Imm32 mask
= Imm32(-1))
713 ASSERT((cond
== Zero
) || (cond
== NonZero
));
714 if (mask
.m_value
== -1)
715 m_assembler
.cmpl_im(0, address
.offset
, address
.base
, address
.index
, address
.scale
);
717 m_assembler
.testl_i32m(mask
.m_value
, address
.offset
, address
.base
, address
.index
, address
.scale
);
718 return Jump(m_assembler
.jCC(x86Condition(cond
)));
723 return Jump(m_assembler
.jmp());
// Indirect jump to the address held in `target`.
void jump(RegisterID target)
{
    m_assembler.jmp_r(target);
}
731 // Address is a memory location containing the address to jump to
732 void jump(Address address
)
734 m_assembler
.jmp_m(address
.offset
, address
.base
);
738 // Arithmetic control flow operations:
740 // This set of conditional branch operations branch based
741 // on the result of an arithmetic operation. The operation
742 // is performed as normal, storing the result.
744 // * jz operations branch if the result is zero.
745 // * jo operations branch if the (signed) arithmetic
746 // operation caused an overflow to occur.
748 Jump
branchAdd32(Condition cond
, RegisterID src
, RegisterID dest
)
750 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
752 return Jump(m_assembler
.jCC(x86Condition(cond
)));
755 Jump
branchAdd32(Condition cond
, Imm32 imm
, RegisterID dest
)
757 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
759 return Jump(m_assembler
.jCC(x86Condition(cond
)));
762 Jump
branchAdd32(Condition cond
, Imm32 src
, Address dest
)
764 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
766 return Jump(m_assembler
.jCC(x86Condition(cond
)));
769 Jump
branchAdd32(Condition cond
, RegisterID src
, Address dest
)
771 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
773 return Jump(m_assembler
.jCC(x86Condition(cond
)));
776 Jump
branchAdd32(Condition cond
, Address src
, RegisterID dest
)
778 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
780 return Jump(m_assembler
.jCC(x86Condition(cond
)));
783 Jump
branchMul32(Condition cond
, RegisterID src
, RegisterID dest
)
785 ASSERT(cond
== Overflow
);
787 return Jump(m_assembler
.jCC(x86Condition(cond
)));
790 Jump
branchMul32(Condition cond
, Address src
, RegisterID dest
)
792 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
794 return Jump(m_assembler
.jCC(x86Condition(cond
)));
// dest = src * imm, branching on signed overflow.
Jump branchMul32(Condition cond, Imm32 imm, RegisterID src, RegisterID dest)
{
    ASSERT(cond == Overflow);
    mul32(imm, src, dest);
    return Jump(m_assembler.jCC(x86Condition(cond)));
}
804 Jump
branchSub32(Condition cond
, RegisterID src
, RegisterID dest
)
806 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
808 return Jump(m_assembler
.jCC(x86Condition(cond
)));
811 Jump
branchSub32(Condition cond
, Imm32 imm
, RegisterID dest
)
813 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
815 return Jump(m_assembler
.jCC(x86Condition(cond
)));
818 Jump
branchSub32(Condition cond
, Imm32 imm
, Address dest
)
820 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
822 return Jump(m_assembler
.jCC(x86Condition(cond
)));
825 Jump
branchSub32(Condition cond
, RegisterID src
, Address dest
)
827 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
829 return Jump(m_assembler
.jCC(x86Condition(cond
)));
832 Jump
branchSub32(Condition cond
, Address src
, RegisterID dest
)
834 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
836 return Jump(m_assembler
.jCC(x86Condition(cond
)));
839 Jump
branchOr32(Condition cond
, RegisterID src
, RegisterID dest
)
841 ASSERT((cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
843 return Jump(m_assembler
.jCC(x86Condition(cond
)));
847 // Miscellaneous operations:
856 return Call(m_assembler
.call(), Call::LinkableNear
);
// Indirect call through the address held in `target`.
Call call(RegisterID target)
{
    return Call(m_assembler.call(target), Call::None);
}
// Indirect call through the address stored at [address].
void call(Address address)
{
    m_assembler.call_m(address.offset, address.base);
}
// dest = (left <cond> right) ? 1 : 0 — sets only the low byte of dest.
void set8(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
{
    m_assembler.cmpl_rr(right, left);
    m_assembler.setCC_r(x86Condition(cond), dest);
}
// dest = ([left] <cond> right) ? 1 : 0 — sets only the low byte of dest.
void set8(Condition cond, Address left, RegisterID right, RegisterID dest)
{
    m_assembler.cmpl_mr(left.offset, left.base, right);
    m_assembler.setCC_r(x86Condition(cond), dest);
}
886 void set8(Condition cond
, RegisterID left
, Imm32 right
, RegisterID dest
)
888 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
889 m_assembler
.testl_rr(left
, left
);
891 m_assembler
.cmpl_ir(right
.m_value
, left
);
892 m_assembler
.setCC_r(x86Condition(cond
), dest
);
// dest = (left <cond> right) ? 1 : 0, zero-extended to the full register.
void set32(Condition cond, RegisterID left, RegisterID right, RegisterID dest)
{
    m_assembler.cmpl_rr(right, left);
    m_assembler.setCC_r(x86Condition(cond), dest);
    m_assembler.movzbl_rr(dest, dest);
}
902 void set32(Condition cond
, RegisterID left
, Imm32 right
, RegisterID dest
)
904 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
905 m_assembler
.testl_rr(left
, left
);
907 m_assembler
.cmpl_ir(right
.m_value
, left
);
908 m_assembler
.setCC_r(x86Condition(cond
), dest
);
909 m_assembler
.movzbl_rr(dest
, dest
);
913 // The mask should be optional... paerhaps the argument order should be
914 // dest-src, operations always have a dest? ... possibly not true, considering
915 // asm ops like test, or pseudo ops like pop().
917 void setTest8(Condition cond
, Address address
, Imm32 mask
, RegisterID dest
)
919 if (mask
.m_value
== -1)
920 m_assembler
.cmpl_im(0, address
.offset
, address
.base
);
922 m_assembler
.testl_i32m(mask
.m_value
, address
.offset
, address
.base
);
923 m_assembler
.setCC_r(x86Condition(cond
), dest
);
926 void setTest32(Condition cond
, Address address
, Imm32 mask
, RegisterID dest
)
928 if (mask
.m_value
== -1)
929 m_assembler
.cmpl_im(0, address
.offset
, address
.base
);
931 m_assembler
.testl_i32m(mask
.m_value
, address
.offset
, address
.base
);
932 m_assembler
.setCC_r(x86Condition(cond
), dest
);
933 m_assembler
.movzbl_rr(dest
, dest
);
// Narrow an integer Condition back to the underlying assembler condition code.
X86Assembler::Condition x86Condition(Condition cond)
{
    return static_cast<X86Assembler::Condition>(cond);
}
943 // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
944 // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
945 friend class MacroAssemblerX86
;
950 // All X86 Macs are guaranteed to support at least SSE2,
951 static bool isSSE2Present()
956 #else // OS(MAC_OS_X)
958 enum SSE2CheckState
{
964 static bool isSSE2Present()
966 if (s_sse2CheckState
== NotCheckedSSE2
) {
967 // Default the flags value to zero; if the compiler is
968 // not MSVC or GCC we will read this as SSE2 not present.
972 mov eax
, 1 // cpuid function 1 gives us the standard feature set
985 : "%eax", "%ecx", "%edx"
988 static const int SSE2FeatureBit
= 1 << 26;
989 s_sse2CheckState
= (flags
& SSE2FeatureBit
) ? HasSSE2
: NoSSE2
;
992 ASSERT(s_sse2CheckState
!= NotCheckedSSE2
);
994 return s_sse2CheckState
== HasSSE2
;
997 static SSE2CheckState s_sse2CheckState
;
999 #endif // OS(MAC_OS_X)
1000 #elif !defined(NDEBUG) // CPU(X86)
1002 // On x86-64 we should never be checking for SSE2 in a non-debug build,
1003 // but non debug add this method to keep the asserts above happy.
1004 static bool isSSE2Present()
1014 #endif // ENABLE(ASSEMBLER)
1016 #endif // MacroAssemblerX86Common_h