2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef MacroAssemblerX86Common_h
27 #define MacroAssemblerX86Common_h
31 #include "X86Assembler.h"
32 #include "AbstractMacroAssembler.h"
36 class MacroAssemblerX86Common
: public AbstractMacroAssembler
<X86Assembler
> {
37 static const int DoubleConditionBitInvert
= 0x10;
38 static const int DoubleConditionBitSpecial
= 0x20;
39 static const int DoubleConditionBits
= DoubleConditionBitInvert
| DoubleConditionBitSpecial
;
42 typedef X86Assembler::FPRegisterID FPRegisterID
;
45 Equal
= X86Assembler::ConditionE
,
46 NotEqual
= X86Assembler::ConditionNE
,
47 Above
= X86Assembler::ConditionA
,
48 AboveOrEqual
= X86Assembler::ConditionAE
,
49 Below
= X86Assembler::ConditionB
,
50 BelowOrEqual
= X86Assembler::ConditionBE
,
51 GreaterThan
= X86Assembler::ConditionG
,
52 GreaterThanOrEqual
= X86Assembler::ConditionGE
,
53 LessThan
= X86Assembler::ConditionL
,
54 LessThanOrEqual
= X86Assembler::ConditionLE
,
55 Overflow
= X86Assembler::ConditionO
,
56 Signed
= X86Assembler::ConditionS
,
57 Zero
= X86Assembler::ConditionE
,
58 NonZero
= X86Assembler::ConditionNE
61 enum DoubleCondition
{
62 // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
63 DoubleEqual
= X86Assembler::ConditionE
| DoubleConditionBitSpecial
,
64 DoubleNotEqual
= X86Assembler::ConditionNE
,
65 DoubleGreaterThan
= X86Assembler::ConditionA
,
66 DoubleGreaterThanOrEqual
= X86Assembler::ConditionAE
,
67 DoubleLessThan
= X86Assembler::ConditionA
| DoubleConditionBitInvert
,
68 DoubleLessThanOrEqual
= X86Assembler::ConditionAE
| DoubleConditionBitInvert
,
69 // If either operand is NaN, these conditions always evaluate to true.
70 DoubleEqualOrUnordered
= X86Assembler::ConditionE
,
71 DoubleNotEqualOrUnordered
= X86Assembler::ConditionNE
| DoubleConditionBitSpecial
,
72 DoubleGreaterThanOrUnordered
= X86Assembler::ConditionB
| DoubleConditionBitInvert
,
73 DoubleGreaterThanOrEqualOrUnordered
= X86Assembler::ConditionBE
| DoubleConditionBitInvert
,
74 DoubleLessThanOrUnordered
= X86Assembler::ConditionB
,
75 DoubleLessThanOrEqualOrUnordered
= X86Assembler::ConditionBE
,
78 !((X86Assembler::ConditionE
| X86Assembler::ConditionNE
| X86Assembler::ConditionA
| X86Assembler::ConditionAE
| X86Assembler::ConditionB
| X86Assembler::ConditionBE
) & DoubleConditionBits
),
79 DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes
);
81 static const RegisterID stackPointerRegister
= X86Registers::esp
;
83 // Integer arithmetic operations:
85 // Operations are typically two operand - operation(source, srcDst)
86 // For many operations the source may be an Imm32, the srcDst operand
87 // may often be a memory location (explictly described using an Address
90 void add32(RegisterID src
, RegisterID dest
)
92 m_assembler
.addl_rr(src
, dest
);
95 void add32(Imm32 imm
, Address address
)
97 m_assembler
.addl_im(imm
.m_value
, address
.offset
, address
.base
);
100 void add32(Imm32 imm
, RegisterID dest
)
102 m_assembler
.addl_ir(imm
.m_value
, dest
);
105 void add32(Address src
, RegisterID dest
)
107 m_assembler
.addl_mr(src
.offset
, src
.base
, dest
);
110 void add32(RegisterID src
, Address dest
)
112 m_assembler
.addl_rm(src
, dest
.offset
, dest
.base
);
115 void and32(RegisterID src
, RegisterID dest
)
117 m_assembler
.andl_rr(src
, dest
);
120 void and32(Imm32 imm
, RegisterID dest
)
122 m_assembler
.andl_ir(imm
.m_value
, dest
);
125 void and32(RegisterID src
, Address dest
)
127 m_assembler
.andl_rm(src
, dest
.offset
, dest
.base
);
130 void and32(Address src
, RegisterID dest
)
132 m_assembler
.andl_mr(src
.offset
, src
.base
, dest
);
135 void and32(Imm32 imm
, Address address
)
137 m_assembler
.andl_im(imm
.m_value
, address
.offset
, address
.base
);
140 void lshift32(Imm32 imm
, RegisterID dest
)
142 m_assembler
.shll_i8r(imm
.m_value
, dest
);
145 void lshift32(RegisterID shift_amount
, RegisterID dest
)
147 // On x86 we can only shift by ecx; if asked to shift by another register we'll
148 // need rejig the shift amount into ecx first, and restore the registers afterwards.
149 if (shift_amount
!= X86Registers::ecx
) {
150 swap(shift_amount
, X86Registers::ecx
);
152 // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
153 if (dest
== shift_amount
)
154 m_assembler
.shll_CLr(X86Registers::ecx
);
155 // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
156 else if (dest
== X86Registers::ecx
)
157 m_assembler
.shll_CLr(shift_amount
);
158 // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
160 m_assembler
.shll_CLr(dest
);
162 swap(shift_amount
, X86Registers::ecx
);
164 m_assembler
.shll_CLr(dest
);
167 void mul32(RegisterID src
, RegisterID dest
)
169 m_assembler
.imull_rr(src
, dest
);
172 void mul32(Address src
, RegisterID dest
)
174 m_assembler
.imull_mr(src
.offset
, src
.base
, dest
);
177 void mul32(Imm32 imm
, RegisterID src
, RegisterID dest
)
179 m_assembler
.imull_i32r(src
, imm
.m_value
, dest
);
182 void neg32(RegisterID srcDest
)
184 m_assembler
.negl_r(srcDest
);
187 void neg32(Address srcDest
)
189 m_assembler
.negl_m(srcDest
.offset
, srcDest
.base
);
192 void not32(RegisterID srcDest
)
194 m_assembler
.notl_r(srcDest
);
197 void not32(Address srcDest
)
199 m_assembler
.notl_m(srcDest
.offset
, srcDest
.base
);
202 void or32(RegisterID src
, RegisterID dest
)
204 m_assembler
.orl_rr(src
, dest
);
207 void or32(Imm32 imm
, RegisterID dest
)
209 m_assembler
.orl_ir(imm
.m_value
, dest
);
212 void or32(RegisterID src
, Address dest
)
214 m_assembler
.orl_rm(src
, dest
.offset
, dest
.base
);
217 void or32(Address src
, RegisterID dest
)
219 m_assembler
.orl_mr(src
.offset
, src
.base
, dest
);
222 void or32(Imm32 imm
, Address address
)
224 m_assembler
.orl_im(imm
.m_value
, address
.offset
, address
.base
);
227 void rshift32(RegisterID shift_amount
, RegisterID dest
)
229 // On x86 we can only shift by ecx; if asked to shift by another register we'll
230 // need rejig the shift amount into ecx first, and restore the registers afterwards.
231 if (shift_amount
!= X86Registers::ecx
) {
232 swap(shift_amount
, X86Registers::ecx
);
234 // E.g. transform "shll %eax, %eax" -> "xchgl %eax, %ecx; shll %ecx, %ecx; xchgl %eax, %ecx"
235 if (dest
== shift_amount
)
236 m_assembler
.sarl_CLr(X86Registers::ecx
);
237 // E.g. transform "shll %eax, %ecx" -> "xchgl %eax, %ecx; shll %ecx, %eax; xchgl %eax, %ecx"
238 else if (dest
== X86Registers::ecx
)
239 m_assembler
.sarl_CLr(shift_amount
);
240 // E.g. transform "shll %eax, %ebx" -> "xchgl %eax, %ecx; shll %ecx, %ebx; xchgl %eax, %ecx"
242 m_assembler
.sarl_CLr(dest
);
244 swap(shift_amount
, X86Registers::ecx
);
246 m_assembler
.sarl_CLr(dest
);
249 void rshift32(Imm32 imm
, RegisterID dest
)
251 m_assembler
.sarl_i8r(imm
.m_value
, dest
);
254 void urshift32(RegisterID shift_amount
, RegisterID dest
)
256 // On x86 we can only shift by ecx; if asked to shift by another register we'll
257 // need rejig the shift amount into ecx first, and restore the registers afterwards.
258 if (shift_amount
!= X86Registers::ecx
) {
259 swap(shift_amount
, X86Registers::ecx
);
261 // E.g. transform "shrl %eax, %eax" -> "xchgl %eax, %ecx; shrl %ecx, %ecx; xchgl %eax, %ecx"
262 if (dest
== shift_amount
)
263 m_assembler
.shrl_CLr(X86Registers::ecx
);
264 // E.g. transform "shrl %eax, %ecx" -> "xchgl %eax, %ecx; shrl %ecx, %eax; xchgl %eax, %ecx"
265 else if (dest
== X86Registers::ecx
)
266 m_assembler
.shrl_CLr(shift_amount
);
267 // E.g. transform "shrl %eax, %ebx" -> "xchgl %eax, %ecx; shrl %ecx, %ebx; xchgl %eax, %ecx"
269 m_assembler
.shrl_CLr(dest
);
271 swap(shift_amount
, X86Registers::ecx
);
273 m_assembler
.shrl_CLr(dest
);
276 void urshift32(Imm32 imm
, RegisterID dest
)
278 m_assembler
.shrl_i8r(imm
.m_value
, dest
);
281 void sub32(RegisterID src
, RegisterID dest
)
283 m_assembler
.subl_rr(src
, dest
);
286 void sub32(Imm32 imm
, RegisterID dest
)
288 m_assembler
.subl_ir(imm
.m_value
, dest
);
291 void sub32(Imm32 imm
, Address address
)
293 m_assembler
.subl_im(imm
.m_value
, address
.offset
, address
.base
);
296 void sub32(Address src
, RegisterID dest
)
298 m_assembler
.subl_mr(src
.offset
, src
.base
, dest
);
301 void sub32(RegisterID src
, Address dest
)
303 m_assembler
.subl_rm(src
, dest
.offset
, dest
.base
);
307 void xor32(RegisterID src
, RegisterID dest
)
309 m_assembler
.xorl_rr(src
, dest
);
312 void xor32(Imm32 imm
, Address dest
)
314 m_assembler
.xorl_im(imm
.m_value
, dest
.offset
, dest
.base
);
317 void xor32(Imm32 imm
, RegisterID dest
)
319 m_assembler
.xorl_ir(imm
.m_value
, dest
);
322 void xor32(RegisterID src
, Address dest
)
324 m_assembler
.xorl_rm(src
, dest
.offset
, dest
.base
);
327 void xor32(Address src
, RegisterID dest
)
329 m_assembler
.xorl_mr(src
.offset
, src
.base
, dest
);
332 void sqrtDouble(FPRegisterID src
, FPRegisterID dst
)
334 m_assembler
.sqrtsd_rr(src
, dst
);
337 // Memory access operations:
339 // Loads are of the form load(address, destination) and stores of the form
340 // store(source, address). The source for a store may be an Imm32. Address
341 // operand objects to loads and store will be implicitly constructed if a
342 // register is passed.
344 void load32(ImplicitAddress address
, RegisterID dest
)
346 m_assembler
.movl_mr(address
.offset
, address
.base
, dest
);
349 void load32(BaseIndex address
, RegisterID dest
)
351 m_assembler
.movl_mr(address
.offset
, address
.base
, address
.index
, address
.scale
, dest
);
354 void load32WithUnalignedHalfWords(BaseIndex address
, RegisterID dest
)
356 load32(address
, dest
);
359 DataLabel32
load32WithAddressOffsetPatch(Address address
, RegisterID dest
)
361 m_assembler
.movl_mr_disp32(address
.offset
, address
.base
, dest
);
362 return DataLabel32(this);
365 void load16(BaseIndex address
, RegisterID dest
)
367 m_assembler
.movzwl_mr(address
.offset
, address
.base
, address
.index
, address
.scale
, dest
);
370 void load16(Address address
, RegisterID dest
)
372 m_assembler
.movzwl_mr(address
.offset
, address
.base
, dest
);
375 DataLabel32
store32WithAddressOffsetPatch(RegisterID src
, Address address
)
377 m_assembler
.movl_rm_disp32(src
, address
.offset
, address
.base
);
378 return DataLabel32(this);
381 void store32(RegisterID src
, ImplicitAddress address
)
383 m_assembler
.movl_rm(src
, address
.offset
, address
.base
);
386 void store32(RegisterID src
, BaseIndex address
)
388 m_assembler
.movl_rm(src
, address
.offset
, address
.base
, address
.index
, address
.scale
);
391 void store32(Imm32 imm
, ImplicitAddress address
)
393 m_assembler
.movl_i32m(imm
.m_value
, address
.offset
, address
.base
);
397 // Floating-point operation:
399 // Presently only supports SSE, not x87 floating point.
401 void loadDouble(ImplicitAddress address
, FPRegisterID dest
)
403 ASSERT(isSSE2Present());
404 m_assembler
.movsd_mr(address
.offset
, address
.base
, dest
);
407 void storeDouble(FPRegisterID src
, ImplicitAddress address
)
409 ASSERT(isSSE2Present());
410 m_assembler
.movsd_rm(src
, address
.offset
, address
.base
);
413 void addDouble(FPRegisterID src
, FPRegisterID dest
)
415 ASSERT(isSSE2Present());
416 m_assembler
.addsd_rr(src
, dest
);
419 void addDouble(Address src
, FPRegisterID dest
)
421 ASSERT(isSSE2Present());
422 m_assembler
.addsd_mr(src
.offset
, src
.base
, dest
);
425 void divDouble(FPRegisterID src
, FPRegisterID dest
)
427 ASSERT(isSSE2Present());
428 m_assembler
.divsd_rr(src
, dest
);
431 void divDouble(Address src
, FPRegisterID dest
)
433 ASSERT(isSSE2Present());
434 m_assembler
.divsd_mr(src
.offset
, src
.base
, dest
);
437 void subDouble(FPRegisterID src
, FPRegisterID dest
)
439 ASSERT(isSSE2Present());
440 m_assembler
.subsd_rr(src
, dest
);
443 void subDouble(Address src
, FPRegisterID dest
)
445 ASSERT(isSSE2Present());
446 m_assembler
.subsd_mr(src
.offset
, src
.base
, dest
);
449 void mulDouble(FPRegisterID src
, FPRegisterID dest
)
451 ASSERT(isSSE2Present());
452 m_assembler
.mulsd_rr(src
, dest
);
455 void mulDouble(Address src
, FPRegisterID dest
)
457 ASSERT(isSSE2Present());
458 m_assembler
.mulsd_mr(src
.offset
, src
.base
, dest
);
461 void convertInt32ToDouble(RegisterID src
, FPRegisterID dest
)
463 ASSERT(isSSE2Present());
464 m_assembler
.cvtsi2sd_rr(src
, dest
);
467 void convertInt32ToDouble(Address src
, FPRegisterID dest
)
469 ASSERT(isSSE2Present());
470 m_assembler
.cvtsi2sd_mr(src
.offset
, src
.base
, dest
);
473 Jump
branchDouble(DoubleCondition cond
, FPRegisterID left
, FPRegisterID right
)
475 ASSERT(isSSE2Present());
477 if (cond
& DoubleConditionBitInvert
)
478 m_assembler
.ucomisd_rr(left
, right
);
480 m_assembler
.ucomisd_rr(right
, left
);
482 if (cond
== DoubleEqual
) {
483 Jump
isUnordered(m_assembler
.jp());
484 Jump result
= Jump(m_assembler
.je());
485 isUnordered
.link(this);
487 } else if (cond
== DoubleNotEqualOrUnordered
) {
488 Jump
isUnordered(m_assembler
.jp());
489 Jump
isEqual(m_assembler
.je());
490 isUnordered
.link(this);
491 Jump result
= jump();
496 ASSERT(!(cond
& DoubleConditionBitSpecial
));
497 return Jump(m_assembler
.jCC(static_cast<X86Assembler::Condition
>(cond
& ~DoubleConditionBits
)));
500 // Truncates 'src' to an integer, and places the resulting 'dest'.
501 // If the result is not representable as a 32 bit value, branch.
502 // May also branch for some values that are representable in 32 bits
503 // (specifically, in this case, INT_MIN).
504 Jump
branchTruncateDoubleToInt32(FPRegisterID src
, RegisterID dest
)
506 ASSERT(isSSE2Present());
507 m_assembler
.cvttsd2si_rr(src
, dest
);
508 return branch32(Equal
, dest
, Imm32(0x80000000));
511 // Convert 'src' to an integer, and places the resulting 'dest'.
512 // If the result is not representable as a 32 bit value, branch.
513 // May also branch for some values that are representable in 32 bits
514 // (specifically, in this case, 0).
515 void branchConvertDoubleToInt32(FPRegisterID src
, RegisterID dest
, JumpList
& failureCases
, FPRegisterID fpTemp
)
517 ASSERT(isSSE2Present());
518 m_assembler
.cvttsd2si_rr(src
, dest
);
520 // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
521 failureCases
.append(branchTest32(Zero
, dest
));
523 // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
524 convertInt32ToDouble(dest
, fpTemp
);
525 m_assembler
.ucomisd_rr(fpTemp
, src
);
526 failureCases
.append(m_assembler
.jp());
527 failureCases
.append(m_assembler
.jne());
530 Jump
branchDoubleNonZero(FPRegisterID reg
, FPRegisterID scratch
)
532 ASSERT(isSSE2Present());
533 m_assembler
.xorpd_rr(scratch
, scratch
);
534 return branchDouble(DoubleNotEqual
, reg
, scratch
);
537 Jump
branchDoubleZeroOrNaN(FPRegisterID reg
, FPRegisterID scratch
)
539 ASSERT(isSSE2Present());
540 m_assembler
.xorpd_rr(scratch
, scratch
);
541 return branchDouble(DoubleEqualOrUnordered
, reg
, scratch
);
544 // Stack manipulation operations:
546 // The ABI is assumed to provide a stack abstraction to memory,
547 // containing machine word sized units of data. Push and pop
548 // operations add and remove a single register sized unit of data
549 // to or from the stack. Peek and poke operations read or write
550 // values on the stack, without moving the current stack position.
552 void pop(RegisterID dest
)
554 m_assembler
.pop_r(dest
);
557 void push(RegisterID src
)
559 m_assembler
.push_r(src
);
562 void push(Address address
)
564 m_assembler
.push_m(address
.offset
, address
.base
);
569 m_assembler
.push_i32(imm
.m_value
);
573 // Register move operations:
575 // Move values in registers.
577 void move(Imm32 imm
, RegisterID dest
)
579 // Note: on 64-bit the Imm32 value is zero extended into the register, it
580 // may be useful to have a separate version that sign extends the value?
582 m_assembler
.xorl_rr(dest
, dest
);
584 m_assembler
.movl_i32r(imm
.m_value
, dest
);
588 void move(RegisterID src
, RegisterID dest
)
590 // Note: on 64-bit this is is a full register move; perhaps it would be
591 // useful to have separate move32 & movePtr, with move32 zero extending?
593 m_assembler
.movq_rr(src
, dest
);
596 void move(ImmPtr imm
, RegisterID dest
)
598 m_assembler
.movq_i64r(imm
.asIntptr(), dest
);
601 void swap(RegisterID reg1
, RegisterID reg2
)
604 m_assembler
.xchgq_rr(reg1
, reg2
);
607 void signExtend32ToPtr(RegisterID src
, RegisterID dest
)
609 m_assembler
.movsxd_rr(src
, dest
);
612 void zeroExtend32ToPtr(RegisterID src
, RegisterID dest
)
614 m_assembler
.movl_rr(src
, dest
);
617 void move(RegisterID src
, RegisterID dest
)
620 m_assembler
.movl_rr(src
, dest
);
623 void move(ImmPtr imm
, RegisterID dest
)
625 m_assembler
.movl_i32r(imm
.asIntptr(), dest
);
628 void swap(RegisterID reg1
, RegisterID reg2
)
631 m_assembler
.xchgl_rr(reg1
, reg2
);
634 void signExtend32ToPtr(RegisterID src
, RegisterID dest
)
639 void zeroExtend32ToPtr(RegisterID src
, RegisterID dest
)
646 // Forwards / external control flow operations:
648 // This set of jump and conditional branch operations return a Jump
649 // object which may linked at a later point, allow forwards jump,
650 // or jumps that will require external linkage (after the code has been
653 // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
654 // respecitvely, for unsigned comparisons the names b, a, be, and ae are
655 // used (representing the names 'below' and 'above').
657 // Operands to the comparision are provided in the expected order, e.g.
658 // jle32(reg1, Imm32(5)) will branch if the value held in reg1, when
659 // treated as a signed 32bit value, is less than or equal to 5.
661 // jz and jnz test whether the first operand is equal to zero, and take
662 // an optional second operand of a mask under which to perform the test.
665 Jump
branch8(Condition cond
, Address left
, Imm32 right
)
667 m_assembler
.cmpb_im(right
.m_value
, left
.offset
, left
.base
);
668 return Jump(m_assembler
.jCC(x86Condition(cond
)));
671 Jump
branch32(Condition cond
, RegisterID left
, RegisterID right
)
673 m_assembler
.cmpl_rr(right
, left
);
674 return Jump(m_assembler
.jCC(x86Condition(cond
)));
677 Jump
branch32(Condition cond
, RegisterID left
, Imm32 right
)
679 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
680 m_assembler
.testl_rr(left
, left
);
682 m_assembler
.cmpl_ir(right
.m_value
, left
);
683 return Jump(m_assembler
.jCC(x86Condition(cond
)));
686 Jump
branch32(Condition cond
, RegisterID left
, Address right
)
688 m_assembler
.cmpl_mr(right
.offset
, right
.base
, left
);
689 return Jump(m_assembler
.jCC(x86Condition(cond
)));
692 Jump
branch32(Condition cond
, Address left
, RegisterID right
)
694 m_assembler
.cmpl_rm(right
, left
.offset
, left
.base
);
695 return Jump(m_assembler
.jCC(x86Condition(cond
)));
698 Jump
branch32(Condition cond
, Address left
, Imm32 right
)
700 m_assembler
.cmpl_im(right
.m_value
, left
.offset
, left
.base
);
701 return Jump(m_assembler
.jCC(x86Condition(cond
)));
704 Jump
branch32(Condition cond
, BaseIndex left
, Imm32 right
)
706 m_assembler
.cmpl_im(right
.m_value
, left
.offset
, left
.base
, left
.index
, left
.scale
);
707 return Jump(m_assembler
.jCC(x86Condition(cond
)));
710 Jump
branch32WithUnalignedHalfWords(Condition cond
, BaseIndex left
, Imm32 right
)
712 return branch32(cond
, left
, right
);
715 Jump
branch16(Condition cond
, BaseIndex left
, RegisterID right
)
717 m_assembler
.cmpw_rm(right
, left
.offset
, left
.base
, left
.index
, left
.scale
);
718 return Jump(m_assembler
.jCC(x86Condition(cond
)));
721 Jump
branch16(Condition cond
, BaseIndex left
, Imm32 right
)
723 ASSERT(!(right
.m_value
& 0xFFFF0000));
725 m_assembler
.cmpw_im(right
.m_value
, left
.offset
, left
.base
, left
.index
, left
.scale
);
726 return Jump(m_assembler
.jCC(x86Condition(cond
)));
729 Jump
branchTest32(Condition cond
, RegisterID reg
, RegisterID mask
)
731 ASSERT((cond
== Zero
) || (cond
== NonZero
));
732 m_assembler
.testl_rr(reg
, mask
);
733 return Jump(m_assembler
.jCC(x86Condition(cond
)));
736 Jump
branchTest32(Condition cond
, RegisterID reg
, Imm32 mask
= Imm32(-1))
738 ASSERT((cond
== Zero
) || (cond
== NonZero
));
739 // if we are only interested in the low seven bits, this can be tested with a testb
740 if (mask
.m_value
== -1)
741 m_assembler
.testl_rr(reg
, reg
);
742 else if ((mask
.m_value
& ~0x7f) == 0)
743 m_assembler
.testb_i8r(mask
.m_value
, reg
);
745 m_assembler
.testl_i32r(mask
.m_value
, reg
);
746 return Jump(m_assembler
.jCC(x86Condition(cond
)));
749 Jump
branchTest32(Condition cond
, Address address
, Imm32 mask
= Imm32(-1))
751 ASSERT((cond
== Zero
) || (cond
== NonZero
));
752 if (mask
.m_value
== -1)
753 m_assembler
.cmpl_im(0, address
.offset
, address
.base
);
755 m_assembler
.testl_i32m(mask
.m_value
, address
.offset
, address
.base
);
756 return Jump(m_assembler
.jCC(x86Condition(cond
)));
759 Jump
branchTest32(Condition cond
, BaseIndex address
, Imm32 mask
= Imm32(-1))
761 ASSERT((cond
== Zero
) || (cond
== NonZero
));
762 if (mask
.m_value
== -1)
763 m_assembler
.cmpl_im(0, address
.offset
, address
.base
, address
.index
, address
.scale
);
765 m_assembler
.testl_i32m(mask
.m_value
, address
.offset
, address
.base
, address
.index
, address
.scale
);
766 return Jump(m_assembler
.jCC(x86Condition(cond
)));
769 Jump
branchTest8(Condition cond
, Address address
, Imm32 mask
= Imm32(-1))
771 ASSERT((cond
== Zero
) || (cond
== NonZero
));
772 if (mask
.m_value
== -1)
773 m_assembler
.cmpb_im(0, address
.offset
, address
.base
);
775 m_assembler
.testb_im(mask
.m_value
, address
.offset
, address
.base
);
776 return Jump(m_assembler
.jCC(x86Condition(cond
)));
779 Jump
branchTest8(Condition cond
, BaseIndex address
, Imm32 mask
= Imm32(-1))
781 ASSERT((cond
== Zero
) || (cond
== NonZero
));
782 if (mask
.m_value
== -1)
783 m_assembler
.cmpb_im(0, address
.offset
, address
.base
, address
.index
, address
.scale
);
785 m_assembler
.testb_im(mask
.m_value
, address
.offset
, address
.base
, address
.index
, address
.scale
);
786 return Jump(m_assembler
.jCC(x86Condition(cond
)));
791 return Jump(m_assembler
.jmp());
794 void jump(RegisterID target
)
796 m_assembler
.jmp_r(target
);
799 // Address is a memory location containing the address to jump to
800 void jump(Address address
)
802 m_assembler
.jmp_m(address
.offset
, address
.base
);
806 // Arithmetic control flow operations:
808 // This set of conditional branch operations branch based
809 // on the result of an arithmetic operation. The operation
810 // is performed as normal, storing the result.
812 // * jz operations branch if the result is zero.
813 // * jo operations branch if the (signed) arithmetic
814 // operation caused an overflow to occur.
816 Jump
branchAdd32(Condition cond
, RegisterID src
, RegisterID dest
)
818 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
820 return Jump(m_assembler
.jCC(x86Condition(cond
)));
823 Jump
branchAdd32(Condition cond
, Imm32 imm
, RegisterID dest
)
825 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
827 return Jump(m_assembler
.jCC(x86Condition(cond
)));
830 Jump
branchAdd32(Condition cond
, Imm32 src
, Address dest
)
832 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
834 return Jump(m_assembler
.jCC(x86Condition(cond
)));
837 Jump
branchAdd32(Condition cond
, RegisterID src
, Address dest
)
839 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
841 return Jump(m_assembler
.jCC(x86Condition(cond
)));
844 Jump
branchAdd32(Condition cond
, Address src
, RegisterID dest
)
846 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
848 return Jump(m_assembler
.jCC(x86Condition(cond
)));
851 Jump
branchMul32(Condition cond
, RegisterID src
, RegisterID dest
)
853 ASSERT(cond
== Overflow
);
855 return Jump(m_assembler
.jCC(x86Condition(cond
)));
858 Jump
branchMul32(Condition cond
, Address src
, RegisterID dest
)
860 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
862 return Jump(m_assembler
.jCC(x86Condition(cond
)));
865 Jump
branchMul32(Condition cond
, Imm32 imm
, RegisterID src
, RegisterID dest
)
867 ASSERT(cond
== Overflow
);
868 mul32(imm
, src
, dest
);
869 return Jump(m_assembler
.jCC(x86Condition(cond
)));
872 Jump
branchSub32(Condition cond
, RegisterID src
, RegisterID dest
)
874 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
876 return Jump(m_assembler
.jCC(x86Condition(cond
)));
879 Jump
branchSub32(Condition cond
, Imm32 imm
, RegisterID dest
)
881 ASSERT((cond
== Overflow
) || (cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
883 return Jump(m_assembler
.jCC(x86Condition(cond
)));
886 Jump
branchSub32(Condition cond
, Imm32 imm
, Address dest
)
888 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
890 return Jump(m_assembler
.jCC(x86Condition(cond
)));
893 Jump
branchSub32(Condition cond
, RegisterID src
, Address dest
)
895 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
897 return Jump(m_assembler
.jCC(x86Condition(cond
)));
900 Jump
branchSub32(Condition cond
, Address src
, RegisterID dest
)
902 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
904 return Jump(m_assembler
.jCC(x86Condition(cond
)));
907 Jump
branchNeg32(Condition cond
, RegisterID srcDest
)
909 ASSERT((cond
== Overflow
) || (cond
== Zero
) || (cond
== NonZero
));
911 return Jump(m_assembler
.jCC(x86Condition(cond
)));
914 Jump
branchOr32(Condition cond
, RegisterID src
, RegisterID dest
)
916 ASSERT((cond
== Signed
) || (cond
== Zero
) || (cond
== NonZero
));
918 return Jump(m_assembler
.jCC(x86Condition(cond
)));
922 // Miscellaneous operations:
931 return Call(m_assembler
.call(), Call::LinkableNear
);
934 Call
call(RegisterID target
)
936 return Call(m_assembler
.call(target
), Call::None
);
939 void call(Address address
)
941 m_assembler
.call_m(address
.offset
, address
.base
);
949 void set8(Condition cond
, RegisterID left
, RegisterID right
, RegisterID dest
)
951 m_assembler
.cmpl_rr(right
, left
);
952 m_assembler
.setCC_r(x86Condition(cond
), dest
);
955 void set8(Condition cond
, Address left
, RegisterID right
, RegisterID dest
)
957 m_assembler
.cmpl_mr(left
.offset
, left
.base
, right
);
958 m_assembler
.setCC_r(x86Condition(cond
), dest
);
961 void set8(Condition cond
, RegisterID left
, Imm32 right
, RegisterID dest
)
963 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
964 m_assembler
.testl_rr(left
, left
);
966 m_assembler
.cmpl_ir(right
.m_value
, left
);
967 m_assembler
.setCC_r(x86Condition(cond
), dest
);
970 void set32(Condition cond
, RegisterID left
, RegisterID right
, RegisterID dest
)
972 m_assembler
.cmpl_rr(right
, left
);
973 m_assembler
.setCC_r(x86Condition(cond
), dest
);
974 m_assembler
.movzbl_rr(dest
, dest
);
977 void set32(Condition cond
, RegisterID left
, Imm32 right
, RegisterID dest
)
979 if (((cond
== Equal
) || (cond
== NotEqual
)) && !right
.m_value
)
980 m_assembler
.testl_rr(left
, left
);
982 m_assembler
.cmpl_ir(right
.m_value
, left
);
983 m_assembler
.setCC_r(x86Condition(cond
), dest
);
984 m_assembler
.movzbl_rr(dest
, dest
);
988 // The mask should be optional... paerhaps the argument order should be
989 // dest-src, operations always have a dest? ... possibly not true, considering
990 // asm ops like test, or pseudo ops like pop().
992 void setTest8(Condition cond
, Address address
, Imm32 mask
, RegisterID dest
)
994 if (mask
.m_value
== -1)
995 m_assembler
.cmpb_im(0, address
.offset
, address
.base
);
997 m_assembler
.testb_im(mask
.m_value
, address
.offset
, address
.base
);
998 m_assembler
.setCC_r(x86Condition(cond
), dest
);
999 m_assembler
.movzbl_rr(dest
, dest
);
1002 void setTest32(Condition cond
, Address address
, Imm32 mask
, RegisterID dest
)
1004 if (mask
.m_value
== -1)
1005 m_assembler
.cmpl_im(0, address
.offset
, address
.base
);
1007 m_assembler
.testl_i32m(mask
.m_value
, address
.offset
, address
.base
);
1008 m_assembler
.setCC_r(x86Condition(cond
), dest
);
1009 m_assembler
.movzbl_rr(dest
, dest
);
1013 X86Assembler::Condition
x86Condition(Condition cond
)
1015 return static_cast<X86Assembler::Condition
>(cond
);
1019 // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
1020 // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
1021 friend class MacroAssemblerX86
;
// NOTE(review): the bodies below are incomplete in this copy — the inline
// cpuid assembly, several #if guards, and the enum body are missing, so the
// surviving fragments are annotated rather than reconstructed.
1026 // All X86 Macs are guaranteed to support at least SSE2,
1027 static bool isSSE2Present()
1032 #else // OS(MAC_OS_X)
// Lazily-computed tri-state cache for the SSE2 runtime check.
1034 enum SSE2CheckState
{
1040 static bool isSSE2Present()
// First call: run the feature check and cache the result.
1042 if (s_sse2CheckState
== NotCheckedSSE2
) {
1043 // Default the flags value to zero; if the compiler is
1044 // not MSVC or GCC we will read this as SSE2 not present.
// MSVC-style inline asm fragment: cpuid leaf 1 reports the feature flags.
1048 mov eax
, 1 // cpuid function 1 gives us the standard feature set
// GCC asm clobber list for the cpuid sequence.
1061 : "%eax", "%ecx", "%edx"
// SSE2 support is reported in EDX bit 26 of cpuid leaf 1.
1064 static const int SSE2FeatureBit
= 1 << 26;
1065 s_sse2CheckState
= (flags
& SSE2FeatureBit
) ? HasSSE2
: NoSSE2
;
1068 ASSERT(s_sse2CheckState
!= NotCheckedSSE2
);
1070 return s_sse2CheckState
== HasSSE2
;
// Definition of the cached check state (storage declared per-class).
1073 static SSE2CheckState s_sse2CheckState
;
1075 #endif // OS(MAC_OS_X)
1076 #elif !defined(NDEBUG) // CPU(X86)
1078 // On x86-64 we should never be checking for SSE2 in a non-debug build,
1079 // but non debug add this method to keep the asserts above happy.
1080 static bool isSSE2Present()
1090 #endif // ENABLE(ASSEMBLER)
1092 #endif // MacroAssemblerX86Common_h