2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef X86Assembler_h
27 #define X86Assembler_h
29 #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
31 #include "AssemblerBuffer.h"
33 #include <wtf/Assertions.h>
34 #include <wtf/Vector.h>
// Returns true if |value| survives a round-trip through a signed 8-bit
// integer, i.e. the encoder may use the sign-extended imm8 (Ib) form of an
// instruction instead of the full imm32 (Iz) form.
inline bool CAN_SIGN_EXTEND_8_32(int32_t value)
{
    return value == static_cast<int32_t>(static_cast<signed char>(value));
}
40 namespace X86Registers
{
77 typedef X86Registers::RegisterID RegisterID
;
78 typedef X86Registers::XMMRegisterID XMMRegisterID
;
79 typedef XMMRegisterID FPRegisterID
;
99 ConditionC
= ConditionB
,
100 ConditionNC
= ConditionAE
,
109 OP_2BYTE_ESCAPE
= 0x0F,
114 PRE_PREDICT_BRANCH_NOT_TAKEN
= 0x2E,
125 OP_MOVSXD_GvEv
= 0x63,
127 PRE_OPERAND_SIZE
= 0x66,
130 OP_IMUL_GvEvIz
= 0x69,
131 OP_GROUP1_EbIb
= 0x80,
132 OP_GROUP1_EvIz
= 0x81,
133 OP_GROUP1_EvIb
= 0x83,
139 OP_GROUP1A_Ev
= 0x8F,
144 OP_GROUP2_EvIb
= 0xC1,
146 OP_GROUP11_EvIz
= 0xC7,
148 OP_GROUP2_Ev1
= 0xD1,
149 OP_GROUP2_EvCL
= 0xD3,
150 OP_CALL_rel32
= 0xE8,
154 OP_GROUP3_EbIb
= 0xF6,
156 OP_GROUP3_EvIz
= 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
161 OP2_MOVSD_VsdWsd
= 0x10,
162 OP2_MOVSD_WsdVsd
= 0x11,
163 OP2_CVTSI2SD_VsdEd
= 0x2A,
164 OP2_CVTTSD2SI_GdWsd
= 0x2C,
165 OP2_UCOMISD_VsdWsd
= 0x2E,
166 OP2_ADDSD_VsdWsd
= 0x58,
167 OP2_MULSD_VsdWsd
= 0x59,
168 OP2_SUBSD_VsdWsd
= 0x5C,
169 OP2_DIVSD_VsdWsd
= 0x5E,
170 OP2_SQRTSD_VsdWsd
= 0x51,
171 OP2_XORPD_VpdWpd
= 0x57,
172 OP2_MOVD_VdEd
= 0x6E,
173 OP2_MOVD_EdVd
= 0x7E,
174 OP2_JCC_rel32
= 0x80,
176 OP2_IMUL_GvEv
= 0xAF,
177 OP2_MOVZX_GvEb
= 0xB6,
178 OP2_MOVZX_GvEw
= 0xB7,
179 OP2_PEXTRW_GdUdIb
= 0xC5,
182 TwoByteOpcodeID
jccRel32(Condition cond
)
184 return (TwoByteOpcodeID
)(OP2_JCC_rel32
+ cond
);
187 TwoByteOpcodeID
setccOpcode(Condition cond
)
189 return (TwoByteOpcodeID
)(OP_SETCC
+ cond
);
219 class X86InstructionFormatter
;
223 friend class X86Assembler
;
224 friend class X86InstructionFormatter
;
241 friend class X86Assembler
;
242 friend class X86InstructionFormatter
;
250 bool isUsed() const { return m_used
; }
251 void used() { m_used
= true; }
257 ASSERT(m_offset
== offset
);
268 size_t size() const { return m_formatter
.size(); }
272 void push_r(RegisterID reg
)
274 m_formatter
.oneByteOp(OP_PUSH_EAX
, reg
);
277 void pop_r(RegisterID reg
)
279 m_formatter
.oneByteOp(OP_POP_EAX
, reg
);
282 void push_i32(int imm
)
284 m_formatter
.oneByteOp(OP_PUSH_Iz
);
285 m_formatter
.immediate32(imm
);
288 void push_m(int offset
, RegisterID base
)
290 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_PUSH
, base
, offset
);
293 void pop_m(int offset
, RegisterID base
)
295 m_formatter
.oneByteOp(OP_GROUP1A_Ev
, GROUP1A_OP_POP
, base
, offset
);
298 // Arithmetic operations:
301 void adcl_im(int imm
, void* addr
)
303 if (CAN_SIGN_EXTEND_8_32(imm
)) {
304 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADC
, addr
);
305 m_formatter
.immediate8(imm
);
307 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADC
, addr
);
308 m_formatter
.immediate32(imm
);
313 void addl_rr(RegisterID src
, RegisterID dst
)
315 m_formatter
.oneByteOp(OP_ADD_EvGv
, src
, dst
);
318 void addl_mr(int offset
, RegisterID base
, RegisterID dst
)
320 m_formatter
.oneByteOp(OP_ADD_GvEv
, dst
, base
, offset
);
323 void addl_rm(RegisterID src
, int offset
, RegisterID base
)
325 m_formatter
.oneByteOp(OP_ADD_EvGv
, src
, base
, offset
);
328 void addl_ir(int imm
, RegisterID dst
)
330 if (CAN_SIGN_EXTEND_8_32(imm
)) {
331 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, dst
);
332 m_formatter
.immediate8(imm
);
334 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, dst
);
335 m_formatter
.immediate32(imm
);
339 void addl_im(int imm
, int offset
, RegisterID base
)
341 if (CAN_SIGN_EXTEND_8_32(imm
)) {
342 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, base
, offset
);
343 m_formatter
.immediate8(imm
);
345 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, base
, offset
);
346 m_formatter
.immediate32(imm
);
351 void addq_rr(RegisterID src
, RegisterID dst
)
353 m_formatter
.oneByteOp64(OP_ADD_EvGv
, src
, dst
);
356 void addq_ir(int imm
, RegisterID dst
)
358 if (CAN_SIGN_EXTEND_8_32(imm
)) {
359 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, dst
);
360 m_formatter
.immediate8(imm
);
362 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, dst
);
363 m_formatter
.immediate32(imm
);
367 void addq_im(int imm
, int offset
, RegisterID base
)
369 if (CAN_SIGN_EXTEND_8_32(imm
)) {
370 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, base
, offset
);
371 m_formatter
.immediate8(imm
);
373 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, base
, offset
);
374 m_formatter
.immediate32(imm
);
378 void addl_im(int imm
, void* addr
)
380 if (CAN_SIGN_EXTEND_8_32(imm
)) {
381 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, addr
);
382 m_formatter
.immediate8(imm
);
384 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, addr
);
385 m_formatter
.immediate32(imm
);
390 void andl_rr(RegisterID src
, RegisterID dst
)
392 m_formatter
.oneByteOp(OP_AND_EvGv
, src
, dst
);
395 void andl_mr(int offset
, RegisterID base
, RegisterID dst
)
397 m_formatter
.oneByteOp(OP_AND_GvEv
, dst
, base
, offset
);
400 void andl_rm(RegisterID src
, int offset
, RegisterID base
)
402 m_formatter
.oneByteOp(OP_AND_EvGv
, src
, base
, offset
);
405 void andl_ir(int imm
, RegisterID dst
)
407 if (CAN_SIGN_EXTEND_8_32(imm
)) {
408 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_AND
, dst
);
409 m_formatter
.immediate8(imm
);
411 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_AND
, dst
);
412 m_formatter
.immediate32(imm
);
416 void andl_im(int imm
, int offset
, RegisterID base
)
418 if (CAN_SIGN_EXTEND_8_32(imm
)) {
419 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_AND
, base
, offset
);
420 m_formatter
.immediate8(imm
);
422 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_AND
, base
, offset
);
423 m_formatter
.immediate32(imm
);
428 void andq_rr(RegisterID src
, RegisterID dst
)
430 m_formatter
.oneByteOp64(OP_AND_EvGv
, src
, dst
);
433 void andq_ir(int imm
, RegisterID dst
)
435 if (CAN_SIGN_EXTEND_8_32(imm
)) {
436 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_AND
, dst
);
437 m_formatter
.immediate8(imm
);
439 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_AND
, dst
);
440 m_formatter
.immediate32(imm
);
444 void andl_im(int imm
, void* addr
)
446 if (CAN_SIGN_EXTEND_8_32(imm
)) {
447 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_AND
, addr
);
448 m_formatter
.immediate8(imm
);
450 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_AND
, addr
);
451 m_formatter
.immediate32(imm
);
456 void negl_r(RegisterID dst
)
458 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NEG
, dst
);
461 void negl_m(int offset
, RegisterID base
)
463 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NEG
, base
, offset
);
466 void notl_r(RegisterID dst
)
468 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NOT
, dst
);
471 void notl_m(int offset
, RegisterID base
)
473 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NOT
, base
, offset
);
476 void orl_rr(RegisterID src
, RegisterID dst
)
478 m_formatter
.oneByteOp(OP_OR_EvGv
, src
, dst
);
481 void orl_mr(int offset
, RegisterID base
, RegisterID dst
)
483 m_formatter
.oneByteOp(OP_OR_GvEv
, dst
, base
, offset
);
486 void orl_rm(RegisterID src
, int offset
, RegisterID base
)
488 m_formatter
.oneByteOp(OP_OR_EvGv
, src
, base
, offset
);
491 void orl_ir(int imm
, RegisterID dst
)
493 if (CAN_SIGN_EXTEND_8_32(imm
)) {
494 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_OR
, dst
);
495 m_formatter
.immediate8(imm
);
497 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_OR
, dst
);
498 m_formatter
.immediate32(imm
);
502 void orl_im(int imm
, int offset
, RegisterID base
)
504 if (CAN_SIGN_EXTEND_8_32(imm
)) {
505 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_OR
, base
, offset
);
506 m_formatter
.immediate8(imm
);
508 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_OR
, base
, offset
);
509 m_formatter
.immediate32(imm
);
514 void orq_rr(RegisterID src
, RegisterID dst
)
516 m_formatter
.oneByteOp64(OP_OR_EvGv
, src
, dst
);
519 void orq_ir(int imm
, RegisterID dst
)
521 if (CAN_SIGN_EXTEND_8_32(imm
)) {
522 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_OR
, dst
);
523 m_formatter
.immediate8(imm
);
525 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_OR
, dst
);
526 m_formatter
.immediate32(imm
);
530 void orl_im(int imm
, void* addr
)
532 if (CAN_SIGN_EXTEND_8_32(imm
)) {
533 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_OR
, addr
);
534 m_formatter
.immediate8(imm
);
536 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_OR
, addr
);
537 m_formatter
.immediate32(imm
);
542 void subl_rr(RegisterID src
, RegisterID dst
)
544 m_formatter
.oneByteOp(OP_SUB_EvGv
, src
, dst
);
547 void subl_mr(int offset
, RegisterID base
, RegisterID dst
)
549 m_formatter
.oneByteOp(OP_SUB_GvEv
, dst
, base
, offset
);
552 void subl_rm(RegisterID src
, int offset
, RegisterID base
)
554 m_formatter
.oneByteOp(OP_SUB_EvGv
, src
, base
, offset
);
557 void subl_ir(int imm
, RegisterID dst
)
559 if (CAN_SIGN_EXTEND_8_32(imm
)) {
560 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, dst
);
561 m_formatter
.immediate8(imm
);
563 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, dst
);
564 m_formatter
.immediate32(imm
);
568 void subl_im(int imm
, int offset
, RegisterID base
)
570 if (CAN_SIGN_EXTEND_8_32(imm
)) {
571 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, base
, offset
);
572 m_formatter
.immediate8(imm
);
574 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, base
, offset
);
575 m_formatter
.immediate32(imm
);
580 void subq_rr(RegisterID src
, RegisterID dst
)
582 m_formatter
.oneByteOp64(OP_SUB_EvGv
, src
, dst
);
585 void subq_ir(int imm
, RegisterID dst
)
587 if (CAN_SIGN_EXTEND_8_32(imm
)) {
588 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, dst
);
589 m_formatter
.immediate8(imm
);
591 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, dst
);
592 m_formatter
.immediate32(imm
);
596 void subl_im(int imm
, void* addr
)
598 if (CAN_SIGN_EXTEND_8_32(imm
)) {
599 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, addr
);
600 m_formatter
.immediate8(imm
);
602 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, addr
);
603 m_formatter
.immediate32(imm
);
608 void xorl_rr(RegisterID src
, RegisterID dst
)
610 m_formatter
.oneByteOp(OP_XOR_EvGv
, src
, dst
);
613 void xorl_mr(int offset
, RegisterID base
, RegisterID dst
)
615 m_formatter
.oneByteOp(OP_XOR_GvEv
, dst
, base
, offset
);
618 void xorl_rm(RegisterID src
, int offset
, RegisterID base
)
620 m_formatter
.oneByteOp(OP_XOR_EvGv
, src
, base
, offset
);
623 void xorl_im(int imm
, int offset
, RegisterID base
)
625 if (CAN_SIGN_EXTEND_8_32(imm
)) {
626 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_XOR
, base
, offset
);
627 m_formatter
.immediate8(imm
);
629 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_XOR
, base
, offset
);
630 m_formatter
.immediate32(imm
);
634 void xorl_ir(int imm
, RegisterID dst
)
636 if (CAN_SIGN_EXTEND_8_32(imm
)) {
637 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_XOR
, dst
);
638 m_formatter
.immediate8(imm
);
640 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_XOR
, dst
);
641 m_formatter
.immediate32(imm
);
646 void xorq_rr(RegisterID src
, RegisterID dst
)
648 m_formatter
.oneByteOp64(OP_XOR_EvGv
, src
, dst
);
651 void xorq_ir(int imm
, RegisterID dst
)
653 if (CAN_SIGN_EXTEND_8_32(imm
)) {
654 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_XOR
, dst
);
655 m_formatter
.immediate8(imm
);
657 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_XOR
, dst
);
658 m_formatter
.immediate32(imm
);
663 void sarl_i8r(int imm
, RegisterID dst
)
666 m_formatter
.oneByteOp(OP_GROUP2_Ev1
, GROUP2_OP_SAR
, dst
);
668 m_formatter
.oneByteOp(OP_GROUP2_EvIb
, GROUP2_OP_SAR
, dst
);
669 m_formatter
.immediate8(imm
);
673 void sarl_CLr(RegisterID dst
)
675 m_formatter
.oneByteOp(OP_GROUP2_EvCL
, GROUP2_OP_SAR
, dst
);
678 void shrl_i8r(int imm
, RegisterID dst
)
681 m_formatter
.oneByteOp(OP_GROUP2_Ev1
, GROUP2_OP_SHR
, dst
);
683 m_formatter
.oneByteOp(OP_GROUP2_EvIb
, GROUP2_OP_SHR
, dst
);
684 m_formatter
.immediate8(imm
);
688 void shrl_CLr(RegisterID dst
)
690 m_formatter
.oneByteOp(OP_GROUP2_EvCL
, GROUP2_OP_SHR
, dst
);
693 void shll_i8r(int imm
, RegisterID dst
)
696 m_formatter
.oneByteOp(OP_GROUP2_Ev1
, GROUP2_OP_SHL
, dst
);
698 m_formatter
.oneByteOp(OP_GROUP2_EvIb
, GROUP2_OP_SHL
, dst
);
699 m_formatter
.immediate8(imm
);
703 void shll_CLr(RegisterID dst
)
705 m_formatter
.oneByteOp(OP_GROUP2_EvCL
, GROUP2_OP_SHL
, dst
);
709 void sarq_CLr(RegisterID dst
)
711 m_formatter
.oneByteOp64(OP_GROUP2_EvCL
, GROUP2_OP_SAR
, dst
);
714 void sarq_i8r(int imm
, RegisterID dst
)
717 m_formatter
.oneByteOp64(OP_GROUP2_Ev1
, GROUP2_OP_SAR
, dst
);
719 m_formatter
.oneByteOp64(OP_GROUP2_EvIb
, GROUP2_OP_SAR
, dst
);
720 m_formatter
.immediate8(imm
);
725 void imull_rr(RegisterID src
, RegisterID dst
)
727 m_formatter
.twoByteOp(OP2_IMUL_GvEv
, dst
, src
);
730 void imull_mr(int offset
, RegisterID base
, RegisterID dst
)
732 m_formatter
.twoByteOp(OP2_IMUL_GvEv
, dst
, base
, offset
);
735 void imull_i32r(RegisterID src
, int32_t value
, RegisterID dst
)
737 m_formatter
.oneByteOp(OP_IMUL_GvEvIz
, dst
, src
);
738 m_formatter
.immediate32(value
);
741 void idivl_r(RegisterID dst
)
743 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_IDIV
, dst
);
748 void cmpl_rr(RegisterID src
, RegisterID dst
)
750 m_formatter
.oneByteOp(OP_CMP_EvGv
, src
, dst
);
753 void cmpl_rm(RegisterID src
, int offset
, RegisterID base
)
755 m_formatter
.oneByteOp(OP_CMP_EvGv
, src
, base
, offset
);
758 void cmpl_mr(int offset
, RegisterID base
, RegisterID src
)
760 m_formatter
.oneByteOp(OP_CMP_GvEv
, src
, base
, offset
);
763 void cmpl_ir(int imm
, RegisterID dst
)
765 if (CAN_SIGN_EXTEND_8_32(imm
)) {
766 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, dst
);
767 m_formatter
.immediate8(imm
);
769 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, dst
);
770 m_formatter
.immediate32(imm
);
774 void cmpl_ir_force32(int imm
, RegisterID dst
)
776 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, dst
);
777 m_formatter
.immediate32(imm
);
780 void cmpl_im(int imm
, int offset
, RegisterID base
)
782 if (CAN_SIGN_EXTEND_8_32(imm
)) {
783 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, offset
);
784 m_formatter
.immediate8(imm
);
786 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, offset
);
787 m_formatter
.immediate32(imm
);
791 void cmpb_im(int imm
, int offset
, RegisterID base
)
793 m_formatter
.oneByteOp(OP_GROUP1_EbIb
, GROUP1_OP_CMP
, base
, offset
);
794 m_formatter
.immediate8(imm
);
797 void cmpb_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
799 m_formatter
.oneByteOp(OP_GROUP1_EbIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
800 m_formatter
.immediate8(imm
);
803 void cmpl_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
805 if (CAN_SIGN_EXTEND_8_32(imm
)) {
806 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
807 m_formatter
.immediate8(imm
);
809 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
810 m_formatter
.immediate32(imm
);
814 void cmpl_im_force32(int imm
, int offset
, RegisterID base
)
816 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, offset
);
817 m_formatter
.immediate32(imm
);
821 void cmpq_rr(RegisterID src
, RegisterID dst
)
823 m_formatter
.oneByteOp64(OP_CMP_EvGv
, src
, dst
);
826 void cmpq_rm(RegisterID src
, int offset
, RegisterID base
)
828 m_formatter
.oneByteOp64(OP_CMP_EvGv
, src
, base
, offset
);
831 void cmpq_mr(int offset
, RegisterID base
, RegisterID src
)
833 m_formatter
.oneByteOp64(OP_CMP_GvEv
, src
, base
, offset
);
836 void cmpq_ir(int imm
, RegisterID dst
)
838 if (CAN_SIGN_EXTEND_8_32(imm
)) {
839 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, dst
);
840 m_formatter
.immediate8(imm
);
842 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, dst
);
843 m_formatter
.immediate32(imm
);
847 void cmpq_im(int imm
, int offset
, RegisterID base
)
849 if (CAN_SIGN_EXTEND_8_32(imm
)) {
850 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, offset
);
851 m_formatter
.immediate8(imm
);
853 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, offset
);
854 m_formatter
.immediate32(imm
);
858 void cmpq_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
860 if (CAN_SIGN_EXTEND_8_32(imm
)) {
861 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
862 m_formatter
.immediate8(imm
);
864 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
865 m_formatter
.immediate32(imm
);
869 void cmpl_rm(RegisterID reg
, void* addr
)
871 m_formatter
.oneByteOp(OP_CMP_EvGv
, reg
, addr
);
874 void cmpl_im(int imm
, void* addr
)
876 if (CAN_SIGN_EXTEND_8_32(imm
)) {
877 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, addr
);
878 m_formatter
.immediate8(imm
);
880 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, addr
);
881 m_formatter
.immediate32(imm
);
886 void cmpw_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
888 m_formatter
.prefix(PRE_OPERAND_SIZE
);
889 m_formatter
.oneByteOp(OP_CMP_EvGv
, src
, base
, index
, scale
, offset
);
892 void cmpw_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
894 if (CAN_SIGN_EXTEND_8_32(imm
)) {
895 m_formatter
.prefix(PRE_OPERAND_SIZE
);
896 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
897 m_formatter
.immediate8(imm
);
899 m_formatter
.prefix(PRE_OPERAND_SIZE
);
900 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
901 m_formatter
.immediate16(imm
);
905 void testl_rr(RegisterID src
, RegisterID dst
)
907 m_formatter
.oneByteOp(OP_TEST_EvGv
, src
, dst
);
910 void testl_i32r(int imm
, RegisterID dst
)
912 m_formatter
.oneByteOp(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, dst
);
913 m_formatter
.immediate32(imm
);
916 void testl_i32m(int imm
, int offset
, RegisterID base
)
918 m_formatter
.oneByteOp(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, offset
);
919 m_formatter
.immediate32(imm
);
922 void testb_im(int imm
, int offset
, RegisterID base
)
924 m_formatter
.oneByteOp(OP_GROUP3_EbIb
, GROUP3_OP_TEST
, base
, offset
);
925 m_formatter
.immediate8(imm
);
928 void testb_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
930 m_formatter
.oneByteOp(OP_GROUP3_EbIb
, GROUP3_OP_TEST
, base
, index
, scale
, offset
);
931 m_formatter
.immediate8(imm
);
934 void testl_i32m(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
936 m_formatter
.oneByteOp(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, index
, scale
, offset
);
937 m_formatter
.immediate32(imm
);
941 void testq_rr(RegisterID src
, RegisterID dst
)
943 m_formatter
.oneByteOp64(OP_TEST_EvGv
, src
, dst
);
946 void testq_i32r(int imm
, RegisterID dst
)
948 m_formatter
.oneByteOp64(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, dst
);
949 m_formatter
.immediate32(imm
);
952 void testq_i32m(int imm
, int offset
, RegisterID base
)
954 m_formatter
.oneByteOp64(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, offset
);
955 m_formatter
.immediate32(imm
);
958 void testq_i32m(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
960 m_formatter
.oneByteOp64(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, index
, scale
, offset
);
961 m_formatter
.immediate32(imm
);
965 void testw_rr(RegisterID src
, RegisterID dst
)
967 m_formatter
.prefix(PRE_OPERAND_SIZE
);
968 m_formatter
.oneByteOp(OP_TEST_EvGv
, src
, dst
);
971 void testb_i8r(int imm
, RegisterID dst
)
973 m_formatter
.oneByteOp8(OP_GROUP3_EbIb
, GROUP3_OP_TEST
, dst
);
974 m_formatter
.immediate8(imm
);
977 void setCC_r(Condition cond
, RegisterID dst
)
979 m_formatter
.twoByteOp8(setccOpcode(cond
), (GroupOpcodeID
)0, dst
);
982 void sete_r(RegisterID dst
)
984 m_formatter
.twoByteOp8(setccOpcode(ConditionE
), (GroupOpcodeID
)0, dst
);
987 void setz_r(RegisterID dst
)
992 void setne_r(RegisterID dst
)
994 m_formatter
.twoByteOp8(setccOpcode(ConditionNE
), (GroupOpcodeID
)0, dst
);
997 void setnz_r(RegisterID dst
)
1002 // Various move ops:
1006 m_formatter
.oneByteOp(OP_CDQ
);
1009 void xchgl_rr(RegisterID src
, RegisterID dst
)
1011 m_formatter
.oneByteOp(OP_XCHG_EvGv
, src
, dst
);
1015 void xchgq_rr(RegisterID src
, RegisterID dst
)
1017 m_formatter
.oneByteOp64(OP_XCHG_EvGv
, src
, dst
);
1021 void movl_rr(RegisterID src
, RegisterID dst
)
1023 m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, dst
);
1026 void movl_rm(RegisterID src
, int offset
, RegisterID base
)
1028 m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, base
, offset
);
1031 void movl_rm_disp32(RegisterID src
, int offset
, RegisterID base
)
1033 m_formatter
.oneByteOp_disp32(OP_MOV_EvGv
, src
, base
, offset
);
1036 void movl_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1038 m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, base
, index
, scale
, offset
);
1041 void movl_mEAX(void* addr
)
1043 m_formatter
.oneByteOp(OP_MOV_EAXOv
);
1045 m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
));
1047 m_formatter
.immediate32(reinterpret_cast<int>(addr
));
1051 void movl_mr(int offset
, RegisterID base
, RegisterID dst
)
1053 m_formatter
.oneByteOp(OP_MOV_GvEv
, dst
, base
, offset
);
1056 void movl_mr_disp32(int offset
, RegisterID base
, RegisterID dst
)
1058 m_formatter
.oneByteOp_disp32(OP_MOV_GvEv
, dst
, base
, offset
);
1061 void movl_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, RegisterID dst
)
1063 m_formatter
.oneByteOp(OP_MOV_GvEv
, dst
, base
, index
, scale
, offset
);
1066 void movl_i32r(int imm
, RegisterID dst
)
1068 m_formatter
.oneByteOp(OP_MOV_EAXIv
, dst
);
1069 m_formatter
.immediate32(imm
);
1072 void movl_i32m(int imm
, int offset
, RegisterID base
)
1074 m_formatter
.oneByteOp(OP_GROUP11_EvIz
, GROUP11_MOV
, base
, offset
);
1075 m_formatter
.immediate32(imm
);
1078 void movl_EAXm(void* addr
)
1080 m_formatter
.oneByteOp(OP_MOV_OvEAX
);
1082 m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
));
1084 m_formatter
.immediate32(reinterpret_cast<int>(addr
));
1089 void movq_rr(RegisterID src
, RegisterID dst
)
1091 m_formatter
.oneByteOp64(OP_MOV_EvGv
, src
, dst
);
1094 void movq_rm(RegisterID src
, int offset
, RegisterID base
)
1096 m_formatter
.oneByteOp64(OP_MOV_EvGv
, src
, base
, offset
);
1099 void movq_rm_disp32(RegisterID src
, int offset
, RegisterID base
)
1101 m_formatter
.oneByteOp64_disp32(OP_MOV_EvGv
, src
, base
, offset
);
1104 void movq_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1106 m_formatter
.oneByteOp64(OP_MOV_EvGv
, src
, base
, index
, scale
, offset
);
1109 void movq_mEAX(void* addr
)
1111 m_formatter
.oneByteOp64(OP_MOV_EAXOv
);
1112 m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
));
1115 void movq_EAXm(void* addr
)
1117 m_formatter
.oneByteOp64(OP_MOV_OvEAX
);
1118 m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
));
1121 void movq_mr(int offset
, RegisterID base
, RegisterID dst
)
1123 m_formatter
.oneByteOp64(OP_MOV_GvEv
, dst
, base
, offset
);
1126 void movq_mr_disp32(int offset
, RegisterID base
, RegisterID dst
)
1128 m_formatter
.oneByteOp64_disp32(OP_MOV_GvEv
, dst
, base
, offset
);
1131 void movq_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, RegisterID dst
)
1133 m_formatter
.oneByteOp64(OP_MOV_GvEv
, dst
, base
, index
, scale
, offset
);
1136 void movq_i32m(int imm
, int offset
, RegisterID base
)
1138 m_formatter
.oneByteOp64(OP_GROUP11_EvIz
, GROUP11_MOV
, base
, offset
);
1139 m_formatter
.immediate32(imm
);
1142 void movq_i64r(int64_t imm
, RegisterID dst
)
1144 m_formatter
.oneByteOp64(OP_MOV_EAXIv
, dst
);
1145 m_formatter
.immediate64(imm
);
1148 void movsxd_rr(RegisterID src
, RegisterID dst
)
1150 m_formatter
.oneByteOp64(OP_MOVSXD_GvEv
, dst
, src
);
1155 void movl_rm(RegisterID src
, void* addr
)
1157 if (src
== X86Registers::eax
)
1160 m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, addr
);
1163 void movl_mr(void* addr
, RegisterID dst
)
1165 if (dst
== X86Registers::eax
)
1168 m_formatter
.oneByteOp(OP_MOV_GvEv
, dst
, addr
);
1171 void movl_i32m(int imm
, void* addr
)
1173 m_formatter
.oneByteOp(OP_GROUP11_EvIz
, GROUP11_MOV
, addr
);
1174 m_formatter
.immediate32(imm
);
1178 void movzwl_mr(int offset
, RegisterID base
, RegisterID dst
)
1180 m_formatter
.twoByteOp(OP2_MOVZX_GvEw
, dst
, base
, offset
);
1183 void movzwl_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, RegisterID dst
)
1185 m_formatter
.twoByteOp(OP2_MOVZX_GvEw
, dst
, base
, index
, scale
, offset
);
1188 void movzbl_rr(RegisterID src
, RegisterID dst
)
1190 // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
1191 // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
1192 // REX prefixes are defined to be silently ignored by the processor.
1193 m_formatter
.twoByteOp8(OP2_MOVZX_GvEb
, dst
, src
);
1196 void leal_mr(int offset
, RegisterID base
, RegisterID dst
)
1198 m_formatter
.oneByteOp(OP_LEA
, dst
, base
, offset
);
1201 void leaq_mr(int offset
, RegisterID base
, RegisterID dst
)
1203 m_formatter
.oneByteOp64(OP_LEA
, dst
, base
, offset
);
1211 m_formatter
.oneByteOp(OP_CALL_rel32
);
1212 return m_formatter
.immediateRel32();
1215 JmpSrc
call(RegisterID dst
)
1217 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_CALLN
, dst
);
1218 return JmpSrc(m_formatter
.size());
1221 void call_m(int offset
, RegisterID base
)
1223 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_CALLN
, base
, offset
);
1228 m_formatter
.oneByteOp(OP_JMP_rel32
);
1229 return m_formatter
.immediateRel32();
1232 // Return a JmpSrc so we have a label to the jump, so we can use this
1233 // To make a tail recursive call on x86-64. The MacroAssembler
1234 // really shouldn't wrap this as a Jump, since it can't be linked. :-/
1235 JmpSrc
jmp_r(RegisterID dst
)
1237 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_JMPN
, dst
);
1238 return JmpSrc(m_formatter
.size());
1241 void jmp_m(int offset
, RegisterID base
)
1243 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_JMPN
, base
, offset
);
1248 m_formatter
.twoByteOp(jccRel32(ConditionNE
));
1249 return m_formatter
.immediateRel32();
1259 m_formatter
.twoByteOp(jccRel32(ConditionE
));
1260 return m_formatter
.immediateRel32();
1270 m_formatter
.twoByteOp(jccRel32(ConditionL
));
1271 return m_formatter
.immediateRel32();
1276 m_formatter
.twoByteOp(jccRel32(ConditionB
));
1277 return m_formatter
.immediateRel32();
1282 m_formatter
.twoByteOp(jccRel32(ConditionLE
));
1283 return m_formatter
.immediateRel32();
1288 m_formatter
.twoByteOp(jccRel32(ConditionBE
));
1289 return m_formatter
.immediateRel32();
1294 m_formatter
.twoByteOp(jccRel32(ConditionGE
));
1295 return m_formatter
.immediateRel32();
1300 m_formatter
.twoByteOp(jccRel32(ConditionG
));
1301 return m_formatter
.immediateRel32();
1306 m_formatter
.twoByteOp(jccRel32(ConditionA
));
1307 return m_formatter
.immediateRel32();
1312 m_formatter
.twoByteOp(jccRel32(ConditionAE
));
1313 return m_formatter
.immediateRel32();
1318 m_formatter
.twoByteOp(jccRel32(ConditionO
));
1319 return m_formatter
.immediateRel32();
1324 m_formatter
.twoByteOp(jccRel32(ConditionP
));
1325 return m_formatter
.immediateRel32();
1330 m_formatter
.twoByteOp(jccRel32(ConditionS
));
1331 return m_formatter
.immediateRel32();
1334 JmpSrc
jCC(Condition cond
)
1336 m_formatter
.twoByteOp(jccRel32(cond
));
1337 return m_formatter
.immediateRel32();
1342 void addsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1344 m_formatter
.prefix(PRE_SSE_F2
);
1345 m_formatter
.twoByteOp(OP2_ADDSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1348 void addsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1350 m_formatter
.prefix(PRE_SSE_F2
);
1351 m_formatter
.twoByteOp(OP2_ADDSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1354 void cvtsi2sd_rr(RegisterID src
, XMMRegisterID dst
)
1356 m_formatter
.prefix(PRE_SSE_F2
);
1357 m_formatter
.twoByteOp(OP2_CVTSI2SD_VsdEd
, (RegisterID
)dst
, src
);
1360 void cvtsi2sd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1362 m_formatter
.prefix(PRE_SSE_F2
);
1363 m_formatter
.twoByteOp(OP2_CVTSI2SD_VsdEd
, (RegisterID
)dst
, base
, offset
);
1367 void cvtsi2sd_mr(void* address
, XMMRegisterID dst
)
1369 m_formatter
.prefix(PRE_SSE_F2
);
1370 m_formatter
.twoByteOp(OP2_CVTSI2SD_VsdEd
, (RegisterID
)dst
, address
);
1374 void cvttsd2si_rr(XMMRegisterID src
, RegisterID dst
)
1376 m_formatter
.prefix(PRE_SSE_F2
);
1377 m_formatter
.twoByteOp(OP2_CVTTSD2SI_GdWsd
, dst
, (RegisterID
)src
);
1380 void movd_rr(XMMRegisterID src
, RegisterID dst
)
1382 m_formatter
.prefix(PRE_SSE_66
);
1383 m_formatter
.twoByteOp(OP2_MOVD_EdVd
, (RegisterID
)src
, dst
);
1387 void movq_rr(XMMRegisterID src
, RegisterID dst
)
1389 m_formatter
.prefix(PRE_SSE_66
);
1390 m_formatter
.twoByteOp64(OP2_MOVD_EdVd
, (RegisterID
)src
, dst
);
1393 void movq_rr(RegisterID src
, XMMRegisterID dst
)
1395 m_formatter
.prefix(PRE_SSE_66
);
1396 m_formatter
.twoByteOp64(OP2_MOVD_VdEd
, (RegisterID
)dst
, src
);
1400 void movsd_rm(XMMRegisterID src
, int offset
, RegisterID base
)
1402 m_formatter
.prefix(PRE_SSE_F2
);
1403 m_formatter
.twoByteOp(OP2_MOVSD_WsdVsd
, (RegisterID
)src
, base
, offset
);
1406 void movsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1408 m_formatter
.prefix(PRE_SSE_F2
);
1409 m_formatter
.twoByteOp(OP2_MOVSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1413 void movsd_mr(const void* address
, XMMRegisterID dst
)
1415 m_formatter
.prefix(PRE_SSE_F2
);
1416 m_formatter
.twoByteOp(OP2_MOVSD_VsdWsd
, (RegisterID
)dst
, address
);
1420 void mulsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1422 m_formatter
.prefix(PRE_SSE_F2
);
1423 m_formatter
.twoByteOp(OP2_MULSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1426 void mulsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1428 m_formatter
.prefix(PRE_SSE_F2
);
1429 m_formatter
.twoByteOp(OP2_MULSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1432 void pextrw_irr(int whichWord
, XMMRegisterID src
, RegisterID dst
)
1434 m_formatter
.prefix(PRE_SSE_66
);
1435 m_formatter
.twoByteOp(OP2_PEXTRW_GdUdIb
, (RegisterID
)dst
, (RegisterID
)src
);
1436 m_formatter
.immediate8(whichWord
);
1439 void subsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1441 m_formatter
.prefix(PRE_SSE_F2
);
1442 m_formatter
.twoByteOp(OP2_SUBSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1445 void subsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1447 m_formatter
.prefix(PRE_SSE_F2
);
1448 m_formatter
.twoByteOp(OP2_SUBSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1451 void ucomisd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1453 m_formatter
.prefix(PRE_SSE_66
);
1454 m_formatter
.twoByteOp(OP2_UCOMISD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1457 void ucomisd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1459 m_formatter
.prefix(PRE_SSE_66
);
1460 m_formatter
.twoByteOp(OP2_UCOMISD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1463 void divsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1465 m_formatter
.prefix(PRE_SSE_F2
);
1466 m_formatter
.twoByteOp(OP2_DIVSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1469 void divsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1471 m_formatter
.prefix(PRE_SSE_F2
);
1472 m_formatter
.twoByteOp(OP2_DIVSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1475 void xorpd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1477 m_formatter
.prefix(PRE_SSE_66
);
1478 m_formatter
.twoByteOp(OP2_XORPD_VpdWpd
, (RegisterID
)dst
, (RegisterID
)src
);
1481 void sqrtsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1483 m_formatter
.prefix(PRE_SSE_F2
);
1484 m_formatter
.twoByteOp(OP2_SQRTSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1487 // Misc instructions:
1491 m_formatter
.oneByteOp(OP_INT3
);
1496 m_formatter
.oneByteOp(OP_RET
);
1499 void predictNotTaken()
1501 m_formatter
.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN
);
1504 // Assembler admin methods:
1508 return JmpDst(m_formatter
.size());
1511 static JmpDst
labelFor(JmpSrc jump
, intptr_t offset
= 0)
1513 return JmpDst(jump
.m_offset
+ offset
);
1516 JmpDst
align(int alignment
)
1518 while (!m_formatter
.isAligned(alignment
))
1519 m_formatter
.oneByteOp(OP_HLT
);
1524 // Linking & patching:
1526 // 'link' and 'patch' methods are for use on unprotected code - such as the code
1527 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
1528 // code has been finalized it is (platform support permitting) within a non-
1529 // writable region of memory; to modify the code in an execute-only execuable
1530 // pool the 'repatch' and 'relink' methods should be used.
1532 void linkJump(JmpSrc from
, JmpDst to
)
1534 ASSERT(from
.m_offset
!= -1);
1535 ASSERT(to
.m_offset
!= -1);
1537 char* code
= reinterpret_cast<char*>(m_formatter
.data());
1538 setRel32(code
+ from
.m_offset
, code
+ to
.m_offset
);
1541 static void linkJump(void* code
, JmpSrc from
, void* to
)
1543 ASSERT(from
.m_offset
!= -1);
1545 setRel32(reinterpret_cast<char*>(code
) + from
.m_offset
, to
);
1548 static void linkCall(void* code
, JmpSrc from
, void* to
)
1550 ASSERT(from
.m_offset
!= -1);
1552 setRel32(reinterpret_cast<char*>(code
) + from
.m_offset
, to
);
1555 static void linkPointer(void* code
, JmpDst where
, void* value
)
1557 ASSERT(where
.m_offset
!= -1);
1559 setPointer(reinterpret_cast<char*>(code
) + where
.m_offset
, value
);
1562 static void relinkJump(void* from
, void* to
)
1567 static void relinkCall(void* from
, void* to
)
1572 static void repatchInt32(void* where
, int32_t value
)
1574 setInt32(where
, value
);
1577 static void repatchPointer(void* where
, void* value
)
1579 setPointer(where
, value
);
1582 static void repatchLoadPtrToLEA(void* where
)
1585 // On x86-64 pointer memory accesses require a 64-bit operand, and as such a REX prefix.
1586 // Skip over the prefix byte.
1587 where
= reinterpret_cast<char*>(where
) + 1;
1589 *reinterpret_cast<unsigned char*>(where
) = static_cast<unsigned char>(OP_LEA
);
1592 static unsigned getCallReturnOffset(JmpSrc call
)
1594 ASSERT(call
.m_offset
>= 0);
1595 return call
.m_offset
;
1598 static void* getRelocatedAddress(void* code
, JmpSrc jump
)
1600 ASSERT(jump
.m_offset
!= -1);
1602 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code
) + jump
.m_offset
);
1605 static void* getRelocatedAddress(void* code
, JmpDst destination
)
1607 ASSERT(destination
.m_offset
!= -1);
1609 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code
) + destination
.m_offset
);
1612 static int getDifferenceBetweenLabels(JmpDst src
, JmpDst dst
)
1614 return dst
.m_offset
- src
.m_offset
;
1617 static int getDifferenceBetweenLabels(JmpDst src
, JmpSrc dst
)
1619 return dst
.m_offset
- src
.m_offset
;
1622 static int getDifferenceBetweenLabels(JmpSrc src
, JmpDst dst
)
1624 return dst
.m_offset
- src
.m_offset
;
1627 void* executableCopy(ExecutablePool
* allocator
)
1629 void* copy
= m_formatter
.executableCopy(allocator
);
1636 static void setPointer(void* where
, void* value
)
1638 reinterpret_cast<void**>(where
)[-1] = value
;
1641 static void setInt32(void* where
, int32_t value
)
1643 reinterpret_cast<int32_t*>(where
)[-1] = value
;
1646 static void setRel32(void* from
, void* to
)
1648 intptr_t offset
= reinterpret_cast<intptr_t>(to
) - reinterpret_cast<intptr_t>(from
);
1649 ASSERT(offset
== static_cast<int32_t>(offset
));
1651 setInt32(from
, offset
);
1654 class X86InstructionFormatter
{
1656 static const int maxInstructionSize
= 16;
1660 // Legacy prefix bytes:
1662 // These are emmitted prior to the instruction.
1664 void prefix(OneByteOpcodeID pre
)
1666 m_buffer
.putByte(pre
);
1669 // Word-sized operands / no operand instruction formatters.
1671 // In addition to the opcode, the following operand permutations are supported:
1672 // * None - instruction takes no operands.
1673 // * One register - the low three bits of the RegisterID are added into the opcode.
1674 // * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
1675 // * Three argument ModRM - a register, and a register and an offset describing a memory operand.
1676 // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
1678 // For 32-bit x86 targets, the address operand may also be provided as a void*.
1679 // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
1681 // The twoByteOp methods plant two-byte Intel instructions sequences (first opcode byte 0x0F).
1683 void oneByteOp(OneByteOpcodeID opcode
)
1685 m_buffer
.ensureSpace(maxInstructionSize
);
1686 m_buffer
.putByteUnchecked(opcode
);
1689 void oneByteOp(OneByteOpcodeID opcode
, RegisterID reg
)
1691 m_buffer
.ensureSpace(maxInstructionSize
);
1692 emitRexIfNeeded(0, 0, reg
);
1693 m_buffer
.putByteUnchecked(opcode
+ (reg
& 7));
1696 void oneByteOp(OneByteOpcodeID opcode
, int reg
, RegisterID rm
)
1698 m_buffer
.ensureSpace(maxInstructionSize
);
1699 emitRexIfNeeded(reg
, 0, rm
);
1700 m_buffer
.putByteUnchecked(opcode
);
1701 registerModRM(reg
, rm
);
1704 void oneByteOp(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1706 m_buffer
.ensureSpace(maxInstructionSize
);
1707 emitRexIfNeeded(reg
, 0, base
);
1708 m_buffer
.putByteUnchecked(opcode
);
1709 memoryModRM(reg
, base
, offset
);
1712 void oneByteOp_disp32(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1714 m_buffer
.ensureSpace(maxInstructionSize
);
1715 emitRexIfNeeded(reg
, 0, base
);
1716 m_buffer
.putByteUnchecked(opcode
);
1717 memoryModRM_disp32(reg
, base
, offset
);
1720 void oneByteOp(OneByteOpcodeID opcode
, int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
1722 m_buffer
.ensureSpace(maxInstructionSize
);
1723 emitRexIfNeeded(reg
, index
, base
);
1724 m_buffer
.putByteUnchecked(opcode
);
1725 memoryModRM(reg
, base
, index
, scale
, offset
);
1729 void oneByteOp(OneByteOpcodeID opcode
, int reg
, void* address
)
1731 m_buffer
.ensureSpace(maxInstructionSize
);
1732 m_buffer
.putByteUnchecked(opcode
);
1733 memoryModRM(reg
, address
);
1737 void twoByteOp(TwoByteOpcodeID opcode
)
1739 m_buffer
.ensureSpace(maxInstructionSize
);
1740 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1741 m_buffer
.putByteUnchecked(opcode
);
1744 void twoByteOp(TwoByteOpcodeID opcode
, int reg
, RegisterID rm
)
1746 m_buffer
.ensureSpace(maxInstructionSize
);
1747 emitRexIfNeeded(reg
, 0, rm
);
1748 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1749 m_buffer
.putByteUnchecked(opcode
);
1750 registerModRM(reg
, rm
);
1753 void twoByteOp(TwoByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1755 m_buffer
.ensureSpace(maxInstructionSize
);
1756 emitRexIfNeeded(reg
, 0, base
);
1757 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1758 m_buffer
.putByteUnchecked(opcode
);
1759 memoryModRM(reg
, base
, offset
);
1762 void twoByteOp(TwoByteOpcodeID opcode
, int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
1764 m_buffer
.ensureSpace(maxInstructionSize
);
1765 emitRexIfNeeded(reg
, index
, base
);
1766 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1767 m_buffer
.putByteUnchecked(opcode
);
1768 memoryModRM(reg
, base
, index
, scale
, offset
);
1772 void twoByteOp(TwoByteOpcodeID opcode
, int reg
, const void* address
)
1774 m_buffer
.ensureSpace(maxInstructionSize
);
1775 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1776 m_buffer
.putByteUnchecked(opcode
);
1777 memoryModRM(reg
, address
);
1782 // Quad-word-sized operands:
1784 // Used to format 64-bit operantions, planting a REX.w prefix.
1785 // When planting d64 or f64 instructions, not requiring a REX.w prefix,
1786 // the normal (non-'64'-postfixed) formatters should be used.
1788 void oneByteOp64(OneByteOpcodeID opcode
)
1790 m_buffer
.ensureSpace(maxInstructionSize
);
1792 m_buffer
.putByteUnchecked(opcode
);
1795 void oneByteOp64(OneByteOpcodeID opcode
, RegisterID reg
)
1797 m_buffer
.ensureSpace(maxInstructionSize
);
1798 emitRexW(0, 0, reg
);
1799 m_buffer
.putByteUnchecked(opcode
+ (reg
& 7));
1802 void oneByteOp64(OneByteOpcodeID opcode
, int reg
, RegisterID rm
)
1804 m_buffer
.ensureSpace(maxInstructionSize
);
1805 emitRexW(reg
, 0, rm
);
1806 m_buffer
.putByteUnchecked(opcode
);
1807 registerModRM(reg
, rm
);
1810 void oneByteOp64(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1812 m_buffer
.ensureSpace(maxInstructionSize
);
1813 emitRexW(reg
, 0, base
);
1814 m_buffer
.putByteUnchecked(opcode
);
1815 memoryModRM(reg
, base
, offset
);
1818 void oneByteOp64_disp32(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1820 m_buffer
.ensureSpace(maxInstructionSize
);
1821 emitRexW(reg
, 0, base
);
1822 m_buffer
.putByteUnchecked(opcode
);
1823 memoryModRM_disp32(reg
, base
, offset
);
1826 void oneByteOp64(OneByteOpcodeID opcode
, int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
1828 m_buffer
.ensureSpace(maxInstructionSize
);
1829 emitRexW(reg
, index
, base
);
1830 m_buffer
.putByteUnchecked(opcode
);
1831 memoryModRM(reg
, base
, index
, scale
, offset
);
1834 void twoByteOp64(TwoByteOpcodeID opcode
, int reg
, RegisterID rm
)
1836 m_buffer
.ensureSpace(maxInstructionSize
);
1837 emitRexW(reg
, 0, rm
);
1838 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1839 m_buffer
.putByteUnchecked(opcode
);
1840 registerModRM(reg
, rm
);
1846 // These methods format byte operations. Byte operations differ from the normal
1847 // formatters in the circumstances under which they will decide to emit REX prefixes.
1848 // These should be used where any register operand signifies a byte register.
1850 // The distinction is due to the handling of register numbers in the range 4..7 on
1851 // x86-64. These register numbers may either represent the second byte of the first
1852 // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
1854 // Since ah..bh cannot be used in all permutations of operands (specifically cannot
1855 // be accessed where a REX prefix is present), these are likely best treated as
1856 // deprecated. In order to ensure the correct registers spl..dil are selected a
1857 // REX prefix will be emitted for any byte register operand in the range 4..15.
1859 // These formatters may be used in instructions where a mix of operand sizes, in which
1860 // case an unnecessary REX will be emitted, for example:
1862 // In this case a REX will be planted since edi is 7 (and were this a byte operand
1863 // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
1864 // be silently ignored by the processor.
1866 // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
1867 // is provided to check byte register operands.
1869 void oneByteOp8(OneByteOpcodeID opcode
, GroupOpcodeID groupOp
, RegisterID rm
)
1871 m_buffer
.ensureSpace(maxInstructionSize
);
1872 emitRexIf(byteRegRequiresRex(rm
), 0, 0, rm
);
1873 m_buffer
.putByteUnchecked(opcode
);
1874 registerModRM(groupOp
, rm
);
1877 void twoByteOp8(TwoByteOpcodeID opcode
, RegisterID reg
, RegisterID rm
)
1879 m_buffer
.ensureSpace(maxInstructionSize
);
1880 emitRexIf(byteRegRequiresRex(reg
)|byteRegRequiresRex(rm
), reg
, 0, rm
);
1881 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1882 m_buffer
.putByteUnchecked(opcode
);
1883 registerModRM(reg
, rm
);
1886 void twoByteOp8(TwoByteOpcodeID opcode
, GroupOpcodeID groupOp
, RegisterID rm
)
1888 m_buffer
.ensureSpace(maxInstructionSize
);
1889 emitRexIf(byteRegRequiresRex(rm
), 0, 0, rm
);
1890 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1891 m_buffer
.putByteUnchecked(opcode
);
1892 registerModRM(groupOp
, rm
);
1897 // An immedaite should be appended where appropriate after an op has been emitted.
1898 // The writes are unchecked since the opcode formatters above will have ensured space.
1900 void immediate8(int imm
)
1902 m_buffer
.putByteUnchecked(imm
);
1905 void immediate16(int imm
)
1907 m_buffer
.putShortUnchecked(imm
);
1910 void immediate32(int imm
)
1912 m_buffer
.putIntUnchecked(imm
);
1915 void immediate64(int64_t imm
)
1917 m_buffer
.putInt64Unchecked(imm
);
1920 JmpSrc
immediateRel32()
1922 m_buffer
.putIntUnchecked(0);
1923 return JmpSrc(m_buffer
.size());
1926 // Administrative methods:
1928 size_t size() const { return m_buffer
.size(); }
1929 bool isAligned(int alignment
) const { return m_buffer
.isAligned(alignment
); }
1930 void* data() const { return m_buffer
.data(); }
1931 void* executableCopy(ExecutablePool
* allocator
) { return m_buffer
.executableCopy(allocator
); }
1935 // Internals; ModRm and REX formatters.
1937 static const RegisterID noBase
= X86Registers::ebp
;
1938 static const RegisterID hasSib
= X86Registers::esp
;
1939 static const RegisterID noIndex
= X86Registers::esp
;
1941 static const RegisterID noBase2
= X86Registers::r13
;
1942 static const RegisterID hasSib2
= X86Registers::r12
;
1944 // Registers r8 & above require a REX prefixe.
1945 inline bool regRequiresRex(int reg
)
1947 return (reg
>= X86Registers::r8
);
1950 // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
1951 inline bool byteRegRequiresRex(int reg
)
1953 return (reg
>= X86Registers::esp
);
1956 // Format a REX prefix byte.
1957 inline void emitRex(bool w
, int r
, int x
, int b
)
1959 m_buffer
.putByteUnchecked(PRE_REX
| ((int)w
<< 3) | ((r
>>3)<<2) | ((x
>>3)<<1) | (b
>>3));
1962 // Used to plant a REX byte with REX.w set (for 64-bit operations).
1963 inline void emitRexW(int r
, int x
, int b
)
1965 emitRex(true, r
, x
, b
);
1968 // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
1969 // regRequiresRex() to check other registers (i.e. address base & index).
1970 inline void emitRexIf(bool condition
, int r
, int x
, int b
)
1972 if (condition
) emitRex(false, r
, x
, b
);
1975 // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
1976 inline void emitRexIfNeeded(int r
, int x
, int b
)
1978 emitRexIf(regRequiresRex(r
) || regRequiresRex(x
) || regRequiresRex(b
), r
, x
, b
);
1981 // No REX prefix bytes on 32-bit x86.
1982 inline bool regRequiresRex(int) { return false; }
1983 inline bool byteRegRequiresRex(int) { return false; }
1984 inline void emitRexIf(bool, int, int, int) {}
1985 inline void emitRexIfNeeded(int, int, int) {}
1995 void putModRm(ModRmMode mode
, int reg
, RegisterID rm
)
1997 m_buffer
.putByteUnchecked((mode
<< 6) | ((reg
& 7) << 3) | (rm
& 7));
2000 void putModRmSib(ModRmMode mode
, int reg
, RegisterID base
, RegisterID index
, int scale
)
2002 ASSERT(mode
!= ModRmRegister
);
2004 putModRm(mode
, reg
, hasSib
);
2005 m_buffer
.putByteUnchecked((scale
<< 6) | ((index
& 7) << 3) | (base
& 7));
2008 void registerModRM(int reg
, RegisterID rm
)
2010 putModRm(ModRmRegister
, reg
, rm
);
2013 void memoryModRM(int reg
, RegisterID base
, int offset
)
2015 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2017 if ((base
== hasSib
) || (base
== hasSib2
)) {
2019 if (base
== hasSib
) {
2021 if (!offset
) // No need to check if the base is noBase, since we know it is hasSib!
2022 putModRmSib(ModRmMemoryNoDisp
, reg
, base
, noIndex
, 0);
2023 else if (CAN_SIGN_EXTEND_8_32(offset
)) {
2024 putModRmSib(ModRmMemoryDisp8
, reg
, base
, noIndex
, 0);
2025 m_buffer
.putByteUnchecked(offset
);
2027 putModRmSib(ModRmMemoryDisp32
, reg
, base
, noIndex
, 0);
2028 m_buffer
.putIntUnchecked(offset
);
2032 if (!offset
&& (base
!= noBase
) && (base
!= noBase2
))
2034 if (!offset
&& (base
!= noBase
))
2036 putModRm(ModRmMemoryNoDisp
, reg
, base
);
2037 else if (CAN_SIGN_EXTEND_8_32(offset
)) {
2038 putModRm(ModRmMemoryDisp8
, reg
, base
);
2039 m_buffer
.putByteUnchecked(offset
);
2041 putModRm(ModRmMemoryDisp32
, reg
, base
);
2042 m_buffer
.putIntUnchecked(offset
);
2047 void memoryModRM_disp32(int reg
, RegisterID base
, int offset
)
2049 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2051 if ((base
== hasSib
) || (base
== hasSib2
)) {
2053 if (base
== hasSib
) {
2055 putModRmSib(ModRmMemoryDisp32
, reg
, base
, noIndex
, 0);
2056 m_buffer
.putIntUnchecked(offset
);
2058 putModRm(ModRmMemoryDisp32
, reg
, base
);
2059 m_buffer
.putIntUnchecked(offset
);
2063 void memoryModRM(int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
2065 ASSERT(index
!= noIndex
);
2068 if (!offset
&& (base
!= noBase
) && (base
!= noBase2
))
2070 if (!offset
&& (base
!= noBase
))
2072 putModRmSib(ModRmMemoryNoDisp
, reg
, base
, index
, scale
);
2073 else if (CAN_SIGN_EXTEND_8_32(offset
)) {
2074 putModRmSib(ModRmMemoryDisp8
, reg
, base
, index
, scale
);
2075 m_buffer
.putByteUnchecked(offset
);
2077 putModRmSib(ModRmMemoryDisp32
, reg
, base
, index
, scale
);
2078 m_buffer
.putIntUnchecked(offset
);
2083 void memoryModRM(int reg
, const void* address
)
2085 // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
2086 putModRm(ModRmMemoryNoDisp
, reg
, noBase
);
2087 m_buffer
.putIntUnchecked(reinterpret_cast<int32_t>(address
));
2091 AssemblerBuffer m_buffer
;
2097 #endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
2099 #endif // X86Assembler_h