/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef X86Assembler_h
#define X86Assembler_h

#include <wtf/Platform.h>

#if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))

#include "AssemblerBuffer.h"

#include <wtf/Assertions.h>
#include <wtf/Vector.h>
inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
inline bool CAN_SIGN_EXTEND_32_64(intptr_t value) { return value == (intptr_t)(int32_t)value; }
inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value) { return value == (intptr_t)(uint32_t)value; }
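
// Illustrative note (not part of the original header): CAN_SIGN_EXTEND_8_32 is what the
// arithmetic emitters below use to choose between the sign-extended 8-bit immediate form
// (OP_GROUP1_EvIb) and the full 32-bit form (OP_GROUP1_EvIz). A minimal sketch of the
// decision, as it appears in emitters such as addl_ir:
//
//     if (CAN_SIGN_EXTEND_8_32(imm))   // e.g. imm = 100   -> opcode 0x83 + one immediate byte
//         /* emit the Ib form */;
//     else                             // e.g. imm = 1000  -> opcode 0x81 + four immediate bytes
//         /* emit the Iz form */;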
class X86Assembler {
public:
    typedef X86::RegisterID RegisterID;
    typedef X86::XMMRegisterID XMMRegisterID;
    typedef enum {
        OP_2BYTE_ESCAPE                 = 0x0F,
        PRE_PREDICT_BRANCH_NOT_TAKEN    = 0x2E,
        OP_MOVSXD_GvEv                  = 0x63,
        PRE_OPERAND_SIZE                = 0x66,
        OP_IMUL_GvEvIz                  = 0x69,
        OP_GROUP1_EvIz                  = 0x81,
        OP_GROUP1_EvIb                  = 0x83,
        OP_GROUP1A_Ev                   = 0x8F,
        OP_GROUP2_EvIb                  = 0xC1,
        OP_GROUP11_EvIz                 = 0xC7,
        OP_GROUP2_Ev1                   = 0xD1,
        OP_GROUP2_EvCL                  = 0xD3,
        OP_CALL_rel32                   = 0xE8,
        OP_GROUP3_EbIb                  = 0xF6,
        OP_GROUP3_EvIz                  = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
    } OneByteOpcodeID;

    typedef enum {
        OP2_MOVSD_VsdWsd    = 0x10,
        OP2_MOVSD_WsdVsd    = 0x11,
        OP2_CVTSI2SD_VsdEd  = 0x2A,
        OP2_CVTTSD2SI_GdWsd = 0x2C,
        OP2_UCOMISD_VsdWsd  = 0x2E,
        OP2_ADDSD_VsdWsd    = 0x58,
        OP2_MULSD_VsdWsd    = 0x59,
        OP2_SUBSD_VsdWsd    = 0x5C,
        OP2_MOVD_VdEd       = 0x6E,
        OP2_MOVD_EdVd       = 0x7E,
        OP2_JAE_rel32       = 0x83,
        OP2_JNE_rel32       = 0x85,
        OP2_JBE_rel32       = 0x86,
        OP2_JGE_rel32       = 0x8D,
        OP2_JLE_rel32       = 0x8E,
        OP2_IMUL_GvEv       = 0xAF,
        OP2_MOVZX_GvEb      = 0xB6,
        OP2_MOVZX_GvEw      = 0xB7,
        OP2_PEXTRW_GdUdIb   = 0xC5,
    } TwoByteOpcodeID;
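
    // Illustrative note (not part of the original header): the opcode names follow the
    // Intel-manual operand notation - Ev is a word/dword r/m operand, Gv a dword register,
    // Eb its byte form, and Ib/Iz an 8-bit or 32-bit immediate. For example,
    // OP_ADD_EvGv (0x01) is "add r/m32, r32", so with a ModRM byte of 0xC1 the two bytes
    // 01 C1 encode addl %eax, %ecx (assuming X86::eax is 0 and X86::ecx is 1).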
    // Opaque label types

private:
    class X86InstructionFormatter;
public:

    class JmpSrc {
        friend class X86Assembler;
        friend class X86InstructionFormatter;
    public:
        JmpSrc() : m_offset(-1) { }
    private:
        JmpSrc(int offset) : m_offset(offset) { }
        int m_offset;
    };

    class JmpDst {
        friend class X86Assembler;
        friend class X86InstructionFormatter;
    public:
        JmpDst() : m_offset(-1) { }
    private:
        JmpDst(int offset) : m_offset(offset) { }
        int m_offset;
    };
    size_t size() const { return m_formatter.size(); }
    void push_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_PUSH_EAX, reg);
    }

    void pop_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_POP_EAX, reg);
    }

    void push_i32(int imm)
    {
        m_formatter.oneByteOp(OP_PUSH_Iz);
        m_formatter.immediate32(imm);
    }

    void push_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
    }

    void pop_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
    }
    // Arithmetic operations:

    void addl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
    }

    void addl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
    }

    void addl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    void addl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }
#if PLATFORM(X86_64)
    void addq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
    }

    void addq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    void addl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
    void andl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
    }

    void andl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }

#if PLATFORM(X86_64)
    void andq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
    }

    void andq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }
#endif

    void notl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
    }
    void orl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
    }

    void orl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
    }

    void orl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }

#if PLATFORM(X86_64)
    void orq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
    }

    void orq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }
#endif
    void subl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
    }

    void subl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
    }

    void subl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }

    void subl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate32(imm);
        }
    }

#if PLATFORM(X86_64)
    void subq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
    }

    void subq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }
#else
    void subl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif
    void xorl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
    }

    void xorl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }

#if PLATFORM(X86_64)
    void xorq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
    }

    void xorq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }
#endif
    void sarl_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }

    void sarl_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    void shll_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
            m_formatter.immediate8(imm);
        }
    }

    void shll_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
    }

#if PLATFORM(X86_64)
    void sarq_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    void sarq_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }
#endif
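
    // Illustrative note (not part of the original header): the shift emitters above use the
    // one-byte shift-by-1 encoding (OP_GROUP2_Ev1, 0xD1) when the count is 1, and the
    // shift-by-imm8 encoding (OP_GROUP2_EvIb, 0xC1 plus an immediate byte) otherwise.
    // Assuming X86::eax is 0 and GROUP2_OP_SAR is 7:
    //
    //     sarl_i8r(1, X86::eax);   // D1 F8      (sarl $1, %eax)
    //     sarl_i8r(4, X86::eax);   // C1 F8 04   (sarl $4, %eax)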
    void imull_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
    }

    void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
        m_formatter.immediate32(value);
    }

    void idivl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
    }
    void cmpl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
    }

    void cmpl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
    }

    void cmpl_mr(int offset, RegisterID base, RegisterID src)
    {
        m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
    }

    void cmpl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    void cmpl_ir_force32(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
        m_formatter.immediate32(imm);
    }

    void cmpl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpl_im_force32(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
        m_formatter.immediate32(imm);
    }

#if PLATFORM(X86_64)
    void cmpq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
    }

    void cmpq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
    }

    void cmpq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }
#else
    void cmpl_rm(RegisterID reg, void* addr)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
    }

    void cmpl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
            m_formatter.immediate32(imm);
        }
    }
#endif

    void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
    }
    void testl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    void testl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    void testl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }

#if PLATFORM(X86_64)
    void testq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
    }

    void testq_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    void testq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }
#endif

    void testb_i8r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
        m_formatter.immediate8(imm);
    }
    void sete_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(OP_SETE, (GroupOpcodeID)0, dst);
    }

    void setz_r(RegisterID dst)
    {
        sete_r(dst);
    }

    void setne_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(OP_SETNE, (GroupOpcodeID)0, dst);
    }

    void setnz_r(RegisterID dst)
    {
        setne_r(dst);
    }

    void cdq()
    {
        m_formatter.oneByteOp(OP_CDQ);
    }
    void xchgl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
    }

#if PLATFORM(X86_64)
    void xchgq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
    }
#endif
    void movl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
    }

    void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
    }

    void movl_mEAX(void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_EAXOv);
#if PLATFORM(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }

    void movl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
    }

    void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
    }

    void movl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
        m_formatter.immediate32(imm);
    }

    void movl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    void movl_EAXm(void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_OvEAX);
#if PLATFORM(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }

#if PLATFORM(X86_64)
    void movq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
    }

    void movq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
    }

    void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
    }

    void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
    }

    void movq_mEAX(void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXOv);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    void movq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
    }

    void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
    }

    void movq_i64r(int64_t imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
        m_formatter.immediate64(imm);
    }

    void movsxd_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
    }
#else
    void movl_mr(void* addr, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
    }

    void movl_i32m(int imm, void* addr)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
        m_formatter.immediate32(imm);
    }
#endif
    void movzwl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
    }

    void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
    }

    void movzbl_rr(RegisterID src, RegisterID dst)
    {
        // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
        // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
        // REX prefixes are defined to be silently ignored by the processor.
        m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
    }
    void leal_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_LEA, dst, base, offset);
    }
    JmpSrc call()
    {
        m_formatter.oneByteOp(OP_CALL_rel32);
        return m_formatter.immediateRel32();
    }

    JmpSrc call(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
        return JmpSrc(m_formatter.size());
    }

    JmpSrc jmp()
    {
        m_formatter.oneByteOp(OP_JMP_rel32);
        return m_formatter.immediateRel32();
    }
    void jmp_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
    }

    void jmp_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
    }
    JmpSrc jne()
    {
        m_formatter.twoByteOp(OP2_JNE_rel32);
        return m_formatter.immediateRel32();
    }

    JmpSrc je()
    {
        m_formatter.twoByteOp(OP2_JE_rel32);
        return m_formatter.immediateRel32();
    }

    JmpSrc jl()
    {
        m_formatter.twoByteOp(OP2_JL_rel32);
        return m_formatter.immediateRel32();
    }

    JmpSrc jb()
    {
        m_formatter.twoByteOp(OP2_JB_rel32);
        return m_formatter.immediateRel32();
    }

    JmpSrc jle()
    {
        m_formatter.twoByteOp(OP2_JLE_rel32);
        return m_formatter.immediateRel32();
    }

    JmpSrc jbe()
    {
        m_formatter.twoByteOp(OP2_JBE_rel32);
        return m_formatter.immediateRel32();
    }

    JmpSrc jge()
    {
        m_formatter.twoByteOp(OP2_JGE_rel32);
        return m_formatter.immediateRel32();
    }

    JmpSrc jg()
    {
        m_formatter.twoByteOp(OP2_JG_rel32);
        return m_formatter.immediateRel32();
    }

    JmpSrc ja()
    {
        m_formatter.twoByteOp(OP2_JA_rel32);
        return m_formatter.immediateRel32();
    }

    JmpSrc jae()
    {
        m_formatter.twoByteOp(OP2_JAE_rel32);
        return m_formatter.immediateRel32();
    }

    JmpSrc jo()
    {
        m_formatter.twoByteOp(OP2_JO_rel32);
        return m_formatter.immediateRel32();
    }

    JmpSrc jp()
    {
        m_formatter.twoByteOp(OP2_JP_rel32);
        return m_formatter.immediateRel32();
    }

    JmpSrc js()
    {
        m_formatter.twoByteOp(OP2_JS_rel32);
        return m_formatter.immediateRel32();
    }
    void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
    }

    void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }

    void movd_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }
#if PLATFORM(X86_64)
    void movq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    void movq_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }
#endif
    void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
    }

    void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
        m_formatter.immediate8(whichWord);
    }

    void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
    // Misc instructions:

    void int3()
    {
        m_formatter.oneByteOp(OP_INT3);
    }

    void ret()
    {
        m_formatter.oneByteOp(OP_RET);
    }

    void predictNotTaken()
    {
        m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
    }
    // Assembler admin methods:

    JmpDst label()
    {
        return JmpDst(m_formatter.size());
    }

    JmpDst align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            m_formatter.oneByteOp(OP_HLT);

        return label();
    }
    // Linking & patching:

    void link(JmpSrc from, JmpDst to)
    {
        ASSERT(to.m_offset != -1);
        ASSERT(from.m_offset != -1);

        reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(m_formatter.data()) + from.m_offset)[-1] = to.m_offset - from.m_offset;
    }
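
    // Illustrative note (not part of the original header): a JmpSrc records the buffer offset
    // just past a 4-byte rel32 field, so the [-1] indexing above rewrites exactly those four
    // bytes. A hedged usage sketch:
    //
    //     X86Assembler a;
    //     X86Assembler::JmpSrc branch = a.jne();    // 0F 85 xx xx xx xx; offset taken after the field
    //     X86Assembler::JmpDst target = a.label();
    //     a.link(branch, target);                   // stores (target.m_offset - branch.m_offset) as the rel32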
    static void patchAddress(void* code, JmpDst position, void* value)
    {
        ASSERT(position.m_offset != -1);

        reinterpret_cast<void**>(reinterpret_cast<ptrdiff_t>(code) + position.m_offset)[-1] = value;
    }

    static void link(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);

        reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset);
    }

    static void* getRelocatedAddress(void* code, JmpSrc jump)
    {
        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
    }

    static void* getRelocatedAddress(void* code, JmpDst destination)
    {
        ASSERT(destination.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
    }

    static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }

    static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
    {
        return dst.m_offset - src.m_offset;
    }

    static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }

    static void patchImmediate(intptr_t where, int32_t value)
    {
        reinterpret_cast<int32_t*>(where)[-1] = value;
    }

    static void patchPointer(intptr_t where, intptr_t value)
    {
        reinterpret_cast<intptr_t*>(where)[-1] = value;
    }

    static void patchBranchOffset(intptr_t where, void* destination)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(destination) - where;
        ASSERT(offset == static_cast<int32_t>(offset));
        reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset);
    }

    void* executableCopy(ExecutablePool* allocator)
    {
        void* copy = m_formatter.executableCopy(allocator);
        return copy;
    }
private:

    class X86InstructionFormatter {

        static const int maxInstructionSize = 16;

    public:
        // Legacy prefix bytes:
        //
        // These are emitted prior to the instruction.

        void prefix(OneByteOpcodeID pre)
        {
            m_buffer.putByte(pre);
        }
        // Word-sized operands / no operand instruction formatters.
        //
        // In addition to the opcode, the following operand permutations are supported:
        //   * None - instruction takes no operands.
        //   * One register - the low three bits of the RegisterID are added into the opcode.
        //   * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
        //   * Three argument ModRM - a register, and a register and an offset describing a memory operand.
        //   * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
        //
        // For 32-bit x86 targets, the address operand may also be provided as a void*.
        // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
        //
        // The twoByteOp methods plant two-byte Intel instruction sequences (first opcode byte 0x0F).
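
        // Illustrative note (not part of the original header): a sketch of how the permutations
        // above map to bytes, assuming X86::eax is 0 and X86::ecx is 1:
        //
        //     oneByteOp(OP_ADD_EvGv, X86::eax, X86::ecx);      // 01 C1     (addl %eax, %ecx)
        //     oneByteOp(OP_ADD_EvGv, X86::eax, X86::ecx, 8);   // 01 41 08  (addl %eax, 8(%ecx))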
        void oneByteOp(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
        }

        void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }
#if !PLATFORM(X86_64)
        void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
#endif
        void twoByteOp(TwoByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }
#if PLATFORM(X86_64)
        // Quad-word-sized operands:
        //
        // Used to format 64-bit operations, planting a REX.w prefix.
        // When planting d64 or f64 instructions, not requiring a REX.w prefix,
        // the normal (non-'64'-postfixed) formatters should be used.
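
        // Illustrative note (not part of the original header): the '64' formatters differ from
        // their plain counterparts only in unconditionally planting REX.W. Assuming X86::eax
        // is 0 and X86::ecx is 1:
        //
        //     oneByteOp(OP_MOV_EvGv, X86::eax, X86::ecx);     // 89 C1     (movl %eax, %ecx)
        //     oneByteOp64(OP_MOV_EvGv, X86::eax, X86::ecx);   // 48 89 C1  (movq %rax, %rcx)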
        void oneByteOp64(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, 0);
            m_buffer.putByteUnchecked(opcode);
        }

        void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
#endif
        // These methods format byte operations. Byte operations differ from the normal
        // formatters in the circumstances under which they will decide to emit REX prefixes.
        // These should be used where any register operand signifies a byte register.
        //
        // The distinction is due to the handling of register numbers in the range 4..7 on
        // x86-64. These register numbers may either represent the second byte of the first
        // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
        //
        // Since ah..bh cannot be used in all permutations of operands (specifically cannot
        // be accessed where a REX prefix is present), these are likely best treated as
        // deprecated. In order to ensure the correct registers spl..dil are selected a
        // REX prefix will be emitted for any byte register operand in the range 4..15.
        //
        // These formatters may be used in instructions where a mix of operand sizes is used,
        // in which case an unnecessary REX will be emitted, for example:
        //     movzbl %al, %edi
        // In this case a REX will be planted since edi is 7 (and were this a byte operand
        // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
        // be silently ignored by the processor.
        //
        // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
        // is provided to check byte register operands.
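
        // Illustrative note (not part of the original header): a sketch of the byte-register
        // rule on x86-64, assuming X86::eax is 0, X86::edi is 7 and GROUP3_OP_TEST is 0:
        //
        //     oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, X86::eax);   // F6 C0     - no REX (testb $imm, %al)
        //     oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, X86::edi);   // 40 F6 C7  - REX selects %dil, not %bh
        //
        // (The immediate byte itself is appended separately via immediate8().)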
        void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }
        // An immediate should be appended where appropriate after an op has been emitted.
        // The writes are unchecked since the opcode formatters above will have ensured space.

        void immediate8(int imm)
        {
            m_buffer.putByteUnchecked(imm);
        }

        void immediate32(int imm)
        {
            m_buffer.putIntUnchecked(imm);
        }

        void immediate64(int64_t imm)
        {
            m_buffer.putInt64Unchecked(imm);
        }

        JmpSrc immediateRel32()
        {
            m_buffer.putIntUnchecked(0);
            return JmpSrc(m_buffer.size());
        }
        // Administrative methods:

        size_t size() const { return m_buffer.size(); }
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
        void* data() const { return m_buffer.data(); }
        void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
    private:

        // Internals; ModRm and REX formatters.

        static const RegisterID noBase = X86::ebp;
        static const RegisterID hasSib = X86::esp;
        static const RegisterID noIndex = X86::esp;
#if PLATFORM(X86_64)
        static const RegisterID noBase2 = X86::r13;
        static const RegisterID hasSib2 = X86::r12;
        // Registers r8 & above require a REX prefix.
        inline bool regRequiresRex(int reg)
        {
            return (reg >= X86::r8);
        }

        // Byte operand registers spl & above require a REX prefix (to prevent the 'H' registers being accessed).
        inline bool byteRegRequiresRex(int reg)
        {
            return (reg >= X86::esp);
        }

        // Format a REX prefix byte.
        inline void emitRex(bool w, int r, int x, int b)
        {
            m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
        }
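
        // Illustrative note (not part of the original header): the REX byte is 0100WRXB - W
        // selects a 64-bit operand size, while R, X and B extend the ModRM reg field, the SIB
        // index and the ModRM rm / SIB base field. Assuming X86::r8 is 8 and X86::eax is 0:
        //
        //     emitRex(true, X86::r8, 0, X86::eax);   // 0x4C = 0100 1100 (REX.W + REX.R)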
        // Used to plant a REX byte with REX.w set (for 64-bit operations).
        inline void emitRexW(int r, int x, int b)
        {
            emitRex(true, r, x, b);
        }

        // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
        // regRequiresRex() to check other registers (i.e. address base & index).
        inline void emitRexIf(bool condition, int r, int x, int b)
        {
            if (condition) emitRex(false, r, x, b);
        }

        // Used for word-sized operations; will plant a REX prefix if necessary (if any register is r8 or above).
        inline void emitRexIfNeeded(int r, int x, int b)
        {
            emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
        }
#else
        // No REX prefix bytes on 32-bit x86.
        inline bool regRequiresRex(int) { return false; }
        inline bool byteRegRequiresRex(int) { return false; }
        inline void emitRexIf(bool, int, int, int) {}
        inline void emitRexIfNeeded(int, int, int) {}
#endif
        enum ModRmMode {
            ModRmMemoryNoDisp,
            ModRmMemoryDisp8,
            ModRmMemoryDisp32,
            ModRmRegister,
        };

        void putModRm(ModRmMode mode, int reg, RegisterID rm)
        {
            m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
        }
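
        // Illustrative note (not part of the original header): ModRM packs a two-bit mode with
        // three-bit reg and rm fields. For example, with reg = 2 and X86::ebx assumed to be 3:
        //
        //     putModRm(ModRmRegister, 2, X86::ebx);      // (3 << 6) | (2 << 3) | 3 = 0xD3
        //     putModRm(ModRmMemoryDisp8, 2, X86::ebx);   // (1 << 6) | (2 << 3) | 3 = 0x53 (disp8 follows)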
        void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
        {
            ASSERT(mode != ModRmRegister);

            // Encode scale of (1,2,4,8) -> (0,1,2,3)
            int shift = 0;
            while (scale >>= 1)
                ++shift;

            putModRm(mode, reg, hasSib);
            m_buffer.putByteUnchecked((shift << 6) | ((index & 7) << 3) | (base & 7));
        }

        void registerModRM(int reg, RegisterID rm)
        {
            putModRm(ModRmRegister, reg, rm);
        }
        void memoryModRM(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if PLATFORM(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
                    putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                    m_buffer.putIntUnchecked(offset);
                }
            } else {
#if PLATFORM(X86_64)
                if (!offset && (base != noBase) && (base != noBase2))
#else
                if (!offset && (base != noBase))
#endif
                    putModRm(ModRmMemoryNoDisp, reg, base);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRm(ModRmMemoryDisp8, reg, base);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRm(ModRmMemoryDisp32, reg, base);
                    m_buffer.putIntUnchecked(offset);
                }
            }
        }
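
        // Illustrative note (not part of the original header): a sketch of the displacement
        // selection above, assuming X86::ecx is 1 and reg = 0:
        //
        //     memoryModRM(0, X86::ecx, 0);        // 01              (no displacement)
        //     memoryModRM(0, X86::ecx, 16);       // 41 10           (sign-extendable disp8)
        //     memoryModRM(0, X86::ecx, 0x1000);   // 81 00 10 00 00  (disp32, little-endian)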
        void memoryModRM_disp32(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if PLATFORM(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                m_buffer.putIntUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp32, reg, base);
                m_buffer.putIntUnchecked(offset);
            }
        }
        void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            ASSERT(index != noIndex);

#if PLATFORM(X86_64)
            if (!offset && (base != noBase) && (base != noBase2))
#else
            if (!offset && (base != noBase))
#endif
                putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
            else if (CAN_SIGN_EXTEND_8_32(offset)) {
                putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
                m_buffer.putIntUnchecked(offset);
            }
        }
#if !PLATFORM(X86_64)
        void memoryModRM(int reg, void* address)
        {
            // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
            putModRm(ModRmMemoryNoDisp, reg, noBase);
            m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
        }
#endif
        AssemblerBuffer m_buffer;
    } m_formatter;
};

#endif // ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))

#endif // X86Assembler_h