/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef X86Assembler_h
#define X86Assembler_h

#include <wtf/Platform.h>

#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))

#include "AssemblerBuffer.h"
#include <wtf/Assertions.h>
#include <wtf/Vector.h>
inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
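// For example, 127 and -128 survive the round-trip through signed char and can be
// encoded as a sign-extended 8-bit immediate, while 128 or -129 cannot and fall back
// to the full 32-bit immediate forms used below.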
namespace X86Registers {
    typedef enum {
        eax,
        ecx,
        edx,
        ebx,
        esp,
        ebp,
        esi,
        edi,

#if CPU(X86_64)
        r8,
        r9,
        r10,
        r11,
        r12,
        r13,
        r14,
        r15,
#endif
    } RegisterID;

    typedef enum {
        xmm0,
        xmm1,
        xmm2,
        xmm3,
        xmm4,
        xmm5,
        xmm6,
        xmm7,
    } XMMRegisterID;
}
class X86Assembler {
public:
    typedef X86Registers::RegisterID RegisterID;
    typedef X86Registers::XMMRegisterID XMMRegisterID;
    typedef XMMRegisterID FPRegisterID;
    typedef enum {
        ConditionO,
        ConditionNO,
        ConditionB,
        ConditionAE,
        ConditionE,
        ConditionNE,
        ConditionBE,
        ConditionA,
        ConditionS,
        ConditionNS,
        ConditionP,
        ConditionNP,
        ConditionL,
        ConditionGE,
        ConditionLE,
        ConditionG,

        ConditionC  = ConditionB,
        ConditionNC = ConditionAE,
    } Condition;
        OP_2BYTE_ESCAPE                 = 0x0F,
        PRE_PREDICT_BRANCH_NOT_TAKEN    = 0x2E,
        OP_MOVSXD_GvEv                  = 0x63,
        PRE_OPERAND_SIZE                = 0x66,
        OP_IMUL_GvEvIz                  = 0x69,
        OP_GROUP1_EvIz                  = 0x81,
        OP_GROUP1_EvIb                  = 0x83,
        OP_GROUP1A_Ev                   = 0x8F,
        OP_GROUP2_EvIb                  = 0xC1,
        OP_GROUP11_EvIz                 = 0xC7,
        OP_GROUP2_Ev1                   = 0xD1,
        OP_GROUP2_EvCL                  = 0xD3,
        OP_CALL_rel32                   = 0xE8,
        OP_GROUP3_EbIb                  = 0xF6,
        OP_GROUP3_EvIz                  = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
        OP2_MOVSD_VsdWsd    = 0x10,
        OP2_MOVSD_WsdVsd    = 0x11,
        OP2_CVTSI2SD_VsdEd  = 0x2A,
        OP2_CVTTSD2SI_GdWsd = 0x2C,
        OP2_UCOMISD_VsdWsd  = 0x2E,
        OP2_ADDSD_VsdWsd    = 0x58,
        OP2_MULSD_VsdWsd    = 0x59,
        OP2_SUBSD_VsdWsd    = 0x5C,
        OP2_DIVSD_VsdWsd    = 0x5E,
        OP2_XORPD_VpdWpd    = 0x57,
        OP2_MOVD_VdEd       = 0x6E,
        OP2_MOVD_EdVd       = 0x7E,
        OP2_JCC_rel32       = 0x80,
        OP2_IMUL_GvEv       = 0xAF,
        OP2_MOVZX_GvEb      = 0xB6,
        OP2_MOVZX_GvEw      = 0xB7,
        OP2_PEXTRW_GdUdIb   = 0xC5,
    TwoByteOpcodeID jccRel32(Condition cond)
    {
        return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
    }

    TwoByteOpcodeID setccOpcode(Condition cond)
    {
        return (TwoByteOpcodeID)(OP_SETCC + cond);
    }
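    // For example, with the standard x86 condition-code numbering (ConditionE == 4),
    // jccRel32(ConditionE) evaluates to 0x84, the two-byte JE rel32 opcode.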
private:
    class X86InstructionFormatter;

public:

    class JmpSrc {
        friend class X86Assembler;
        friend class X86InstructionFormatter;
    public:
        JmpSrc()
            : m_offset(-1)
        {
        }

    private:
        JmpSrc(int offset)
            : m_offset(offset)
        {
        }

        int m_offset;
    };

    class JmpDst {
        friend class X86Assembler;
        friend class X86InstructionFormatter;
    public:
        JmpDst()
            : m_offset(-1)
            , m_used(false)
        {
        }

        bool isUsed() const { return m_used; }
        void used() { m_used = true; }
    private:
        JmpDst(int offset)
            : m_offset(offset)
            , m_used(false)
        {
            ASSERT(m_offset == offset);
        }

        int m_offset : 31;
        bool m_used : 1;
    };

    size_t size() const { return m_formatter.size(); }
    void push_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_PUSH_EAX, reg);
    }

    void pop_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_POP_EAX, reg);
    }

    void push_i32(int imm)
    {
        m_formatter.oneByteOp(OP_PUSH_Iz);
        m_formatter.immediate32(imm);
    }

    void push_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
    }

    void pop_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
    }
    // Arithmetic operations:

    void adcl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
            m_formatter.immediate32(imm);
        }
    }

    void addl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
    }

    void addl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
    }

    void addl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
    }

    void addl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }
    void addl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void addq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
    }

    void addq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    void addq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void addl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
            m_formatter.immediate32(imm);
        }
    }
    void andl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
    }

    void andl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
    }

    void andl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
    }

    void andl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }

    void andl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void andq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
    }

    void andq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }

    void andl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
            m_formatter.immediate32(imm);
        }
    }
    void negl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
    }

    void negl_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
    }

    void notl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
    }

    void notl_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
    }
    void orl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
    }

    void orl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
    }

    void orl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
    }

    void orl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }

    void orl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void orq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
    }

    void orq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }

    void orl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
            m_formatter.immediate32(imm);
        }
    }
    void subl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
    }

    void subl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
    }

    void subl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
    }

    void subl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }

    void subl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void subq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
    }

    void subq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }

    void subl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
            m_formatter.immediate32(imm);
        }
    }
    void xorl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
    }

    void xorl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
    }

    void xorl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
    }

    void xorl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void xorl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }

    void xorq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
    }

    void xorq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }
    void sarl_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }
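    // Shifts by one use the short 0xD1 (OP_GROUP2_Ev1) form with no immediate byte;
    // any other count uses 0xC1 (OP_GROUP2_EvIb) followed by an 8-bit shift count.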
    void sarl_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    void shll_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
            m_formatter.immediate8(imm);
        }
    }

    void shll_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
    }

    void sarq_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    void sarq_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }
    void imull_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
    }

    void imull_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
    }

    void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
        m_formatter.immediate32(value);
    }

    void idivl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
    }
    void cmpl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
    }

    void cmpl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
    }

    void cmpl_mr(int offset, RegisterID base, RegisterID src)
    {
        m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
    }

    void cmpl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }
    void cmpl_ir_force32(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
        m_formatter.immediate32(imm);
    }
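    // The _force32 variants always emit the 4-byte immediate form, even for small
    // values - presumably so the immediate occupies a fixed, patchable 32-bit slot
    // (see repatchInt32 below).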
    void cmpl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpl_im_force32(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
        m_formatter.immediate32(imm);
    }

    void cmpq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
    }

    void cmpq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
    }

    void cmpq_mr(int offset, RegisterID base, RegisterID src)
    {
        m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
    }

    void cmpq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }
    void cmpl_rm(RegisterID reg, void* addr)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
    }

    void cmpl_im(int imm, void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
            m_formatter.immediate32(imm);
        }
    }

    void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
    }

    void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate16(imm);
        }
    }
    void testl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    void testl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    void testl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }

    void testq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
    }

    void testq_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    void testq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }

    void testw_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    void testb_i8r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
        m_formatter.immediate8(imm);
    }
    void setCC_r(Condition cond, RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
    }

    void sete_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
    }

    void setz_r(RegisterID dst)
    {
        sete_r(dst);
    }

    void setne_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
    }

    void setnz_r(RegisterID dst)
    {
        setne_r(dst);
    }

    void cdq()
    {
        m_formatter.oneByteOp(OP_CDQ);
    }
    void xchgl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
    }

    void xchgq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
    }
    void movl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
    }

    void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
    }

    void movl_mEAX(void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_EAXOv);
#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }
    void movl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
    }

    void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
    }

    void movl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
        m_formatter.immediate32(imm);
    }

    void movl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    void movl_EAXm(void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_OvEAX);
#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }
    void movq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
    }

    void movq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
    }

    void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
    }

    void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
    }

    void movq_mEAX(void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXOv);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    void movq_EAXm(void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_OvEAX);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    void movq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
    }

    void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
    }

    void movq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    void movq_i64r(int64_t imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
        m_formatter.immediate64(imm);
    }

    void movsxd_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
    }
    void movl_rm(RegisterID src, void* addr)
    {
        if (src == X86Registers::eax)
            movl_EAXm(addr);
        else
            m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
    }

    void movl_mr(void* addr, RegisterID dst)
    {
        if (dst == X86Registers::eax)
            movl_mEAX(addr);
        else
            m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
    }

    void movl_i32m(int imm, void* addr)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
        m_formatter.immediate32(imm);
    }
    void movzwl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
    }

    void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
    }

    void movzbl_rr(RegisterID src, RegisterID dst)
    {
        // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
        // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
        // REX prefixes are defined to be silently ignored by the processor.
        m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
    }
    void leal_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_LEA, dst, base, offset);
    }

    void leaq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
    }
    JmpSrc call()
    {
        m_formatter.oneByteOp(OP_CALL_rel32);
        return m_formatter.immediateRel32();
    }

    JmpSrc call(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
        return JmpSrc(m_formatter.size());
    }

    void call_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
    }

    JmpSrc jmp()
    {
        m_formatter.oneByteOp(OP_JMP_rel32);
        return m_formatter.immediateRel32();
    }
    // Return a JmpSrc so we have a label to the jump, so we can use this
    // to make a tail recursive call on x86-64. The MacroAssembler
    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
    JmpSrc jmp_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
        return JmpSrc(m_formatter.size());
    }

    void jmp_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
    }
    JmpSrc jne()
    {
        m_formatter.twoByteOp(jccRel32(ConditionNE));
        return m_formatter.immediateRel32();
    }

    JmpSrc je()
    {
        m_formatter.twoByteOp(jccRel32(ConditionE));
        return m_formatter.immediateRel32();
    }

    JmpSrc jl()
    {
        m_formatter.twoByteOp(jccRel32(ConditionL));
        return m_formatter.immediateRel32();
    }

    JmpSrc jb()
    {
        m_formatter.twoByteOp(jccRel32(ConditionB));
        return m_formatter.immediateRel32();
    }

    JmpSrc jle()
    {
        m_formatter.twoByteOp(jccRel32(ConditionLE));
        return m_formatter.immediateRel32();
    }

    JmpSrc jbe()
    {
        m_formatter.twoByteOp(jccRel32(ConditionBE));
        return m_formatter.immediateRel32();
    }

    JmpSrc jge()
    {
        m_formatter.twoByteOp(jccRel32(ConditionGE));
        return m_formatter.immediateRel32();
    }

    JmpSrc jg()
    {
        m_formatter.twoByteOp(jccRel32(ConditionG));
        return m_formatter.immediateRel32();
    }

    JmpSrc ja()
    {
        m_formatter.twoByteOp(jccRel32(ConditionA));
        return m_formatter.immediateRel32();
    }

    JmpSrc jae()
    {
        m_formatter.twoByteOp(jccRel32(ConditionAE));
        return m_formatter.immediateRel32();
    }

    JmpSrc jo()
    {
        m_formatter.twoByteOp(jccRel32(ConditionO));
        return m_formatter.immediateRel32();
    }

    JmpSrc jp()
    {
        m_formatter.twoByteOp(jccRel32(ConditionP));
        return m_formatter.immediateRel32();
    }

    JmpSrc js()
    {
        m_formatter.twoByteOp(jccRel32(ConditionS));
        return m_formatter.immediateRel32();
    }

    JmpSrc jCC(Condition cond)
    {
        m_formatter.twoByteOp(jccRel32(cond));
        return m_formatter.immediateRel32();
    }
    void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
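    // Sketch of the resulting encoding (assuming PRE_SSE_F2 is the 0xF2 scalar-double
    // prefix and xmm0 encodes as 0): addsd_rr(xmm0, xmm0) emits F2 0F 58 C0.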
    void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
    }

    void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
    }

    void cvtsi2sd_mr(void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
    }

    void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }

    void movd_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    void movq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    void movq_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }

    void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
    }

    void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void movsd_mr(void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
    }
    void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
        m_formatter.immediate8(whichWord);
    }

    void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
    }
    // Misc instructions:

    void int3()
    {
        m_formatter.oneByteOp(OP_INT3);
    }

    void ret()
    {
        m_formatter.oneByteOp(OP_RET);
    }

    void predictNotTaken()
    {
        m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
    }
    // Assembler admin methods:

    JmpDst label()
    {
        return JmpDst(m_formatter.size());
    }

    static JmpDst labelFor(JmpSrc jump, intptr_t offset = 0)
    {
        return JmpDst(jump.m_offset + offset);
    }

    JmpDst align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            m_formatter.oneByteOp(OP_HLT);

        return label();
    }
1478 // Linking & patching:
1480 // 'link' and 'patch' methods are for use on unprotected code - such as the code
1481 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
1482 // code has been finalized it is (platform support permitting) within a non-
1483 // writable region of memory; to modify the code in an execute-only execuable
1484 // pool the 'repatch' and 'relink' methods should be used.
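    // Typical flow (a sketch of the intended usage, not prescribed by this header):
    // record a JmpSrc from a branch (e.g. jne()), record a JmpDst with label(), call
    // linkJump(src, dst) while the buffer is still writable, then copy the code into
    // executable memory with executableCopy().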
    void linkJump(JmpSrc from, JmpDst to)
    {
        ASSERT(from.m_offset != -1);
        ASSERT(to.m_offset != -1);

        char* code = reinterpret_cast<char*>(m_formatter.data());
        setRel32(code + from.m_offset, code + to.m_offset);
    }

    static void linkJump(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);

        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }

    static void linkCall(void* code, JmpSrc from, void* to)
    {
        ASSERT(from.m_offset != -1);

        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }
    static void linkPointer(void* code, JmpDst where, void* value)
    {
        ASSERT(where.m_offset != -1);

        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
    }

    static void relinkJump(void* from, void* to)
    {
        setRel32(from, to);
    }

    static void relinkCall(void* from, void* to)
    {
        setRel32(from, to);
    }
    static void repatchInt32(void* where, int32_t value)
    {
        setInt32(where, value);
    }

    static void repatchPointer(void* where, void* value)
    {
        setPointer(where, value);
    }

    static void repatchLoadPtrToLEA(void* where)
    {
#if CPU(X86_64)
        // On x86-64 pointer memory accesses require a 64-bit operand, and as such a REX prefix.
        // Skip over the prefix byte.
        where = reinterpret_cast<char*>(where) + 1;
#endif
        *reinterpret_cast<unsigned char*>(where) = static_cast<unsigned char>(OP_LEA);
    }
    static unsigned getCallReturnOffset(JmpSrc call)
    {
        ASSERT(call.m_offset >= 0);
        return call.m_offset;
    }

    static void* getRelocatedAddress(void* code, JmpSrc jump)
    {
        ASSERT(jump.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
    }

    static void* getRelocatedAddress(void* code, JmpDst destination)
    {
        ASSERT(destination.m_offset != -1);

        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
    }

    static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }

    static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
    {
        return dst.m_offset - src.m_offset;
    }

    static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
    {
        return dst.m_offset - src.m_offset;
    }
    void* executableCopy(ExecutablePool* allocator)
    {
        void* copy = m_formatter.executableCopy(allocator);
        ASSERT(copy);
        return copy;
    }
private:

    static void setPointer(void* where, void* value)
    {
        reinterpret_cast<void**>(where)[-1] = value;
    }

    static void setInt32(void* where, int32_t value)
    {
        reinterpret_cast<int32_t*>(where)[-1] = value;
    }

    static void setRel32(void* from, void* to)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
        ASSERT(offset == static_cast<int32_t>(offset));

        setInt32(from, offset);
    }
    class X86InstructionFormatter {

        static const int maxInstructionSize = 16;

    public:

        // Legacy prefix bytes:
        //
        // These are emitted prior to the instruction.

        void prefix(OneByteOpcodeID pre)
        {
            m_buffer.putByte(pre);
        }

        // Word-sized operands / no operand instruction formatters.
        //
        // In addition to the opcode, the following operand permutations are supported:
        //   * None - instruction takes no operands.
        //   * One register - the low three bits of the RegisterID are added into the opcode.
        //   * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
        //   * Three argument ModRM - a register, and a register and an offset describing a memory operand.
        //   * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
        //
        // For 32-bit x86 targets, the address operand may also be provided as a void*.
        // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
        //
        // The twoByteOp methods plant two-byte Intel instruction sequences (first opcode byte 0x0F).
        void oneByteOp(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
        }

        void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
        void twoByteOp(TwoByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
        // Quad-word-sized operands:
        //
        // Used to format 64-bit operations, planting a REX.w prefix.
        // When planting d64 or f64 instructions, not requiring a REX.w prefix,
        // the normal (non-'64'-postfixed) formatters should be used.

        void oneByteOp64(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, 0);
            m_buffer.putByteUnchecked(opcode);
        }
        void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
        // These methods format byte operations. Byte operations differ from the normal
        // formatters in the circumstances under which they will decide to emit REX prefixes.
        // These should be used where any register operand signifies a byte register.
        //
        // The distinction is due to the handling of register numbers in the range 4..7 on
        // x86-64. These register numbers may either represent the second byte of the first
        // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
        //
        // Since ah..bh cannot be used in all permutations of operands (specifically cannot
        // be accessed where a REX prefix is present), these are likely best treated as
        // deprecated. In order to ensure the correct registers spl..dil are selected a
        // REX prefix will be emitted for any byte register operand in the range 4..15.
        //
        // These formatters may be used in instructions with a mix of operand sizes, in which
        // case an unnecessary REX will be emitted - for example, a movzx-style operation with
        // edi as its doubleword destination. In this case a REX will be planted since edi is 7
        // (and were this a byte operand a REX would be required to specify dil instead of bh).
        // Unneeded REX prefixes will be silently ignored by the processor.
        //
        // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
        // is provided to check byte register operands.

        void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }
        void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(reg)|byteRegRequiresRex(rm), reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }
        // An immediate should be appended where appropriate after an op has been emitted.
        // The writes are unchecked since the opcode formatters above will have ensured space.

        void immediate8(int imm)
        {
            m_buffer.putByteUnchecked(imm);
        }

        void immediate16(int imm)
        {
            m_buffer.putShortUnchecked(imm);
        }

        void immediate32(int imm)
        {
            m_buffer.putIntUnchecked(imm);
        }

        void immediate64(int64_t imm)
        {
            m_buffer.putInt64Unchecked(imm);
        }

        JmpSrc immediateRel32()
        {
            m_buffer.putIntUnchecked(0);
            return JmpSrc(m_buffer.size());
        }
        // Administrative methods:

        size_t size() const { return m_buffer.size(); }
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
        void* data() const { return m_buffer.data(); }
        void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
    private:

        // Internals; ModRm and REX formatters.

        static const RegisterID noBase = X86Registers::ebp;
        static const RegisterID hasSib = X86Registers::esp;
        static const RegisterID noIndex = X86Registers::esp;
#if CPU(X86_64)
        static const RegisterID noBase2 = X86Registers::r13;
        static const RegisterID hasSib2 = X86Registers::r12;
#endif
1898 // Registers r8 & above require a REX prefixe.
1899 inline bool regRequiresRex(int reg
)
1901 return (reg
>= X86Registers::r8
);
1904 // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
1905 inline bool byteRegRequiresRex(int reg
)
1907 return (reg
>= X86Registers::esp
);
1910 // Format a REX prefix byte.
1911 inline void emitRex(bool w
, int r
, int x
, int b
)
1913 m_buffer
.putByteUnchecked(PRE_REX
| ((int)w
<< 3) | ((r
>>3)<<2) | ((x
>>3)<<1) | (b
>>3));
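        // The REX byte has the layout 0100WRXB: W selects a 64-bit operand size, and
        // R, X and B carry bit 3 of the ModRM reg field, the SIB index and the
        // base/rm register respectively (the low three bits stay in the ModRM/SIB bytes).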
        // Used to plant a REX byte with REX.w set (for 64-bit operations).
        inline void emitRexW(int r, int x, int b)
        {
            emitRex(true, r, x, b);
        }

        // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
        // regRequiresRex() to check other registers (i.e. address base & index).
        inline void emitRexIf(bool condition, int r, int x, int b)
        {
            if (condition) emitRex(false, r, x, b);
        }

        // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
        inline void emitRexIfNeeded(int r, int x, int b)
        {
            emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
        }
#else
        // No REX prefix bytes on 32-bit x86.
        inline bool regRequiresRex(int) { return false; }
        inline bool byteRegRequiresRex(int) { return false; }
        inline void emitRexIf(bool, int, int, int) {}
        inline void emitRexIfNeeded(int, int, int) {}
#endif
        enum ModRmMode {
            ModRmMemoryNoDisp,
            ModRmMemoryDisp8,
            ModRmMemoryDisp32,
            ModRmRegister,
        };

        void putModRm(ModRmMode mode, int reg, RegisterID rm)
        {
            m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
        }

        void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
        {
            ASSERT(mode != ModRmRegister);

            putModRm(mode, reg, hasSib);
            m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
        }
        void registerModRM(int reg, RegisterID rm)
        {
            putModRm(ModRmRegister, reg, rm);
        }
        void memoryModRM(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if CPU(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
                    putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                    m_buffer.putIntUnchecked(offset);
                }
            } else {
#if CPU(X86_64)
                if (!offset && (base != noBase) && (base != noBase2))
#else
                if (!offset && (base != noBase))
#endif
                    putModRm(ModRmMemoryNoDisp, reg, base);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRm(ModRmMemoryDisp8, reg, base);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRm(ModRmMemoryDisp32, reg, base);
                    m_buffer.putIntUnchecked(offset);
                }
            }
        }
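        // For example, with an ordinary base register and offset 0 this is a single ModRM
        // byte; an offset of 16 adds a one-byte displacement, and 4096 a four-byte one.
        // A base of esp (or r12 on x86-64) always routes through the SIB form above.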
        void memoryModRM_disp32(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if CPU(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                m_buffer.putIntUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp32, reg, base);
                m_buffer.putIntUnchecked(offset);
            }
        }
        void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            ASSERT(index != noIndex);

#if CPU(X86_64)
            if (!offset && (base != noBase) && (base != noBase2))
#else
            if (!offset && (base != noBase))
#endif
                putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
            else if (CAN_SIGN_EXTEND_8_32(offset)) {
                putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
                m_buffer.putIntUnchecked(offset);
            }
        }
        void memoryModRM(int reg, void* address)
        {
            // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
            putModRm(ModRmMemoryNoDisp, reg, noBase);
            m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
        }
        AssemblerBuffer m_buffer;
    } m_formatter;
};

#endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))

#endif // X86Assembler_h