2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef X86Assembler_h
27 #define X86Assembler_h
29 #include <wtf/Platform.h>
31 #if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
33 #include "AssemblerBuffer.h"
35 #include <wtf/Assertions.h>
36 #include <wtf/Vector.h>
// Returns true if `value` round-trips through a signed 8-bit integer, i.e. it
// can be encoded as a sign-extended imm8 in an instruction.
inline bool CAN_SIGN_EXTEND_8_32(int32_t value)
{
    return value == static_cast<int32_t>(static_cast<signed char>(value));
}

// Returns true if `value` round-trips through a signed 32-bit integer, i.e. it
// can be encoded as a sign-extended imm32 on x86-64.
inline bool CAN_SIGN_EXTEND_32_64(intptr_t value)
{
    return value == static_cast<intptr_t>(static_cast<int32_t>(value));
}

// Returns true if `value` round-trips through an unsigned 32-bit integer
// (zero-extension preserves the value).
inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value)
{
    return value == static_cast<intptr_t>(static_cast<uint32_t>(value));
}
83 typedef X86::RegisterID RegisterID
;
84 typedef X86::XMMRegisterID XMMRegisterID
;
85 typedef XMMRegisterID FPRegisterID
;
105 ConditionC
= ConditionB
,
106 ConditionNC
= ConditionAE
,
115 OP_2BYTE_ESCAPE
= 0x0F,
120 PRE_PREDICT_BRANCH_NOT_TAKEN
= 0x2E,
131 OP_MOVSXD_GvEv
= 0x63,
133 PRE_OPERAND_SIZE
= 0x66,
136 OP_IMUL_GvEvIz
= 0x69,
137 OP_GROUP1_EvIz
= 0x81,
138 OP_GROUP1_EvIb
= 0x83,
144 OP_GROUP1A_Ev
= 0x8F,
149 OP_GROUP2_EvIb
= 0xC1,
151 OP_GROUP11_EvIz
= 0xC7,
153 OP_GROUP2_Ev1
= 0xD1,
154 OP_GROUP2_EvCL
= 0xD3,
155 OP_CALL_rel32
= 0xE8,
159 OP_GROUP3_EbIb
= 0xF6,
161 OP_GROUP3_EvIz
= 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
166 OP2_MOVSD_VsdWsd
= 0x10,
167 OP2_MOVSD_WsdVsd
= 0x11,
168 OP2_CVTSI2SD_VsdEd
= 0x2A,
169 OP2_CVTTSD2SI_GdWsd
= 0x2C,
170 OP2_UCOMISD_VsdWsd
= 0x2E,
171 OP2_ADDSD_VsdWsd
= 0x58,
172 OP2_MULSD_VsdWsd
= 0x59,
173 OP2_SUBSD_VsdWsd
= 0x5C,
174 OP2_DIVSD_VsdWsd
= 0x5E,
175 OP2_XORPD_VpdWpd
= 0x57,
176 OP2_MOVD_VdEd
= 0x6E,
177 OP2_MOVD_EdVd
= 0x7E,
178 OP2_JCC_rel32
= 0x80,
180 OP2_IMUL_GvEv
= 0xAF,
181 OP2_MOVZX_GvEb
= 0xB6,
182 OP2_MOVZX_GvEw
= 0xB7,
183 OP2_PEXTRW_GdUdIb
= 0xC5,
186 TwoByteOpcodeID
jccRel32(Condition cond
)
188 return (TwoByteOpcodeID
)(OP2_JCC_rel32
+ cond
);
191 TwoByteOpcodeID
setccOpcode(Condition cond
)
193 return (TwoByteOpcodeID
)(OP_SETCC
+ cond
);
222 class X86InstructionFormatter
;
226 friend class X86Assembler
;
227 friend class X86InstructionFormatter
;
244 friend class X86Assembler
;
245 friend class X86InstructionFormatter
;
253 bool isUsed() const { return m_used
; }
254 void used() { m_used
= true; }
260 ASSERT(m_offset
== offset
);
271 size_t size() const { return m_formatter
.size(); }
275 void push_r(RegisterID reg
)
277 m_formatter
.oneByteOp(OP_PUSH_EAX
, reg
);
280 void pop_r(RegisterID reg
)
282 m_formatter
.oneByteOp(OP_POP_EAX
, reg
);
285 void push_i32(int imm
)
287 m_formatter
.oneByteOp(OP_PUSH_Iz
);
288 m_formatter
.immediate32(imm
);
291 void push_m(int offset
, RegisterID base
)
293 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_PUSH
, base
, offset
);
296 void pop_m(int offset
, RegisterID base
)
298 m_formatter
.oneByteOp(OP_GROUP1A_Ev
, GROUP1A_OP_POP
, base
, offset
);
301 // Arithmetic operations:
303 #if !PLATFORM(X86_64)
304 void adcl_im(int imm
, void* addr
)
306 if (CAN_SIGN_EXTEND_8_32(imm
)) {
307 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADC
, addr
);
308 m_formatter
.immediate8(imm
);
310 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADC
, addr
);
311 m_formatter
.immediate32(imm
);
316 void addl_rr(RegisterID src
, RegisterID dst
)
318 m_formatter
.oneByteOp(OP_ADD_EvGv
, src
, dst
);
321 void addl_mr(int offset
, RegisterID base
, RegisterID dst
)
323 m_formatter
.oneByteOp(OP_ADD_GvEv
, dst
, base
, offset
);
326 void addl_rm(RegisterID src
, int offset
, RegisterID base
)
328 m_formatter
.oneByteOp(OP_ADD_EvGv
, src
, base
, offset
);
331 void addl_ir(int imm
, RegisterID dst
)
333 if (CAN_SIGN_EXTEND_8_32(imm
)) {
334 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, dst
);
335 m_formatter
.immediate8(imm
);
337 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, dst
);
338 m_formatter
.immediate32(imm
);
342 void addl_im(int imm
, int offset
, RegisterID base
)
344 if (CAN_SIGN_EXTEND_8_32(imm
)) {
345 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, base
, offset
);
346 m_formatter
.immediate8(imm
);
348 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, base
, offset
);
349 m_formatter
.immediate32(imm
);
354 void addq_rr(RegisterID src
, RegisterID dst
)
356 m_formatter
.oneByteOp64(OP_ADD_EvGv
, src
, dst
);
359 void addq_ir(int imm
, RegisterID dst
)
361 if (CAN_SIGN_EXTEND_8_32(imm
)) {
362 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, dst
);
363 m_formatter
.immediate8(imm
);
365 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, dst
);
366 m_formatter
.immediate32(imm
);
370 void addq_im(int imm
, int offset
, RegisterID base
)
372 if (CAN_SIGN_EXTEND_8_32(imm
)) {
373 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, base
, offset
);
374 m_formatter
.immediate8(imm
);
376 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, base
, offset
);
377 m_formatter
.immediate32(imm
);
381 void addl_im(int imm
, void* addr
)
383 if (CAN_SIGN_EXTEND_8_32(imm
)) {
384 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, addr
);
385 m_formatter
.immediate8(imm
);
387 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, addr
);
388 m_formatter
.immediate32(imm
);
393 void andl_rr(RegisterID src
, RegisterID dst
)
395 m_formatter
.oneByteOp(OP_AND_EvGv
, src
, dst
);
398 void andl_mr(int offset
, RegisterID base
, RegisterID dst
)
400 m_formatter
.oneByteOp(OP_AND_GvEv
, dst
, base
, offset
);
403 void andl_rm(RegisterID src
, int offset
, RegisterID base
)
405 m_formatter
.oneByteOp(OP_AND_EvGv
, src
, base
, offset
);
408 void andl_ir(int imm
, RegisterID dst
)
410 if (CAN_SIGN_EXTEND_8_32(imm
)) {
411 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_AND
, dst
);
412 m_formatter
.immediate8(imm
);
414 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_AND
, dst
);
415 m_formatter
.immediate32(imm
);
419 void andl_im(int imm
, int offset
, RegisterID base
)
421 if (CAN_SIGN_EXTEND_8_32(imm
)) {
422 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_AND
, base
, offset
);
423 m_formatter
.immediate8(imm
);
425 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_AND
, base
, offset
);
426 m_formatter
.immediate32(imm
);
431 void andq_rr(RegisterID src
, RegisterID dst
)
433 m_formatter
.oneByteOp64(OP_AND_EvGv
, src
, dst
);
436 void andq_ir(int imm
, RegisterID dst
)
438 if (CAN_SIGN_EXTEND_8_32(imm
)) {
439 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_AND
, dst
);
440 m_formatter
.immediate8(imm
);
442 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_AND
, dst
);
443 m_formatter
.immediate32(imm
);
447 void andl_im(int imm
, void* addr
)
449 if (CAN_SIGN_EXTEND_8_32(imm
)) {
450 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_AND
, addr
);
451 m_formatter
.immediate8(imm
);
453 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_AND
, addr
);
454 m_formatter
.immediate32(imm
);
459 void negl_r(RegisterID dst
)
461 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NEG
, dst
);
464 void negl_m(int offset
, RegisterID base
)
466 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NEG
, base
, offset
);
469 void notl_r(RegisterID dst
)
471 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NOT
, dst
);
474 void notl_m(int offset
, RegisterID base
)
476 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NOT
, base
, offset
);
479 void orl_rr(RegisterID src
, RegisterID dst
)
481 m_formatter
.oneByteOp(OP_OR_EvGv
, src
, dst
);
484 void orl_mr(int offset
, RegisterID base
, RegisterID dst
)
486 m_formatter
.oneByteOp(OP_OR_GvEv
, dst
, base
, offset
);
489 void orl_rm(RegisterID src
, int offset
, RegisterID base
)
491 m_formatter
.oneByteOp(OP_OR_EvGv
, src
, base
, offset
);
494 void orl_ir(int imm
, RegisterID dst
)
496 if (CAN_SIGN_EXTEND_8_32(imm
)) {
497 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_OR
, dst
);
498 m_formatter
.immediate8(imm
);
500 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_OR
, dst
);
501 m_formatter
.immediate32(imm
);
505 void orl_im(int imm
, int offset
, RegisterID base
)
507 if (CAN_SIGN_EXTEND_8_32(imm
)) {
508 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_OR
, base
, offset
);
509 m_formatter
.immediate8(imm
);
511 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_OR
, base
, offset
);
512 m_formatter
.immediate32(imm
);
517 void orq_rr(RegisterID src
, RegisterID dst
)
519 m_formatter
.oneByteOp64(OP_OR_EvGv
, src
, dst
);
522 void orq_ir(int imm
, RegisterID dst
)
524 if (CAN_SIGN_EXTEND_8_32(imm
)) {
525 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_OR
, dst
);
526 m_formatter
.immediate8(imm
);
528 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_OR
, dst
);
529 m_formatter
.immediate32(imm
);
533 void orl_im(int imm
, void* addr
)
535 if (CAN_SIGN_EXTEND_8_32(imm
)) {
536 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_OR
, addr
);
537 m_formatter
.immediate8(imm
);
539 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_OR
, addr
);
540 m_formatter
.immediate32(imm
);
545 void subl_rr(RegisterID src
, RegisterID dst
)
547 m_formatter
.oneByteOp(OP_SUB_EvGv
, src
, dst
);
550 void subl_mr(int offset
, RegisterID base
, RegisterID dst
)
552 m_formatter
.oneByteOp(OP_SUB_GvEv
, dst
, base
, offset
);
555 void subl_rm(RegisterID src
, int offset
, RegisterID base
)
557 m_formatter
.oneByteOp(OP_SUB_EvGv
, src
, base
, offset
);
560 void subl_ir(int imm
, RegisterID dst
)
562 if (CAN_SIGN_EXTEND_8_32(imm
)) {
563 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, dst
);
564 m_formatter
.immediate8(imm
);
566 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, dst
);
567 m_formatter
.immediate32(imm
);
571 void subl_im(int imm
, int offset
, RegisterID base
)
573 if (CAN_SIGN_EXTEND_8_32(imm
)) {
574 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, base
, offset
);
575 m_formatter
.immediate8(imm
);
577 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, base
, offset
);
578 m_formatter
.immediate32(imm
);
583 void subq_rr(RegisterID src
, RegisterID dst
)
585 m_formatter
.oneByteOp64(OP_SUB_EvGv
, src
, dst
);
588 void subq_ir(int imm
, RegisterID dst
)
590 if (CAN_SIGN_EXTEND_8_32(imm
)) {
591 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, dst
);
592 m_formatter
.immediate8(imm
);
594 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, dst
);
595 m_formatter
.immediate32(imm
);
599 void subl_im(int imm
, void* addr
)
601 if (CAN_SIGN_EXTEND_8_32(imm
)) {
602 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, addr
);
603 m_formatter
.immediate8(imm
);
605 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, addr
);
606 m_formatter
.immediate32(imm
);
611 void xorl_rr(RegisterID src
, RegisterID dst
)
613 m_formatter
.oneByteOp(OP_XOR_EvGv
, src
, dst
);
616 void xorl_mr(int offset
, RegisterID base
, RegisterID dst
)
618 m_formatter
.oneByteOp(OP_XOR_GvEv
, dst
, base
, offset
);
621 void xorl_rm(RegisterID src
, int offset
, RegisterID base
)
623 m_formatter
.oneByteOp(OP_XOR_EvGv
, src
, base
, offset
);
626 void xorl_im(int imm
, int offset
, RegisterID base
)
628 if (CAN_SIGN_EXTEND_8_32(imm
)) {
629 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_XOR
, base
, offset
);
630 m_formatter
.immediate8(imm
);
632 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_XOR
, base
, offset
);
633 m_formatter
.immediate32(imm
);
637 void xorl_ir(int imm
, RegisterID dst
)
639 if (CAN_SIGN_EXTEND_8_32(imm
)) {
640 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_XOR
, dst
);
641 m_formatter
.immediate8(imm
);
643 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_XOR
, dst
);
644 m_formatter
.immediate32(imm
);
649 void xorq_rr(RegisterID src
, RegisterID dst
)
651 m_formatter
.oneByteOp64(OP_XOR_EvGv
, src
, dst
);
654 void xorq_ir(int imm
, RegisterID dst
)
656 if (CAN_SIGN_EXTEND_8_32(imm
)) {
657 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_XOR
, dst
);
658 m_formatter
.immediate8(imm
);
660 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_XOR
, dst
);
661 m_formatter
.immediate32(imm
);
666 void sarl_i8r(int imm
, RegisterID dst
)
669 m_formatter
.oneByteOp(OP_GROUP2_Ev1
, GROUP2_OP_SAR
, dst
);
671 m_formatter
.oneByteOp(OP_GROUP2_EvIb
, GROUP2_OP_SAR
, dst
);
672 m_formatter
.immediate8(imm
);
676 void sarl_CLr(RegisterID dst
)
678 m_formatter
.oneByteOp(OP_GROUP2_EvCL
, GROUP2_OP_SAR
, dst
);
681 void shll_i8r(int imm
, RegisterID dst
)
684 m_formatter
.oneByteOp(OP_GROUP2_Ev1
, GROUP2_OP_SHL
, dst
);
686 m_formatter
.oneByteOp(OP_GROUP2_EvIb
, GROUP2_OP_SHL
, dst
);
687 m_formatter
.immediate8(imm
);
691 void shll_CLr(RegisterID dst
)
693 m_formatter
.oneByteOp(OP_GROUP2_EvCL
, GROUP2_OP_SHL
, dst
);
697 void sarq_CLr(RegisterID dst
)
699 m_formatter
.oneByteOp64(OP_GROUP2_EvCL
, GROUP2_OP_SAR
, dst
);
702 void sarq_i8r(int imm
, RegisterID dst
)
705 m_formatter
.oneByteOp64(OP_GROUP2_Ev1
, GROUP2_OP_SAR
, dst
);
707 m_formatter
.oneByteOp64(OP_GROUP2_EvIb
, GROUP2_OP_SAR
, dst
);
708 m_formatter
.immediate8(imm
);
713 void imull_rr(RegisterID src
, RegisterID dst
)
715 m_formatter
.twoByteOp(OP2_IMUL_GvEv
, dst
, src
);
718 void imull_mr(int offset
, RegisterID base
, RegisterID dst
)
720 m_formatter
.twoByteOp(OP2_IMUL_GvEv
, dst
, base
, offset
);
723 void imull_i32r(RegisterID src
, int32_t value
, RegisterID dst
)
725 m_formatter
.oneByteOp(OP_IMUL_GvEvIz
, dst
, src
);
726 m_formatter
.immediate32(value
);
729 void idivl_r(RegisterID dst
)
731 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_IDIV
, dst
);
736 void cmpl_rr(RegisterID src
, RegisterID dst
)
738 m_formatter
.oneByteOp(OP_CMP_EvGv
, src
, dst
);
741 void cmpl_rm(RegisterID src
, int offset
, RegisterID base
)
743 m_formatter
.oneByteOp(OP_CMP_EvGv
, src
, base
, offset
);
746 void cmpl_mr(int offset
, RegisterID base
, RegisterID src
)
748 m_formatter
.oneByteOp(OP_CMP_GvEv
, src
, base
, offset
);
751 void cmpl_ir(int imm
, RegisterID dst
)
753 if (CAN_SIGN_EXTEND_8_32(imm
)) {
754 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, dst
);
755 m_formatter
.immediate8(imm
);
757 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, dst
);
758 m_formatter
.immediate32(imm
);
762 void cmpl_ir_force32(int imm
, RegisterID dst
)
764 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, dst
);
765 m_formatter
.immediate32(imm
);
768 void cmpl_im(int imm
, int offset
, RegisterID base
)
770 if (CAN_SIGN_EXTEND_8_32(imm
)) {
771 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, offset
);
772 m_formatter
.immediate8(imm
);
774 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, offset
);
775 m_formatter
.immediate32(imm
);
779 void cmpl_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
781 if (CAN_SIGN_EXTEND_8_32(imm
)) {
782 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
783 m_formatter
.immediate8(imm
);
785 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
786 m_formatter
.immediate32(imm
);
790 void cmpl_im_force32(int imm
, int offset
, RegisterID base
)
792 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, offset
);
793 m_formatter
.immediate32(imm
);
797 void cmpq_rr(RegisterID src
, RegisterID dst
)
799 m_formatter
.oneByteOp64(OP_CMP_EvGv
, src
, dst
);
802 void cmpq_rm(RegisterID src
, int offset
, RegisterID base
)
804 m_formatter
.oneByteOp64(OP_CMP_EvGv
, src
, base
, offset
);
807 void cmpq_mr(int offset
, RegisterID base
, RegisterID src
)
809 m_formatter
.oneByteOp64(OP_CMP_GvEv
, src
, base
, offset
);
812 void cmpq_ir(int imm
, RegisterID dst
)
814 if (CAN_SIGN_EXTEND_8_32(imm
)) {
815 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, dst
);
816 m_formatter
.immediate8(imm
);
818 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, dst
);
819 m_formatter
.immediate32(imm
);
823 void cmpq_im(int imm
, int offset
, RegisterID base
)
825 if (CAN_SIGN_EXTEND_8_32(imm
)) {
826 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, offset
);
827 m_formatter
.immediate8(imm
);
829 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, offset
);
830 m_formatter
.immediate32(imm
);
834 void cmpq_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
836 if (CAN_SIGN_EXTEND_8_32(imm
)) {
837 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
838 m_formatter
.immediate8(imm
);
840 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
841 m_formatter
.immediate32(imm
);
845 void cmpl_rm(RegisterID reg
, void* addr
)
847 m_formatter
.oneByteOp(OP_CMP_EvGv
, reg
, addr
);
850 void cmpl_im(int imm
, void* addr
)
852 if (CAN_SIGN_EXTEND_8_32(imm
)) {
853 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, addr
);
854 m_formatter
.immediate8(imm
);
856 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, addr
);
857 m_formatter
.immediate32(imm
);
862 void cmpw_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
864 m_formatter
.prefix(PRE_OPERAND_SIZE
);
865 m_formatter
.oneByteOp(OP_CMP_EvGv
, src
, base
, index
, scale
, offset
);
868 void cmpw_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
870 if (CAN_SIGN_EXTEND_8_32(imm
)) {
871 m_formatter
.prefix(PRE_OPERAND_SIZE
);
872 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
873 m_formatter
.immediate8(imm
);
875 m_formatter
.prefix(PRE_OPERAND_SIZE
);
876 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
877 m_formatter
.immediate16(imm
);
881 void testl_rr(RegisterID src
, RegisterID dst
)
883 m_formatter
.oneByteOp(OP_TEST_EvGv
, src
, dst
);
886 void testl_i32r(int imm
, RegisterID dst
)
888 m_formatter
.oneByteOp(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, dst
);
889 m_formatter
.immediate32(imm
);
892 void testl_i32m(int imm
, int offset
, RegisterID base
)
894 m_formatter
.oneByteOp(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, offset
);
895 m_formatter
.immediate32(imm
);
898 void testl_i32m(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
900 m_formatter
.oneByteOp(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, index
, scale
, offset
);
901 m_formatter
.immediate32(imm
);
905 void testq_rr(RegisterID src
, RegisterID dst
)
907 m_formatter
.oneByteOp64(OP_TEST_EvGv
, src
, dst
);
910 void testq_i32r(int imm
, RegisterID dst
)
912 m_formatter
.oneByteOp64(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, dst
);
913 m_formatter
.immediate32(imm
);
916 void testq_i32m(int imm
, int offset
, RegisterID base
)
918 m_formatter
.oneByteOp64(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, offset
);
919 m_formatter
.immediate32(imm
);
922 void testq_i32m(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
924 m_formatter
.oneByteOp64(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, index
, scale
, offset
);
925 m_formatter
.immediate32(imm
);
929 void testw_rr(RegisterID src
, RegisterID dst
)
931 m_formatter
.prefix(PRE_OPERAND_SIZE
);
932 m_formatter
.oneByteOp(OP_TEST_EvGv
, src
, dst
);
935 void testb_i8r(int imm
, RegisterID dst
)
937 m_formatter
.oneByteOp8(OP_GROUP3_EbIb
, GROUP3_OP_TEST
, dst
);
938 m_formatter
.immediate8(imm
);
941 void setCC_r(Condition cond
, RegisterID dst
)
943 m_formatter
.twoByteOp8(setccOpcode(cond
), (GroupOpcodeID
)0, dst
);
946 void sete_r(RegisterID dst
)
948 m_formatter
.twoByteOp8(setccOpcode(ConditionE
), (GroupOpcodeID
)0, dst
);
951 void setz_r(RegisterID dst
)
956 void setne_r(RegisterID dst
)
958 m_formatter
.twoByteOp8(setccOpcode(ConditionNE
), (GroupOpcodeID
)0, dst
);
961 void setnz_r(RegisterID dst
)
970 m_formatter
.oneByteOp(OP_CDQ
);
973 void xchgl_rr(RegisterID src
, RegisterID dst
)
975 m_formatter
.oneByteOp(OP_XCHG_EvGv
, src
, dst
);
979 void xchgq_rr(RegisterID src
, RegisterID dst
)
981 m_formatter
.oneByteOp64(OP_XCHG_EvGv
, src
, dst
);
985 void movl_rr(RegisterID src
, RegisterID dst
)
987 m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, dst
);
990 void movl_rm(RegisterID src
, int offset
, RegisterID base
)
992 m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, base
, offset
);
995 void movl_rm_disp32(RegisterID src
, int offset
, RegisterID base
)
997 m_formatter
.oneByteOp_disp32(OP_MOV_EvGv
, src
, base
, offset
);
1000 void movl_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1002 m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, base
, index
, scale
, offset
);
1005 void movl_mEAX(void* addr
)
1007 m_formatter
.oneByteOp(OP_MOV_EAXOv
);
1008 #if PLATFORM(X86_64)
1009 m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
));
1011 m_formatter
.immediate32(reinterpret_cast<int>(addr
));
1015 void movl_mr(int offset
, RegisterID base
, RegisterID dst
)
1017 m_formatter
.oneByteOp(OP_MOV_GvEv
, dst
, base
, offset
);
1020 void movl_mr_disp32(int offset
, RegisterID base
, RegisterID dst
)
1022 m_formatter
.oneByteOp_disp32(OP_MOV_GvEv
, dst
, base
, offset
);
1025 void movl_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, RegisterID dst
)
1027 m_formatter
.oneByteOp(OP_MOV_GvEv
, dst
, base
, index
, scale
, offset
);
1030 void movl_i32r(int imm
, RegisterID dst
)
1032 m_formatter
.oneByteOp(OP_MOV_EAXIv
, dst
);
1033 m_formatter
.immediate32(imm
);
1036 void movl_i32m(int imm
, int offset
, RegisterID base
)
1038 m_formatter
.oneByteOp(OP_GROUP11_EvIz
, GROUP11_MOV
, base
, offset
);
1039 m_formatter
.immediate32(imm
);
1042 void movl_EAXm(void* addr
)
1044 m_formatter
.oneByteOp(OP_MOV_OvEAX
);
1045 #if PLATFORM(X86_64)
1046 m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
));
1048 m_formatter
.immediate32(reinterpret_cast<int>(addr
));
1052 #if PLATFORM(X86_64)
1053 void movq_rr(RegisterID src
, RegisterID dst
)
1055 m_formatter
.oneByteOp64(OP_MOV_EvGv
, src
, dst
);
1058 void movq_rm(RegisterID src
, int offset
, RegisterID base
)
1060 m_formatter
.oneByteOp64(OP_MOV_EvGv
, src
, base
, offset
);
1063 void movq_rm_disp32(RegisterID src
, int offset
, RegisterID base
)
1065 m_formatter
.oneByteOp64_disp32(OP_MOV_EvGv
, src
, base
, offset
);
1068 void movq_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1070 m_formatter
.oneByteOp64(OP_MOV_EvGv
, src
, base
, index
, scale
, offset
);
1073 void movq_mEAX(void* addr
)
1075 m_formatter
.oneByteOp64(OP_MOV_EAXOv
);
1076 m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
));
1079 void movq_EAXm(void* addr
)
1081 m_formatter
.oneByteOp64(OP_MOV_OvEAX
);
1082 m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
));
1085 void movq_mr(int offset
, RegisterID base
, RegisterID dst
)
1087 m_formatter
.oneByteOp64(OP_MOV_GvEv
, dst
, base
, offset
);
1090 void movq_mr_disp32(int offset
, RegisterID base
, RegisterID dst
)
1092 m_formatter
.oneByteOp64_disp32(OP_MOV_GvEv
, dst
, base
, offset
);
1095 void movq_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, RegisterID dst
)
1097 m_formatter
.oneByteOp64(OP_MOV_GvEv
, dst
, base
, index
, scale
, offset
);
1100 void movq_i32m(int imm
, int offset
, RegisterID base
)
1102 m_formatter
.oneByteOp64(OP_GROUP11_EvIz
, GROUP11_MOV
, base
, offset
);
1103 m_formatter
.immediate32(imm
);
1106 void movq_i64r(int64_t imm
, RegisterID dst
)
1108 m_formatter
.oneByteOp64(OP_MOV_EAXIv
, dst
);
1109 m_formatter
.immediate64(imm
);
1112 void movsxd_rr(RegisterID src
, RegisterID dst
)
1114 m_formatter
.oneByteOp64(OP_MOVSXD_GvEv
, dst
, src
);
1119 void movl_rm(RegisterID src
, void* addr
)
1121 if (src
== X86::eax
)
1124 m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, addr
);
1127 void movl_mr(void* addr
, RegisterID dst
)
1129 if (dst
== X86::eax
)
1132 m_formatter
.oneByteOp(OP_MOV_GvEv
, dst
, addr
);
1135 void movl_i32m(int imm
, void* addr
)
1137 m_formatter
.oneByteOp(OP_GROUP11_EvIz
, GROUP11_MOV
, addr
);
1138 m_formatter
.immediate32(imm
);
1142 void movzwl_mr(int offset
, RegisterID base
, RegisterID dst
)
1144 m_formatter
.twoByteOp(OP2_MOVZX_GvEw
, dst
, base
, offset
);
1147 void movzwl_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, RegisterID dst
)
1149 m_formatter
.twoByteOp(OP2_MOVZX_GvEw
, dst
, base
, index
, scale
, offset
);
1152 void movzbl_rr(RegisterID src
, RegisterID dst
)
1154 // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
1155 // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
1156 // REX prefixes are defined to be silently ignored by the processor.
1157 m_formatter
.twoByteOp8(OP2_MOVZX_GvEb
, dst
, src
);
1160 void leal_mr(int offset
, RegisterID base
, RegisterID dst
)
1162 m_formatter
.oneByteOp(OP_LEA
, dst
, base
, offset
);
1164 #if PLATFORM(X86_64)
1165 void leaq_mr(int offset
, RegisterID base
, RegisterID dst
)
1167 m_formatter
.oneByteOp64(OP_LEA
, dst
, base
, offset
);
1175 m_formatter
.oneByteOp(OP_CALL_rel32
);
1176 return m_formatter
.immediateRel32();
1179 JmpSrc
call(RegisterID dst
)
1181 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_CALLN
, dst
);
1182 return JmpSrc(m_formatter
.size());
1185 void call_m(int offset
, RegisterID base
)
1187 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_CALLN
, base
, offset
);
1192 m_formatter
.oneByteOp(OP_JMP_rel32
);
1193 return m_formatter
.immediateRel32();
1196 // Return a JmpSrc so we have a label to the jump, so we can use this
1197 // To make a tail recursive call on x86-64. The MacroAssembler
1198 // really shouldn't wrap this as a Jump, since it can't be linked. :-/
1199 JmpSrc
jmp_r(RegisterID dst
)
1201 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_JMPN
, dst
);
1202 return JmpSrc(m_formatter
.size());
1205 void jmp_m(int offset
, RegisterID base
)
1207 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_JMPN
, base
, offset
);
1212 m_formatter
.twoByteOp(jccRel32(ConditionNE
));
1213 return m_formatter
.immediateRel32();
1223 m_formatter
.twoByteOp(jccRel32(ConditionE
));
1224 return m_formatter
.immediateRel32();
1234 m_formatter
.twoByteOp(jccRel32(ConditionL
));
1235 return m_formatter
.immediateRel32();
1240 m_formatter
.twoByteOp(jccRel32(ConditionB
));
1241 return m_formatter
.immediateRel32();
1246 m_formatter
.twoByteOp(jccRel32(ConditionLE
));
1247 return m_formatter
.immediateRel32();
1252 m_formatter
.twoByteOp(jccRel32(ConditionBE
));
1253 return m_formatter
.immediateRel32();
1258 m_formatter
.twoByteOp(jccRel32(ConditionGE
));
1259 return m_formatter
.immediateRel32();
1264 m_formatter
.twoByteOp(jccRel32(ConditionG
));
1265 return m_formatter
.immediateRel32();
1270 m_formatter
.twoByteOp(jccRel32(ConditionA
));
1271 return m_formatter
.immediateRel32();
1276 m_formatter
.twoByteOp(jccRel32(ConditionAE
));
1277 return m_formatter
.immediateRel32();
1282 m_formatter
.twoByteOp(jccRel32(ConditionO
));
1283 return m_formatter
.immediateRel32();
1288 m_formatter
.twoByteOp(jccRel32(ConditionP
));
1289 return m_formatter
.immediateRel32();
1294 m_formatter
.twoByteOp(jccRel32(ConditionS
));
1295 return m_formatter
.immediateRel32();
1298 JmpSrc
jCC(Condition cond
)
1300 m_formatter
.twoByteOp(jccRel32(cond
));
1301 return m_formatter
.immediateRel32();
1306 void addsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1308 m_formatter
.prefix(PRE_SSE_F2
);
1309 m_formatter
.twoByteOp(OP2_ADDSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1312 void addsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1314 m_formatter
.prefix(PRE_SSE_F2
);
1315 m_formatter
.twoByteOp(OP2_ADDSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1318 void cvtsi2sd_rr(RegisterID src
, XMMRegisterID dst
)
1320 m_formatter
.prefix(PRE_SSE_F2
);
1321 m_formatter
.twoByteOp(OP2_CVTSI2SD_VsdEd
, (RegisterID
)dst
, src
);
1324 void cvtsi2sd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1326 m_formatter
.prefix(PRE_SSE_F2
);
1327 m_formatter
.twoByteOp(OP2_CVTSI2SD_VsdEd
, (RegisterID
)dst
, base
, offset
);
1330 #if !PLATFORM(X86_64)
1331 void cvtsi2sd_mr(void* address
, XMMRegisterID dst
)
1333 m_formatter
.prefix(PRE_SSE_F2
);
1334 m_formatter
.twoByteOp(OP2_CVTSI2SD_VsdEd
, (RegisterID
)dst
, address
);
1338 void cvttsd2si_rr(XMMRegisterID src
, RegisterID dst
)
1340 m_formatter
.prefix(PRE_SSE_F2
);
1341 m_formatter
.twoByteOp(OP2_CVTTSD2SI_GdWsd
, dst
, (RegisterID
)src
);
1344 void movd_rr(XMMRegisterID src
, RegisterID dst
)
1346 m_formatter
.prefix(PRE_SSE_66
);
1347 m_formatter
.twoByteOp(OP2_MOVD_EdVd
, (RegisterID
)src
, dst
);
1350 #if PLATFORM(X86_64)
1351 void movq_rr(XMMRegisterID src
, RegisterID dst
)
1353 m_formatter
.prefix(PRE_SSE_66
);
1354 m_formatter
.twoByteOp64(OP2_MOVD_EdVd
, (RegisterID
)src
, dst
);
1357 void movq_rr(RegisterID src
, XMMRegisterID dst
)
1359 m_formatter
.prefix(PRE_SSE_66
);
1360 m_formatter
.twoByteOp64(OP2_MOVD_VdEd
, (RegisterID
)dst
, src
);
1364 void movsd_rm(XMMRegisterID src
, int offset
, RegisterID base
)
1366 m_formatter
.prefix(PRE_SSE_F2
);
1367 m_formatter
.twoByteOp(OP2_MOVSD_WsdVsd
, (RegisterID
)src
, base
, offset
);
1370 void movsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1372 m_formatter
.prefix(PRE_SSE_F2
);
1373 m_formatter
.twoByteOp(OP2_MOVSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1376 #if !PLATFORM(X86_64)
1377 void movsd_mr(void* address
, XMMRegisterID dst
)
1379 m_formatter
.prefix(PRE_SSE_F2
);
1380 m_formatter
.twoByteOp(OP2_MOVSD_VsdWsd
, (RegisterID
)dst
, address
);
1384 void mulsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1386 m_formatter
.prefix(PRE_SSE_F2
);
1387 m_formatter
.twoByteOp(OP2_MULSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1390 void mulsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1392 m_formatter
.prefix(PRE_SSE_F2
);
1393 m_formatter
.twoByteOp(OP2_MULSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1396 void pextrw_irr(int whichWord
, XMMRegisterID src
, RegisterID dst
)
1398 m_formatter
.prefix(PRE_SSE_66
);
1399 m_formatter
.twoByteOp(OP2_PEXTRW_GdUdIb
, (RegisterID
)dst
, (RegisterID
)src
);
1400 m_formatter
.immediate8(whichWord
);
1403 void subsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1405 m_formatter
.prefix(PRE_SSE_F2
);
1406 m_formatter
.twoByteOp(OP2_SUBSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1409 void subsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1411 m_formatter
.prefix(PRE_SSE_F2
);
1412 m_formatter
.twoByteOp(OP2_SUBSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1415 void ucomisd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1417 m_formatter
.prefix(PRE_SSE_66
);
1418 m_formatter
.twoByteOp(OP2_UCOMISD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1421 void ucomisd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1423 m_formatter
.prefix(PRE_SSE_66
);
1424 m_formatter
.twoByteOp(OP2_UCOMISD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1427 void divsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1429 m_formatter
.prefix(PRE_SSE_F2
);
1430 m_formatter
.twoByteOp(OP2_DIVSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1433 void divsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1435 m_formatter
.prefix(PRE_SSE_F2
);
1436 m_formatter
.twoByteOp(OP2_DIVSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1439 void xorpd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1441 m_formatter
.prefix(PRE_SSE_66
);
1442 m_formatter
.twoByteOp(OP2_XORPD_VpdWpd
, (RegisterID
)dst
, (RegisterID
)src
);
// Misc instructions:
1449 m_formatter
.oneByteOp(OP_INT3
);
1454 m_formatter
.oneByteOp(OP_RET
);
1457 void predictNotTaken()
1459 m_formatter
.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN
);
1462 // Assembler admin methods:
1466 return JmpDst(m_formatter
.size());
1469 static JmpDst
labelFor(JmpSrc jump
, intptr_t offset
= 0)
1471 return JmpDst(jump
.m_offset
+ offset
);
1474 JmpDst
align(int alignment
)
1476 while (!m_formatter
.isAligned(alignment
))
1477 m_formatter
.oneByteOp(OP_HLT
);
// Linking & patching:
//
// 'link' and 'patch' methods are for use on unprotected code - such as the code
// within the AssemblerBuffer, and code being patched by the patch buffer. Once
// code has been finalized it is (platform support permitting) within a non-
// writable region of memory; to modify the code in an execute-only executable
// pool the 'repatch' and 'relink' methods should be used.
1490 void linkJump(JmpSrc from
, JmpDst to
)
1492 ASSERT(from
.m_offset
!= -1);
1493 ASSERT(to
.m_offset
!= -1);
1495 char* code
= reinterpret_cast<char*>(m_formatter
.data());
1496 setRel32(code
+ from
.m_offset
, code
+ to
.m_offset
);
1499 static void linkJump(void* code
, JmpSrc from
, void* to
)
1501 ASSERT(from
.m_offset
!= -1);
1503 setRel32(reinterpret_cast<char*>(code
) + from
.m_offset
, to
);
1506 static void linkCall(void* code
, JmpSrc from
, void* to
)
1508 ASSERT(from
.m_offset
!= -1);
1510 setRel32(reinterpret_cast<char*>(code
) + from
.m_offset
, to
);
1513 static void linkPointer(void* code
, JmpDst where
, void* value
)
1515 ASSERT(where
.m_offset
!= -1);
1517 setPointer(reinterpret_cast<char*>(code
) + where
.m_offset
, value
);
1520 static void relinkJump(void* from
, void* to
)
1525 static void relinkCall(void* from
, void* to
)
1530 static void repatchInt32(void* where
, int32_t value
)
1532 setInt32(where
, value
);
1535 static void repatchPointer(void* where
, void* value
)
1537 setPointer(where
, value
);
1540 static void repatchLoadPtrToLEA(void* where
)
1542 #if PLATFORM(X86_64)
1543 // On x86-64 pointer memory accesses require a 64-bit operand, and as such a REX prefix.
1544 // Skip over the prefix byte.
1545 where
= reinterpret_cast<char*>(where
) + 1;
1547 *reinterpret_cast<unsigned char*>(where
) = static_cast<unsigned char>(OP_LEA
);
1550 static unsigned getCallReturnOffset(JmpSrc call
)
1552 ASSERT(call
.m_offset
>= 0);
1553 return call
.m_offset
;
1556 static void* getRelocatedAddress(void* code
, JmpSrc jump
)
1558 ASSERT(jump
.m_offset
!= -1);
1560 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code
) + jump
.m_offset
);
1563 static void* getRelocatedAddress(void* code
, JmpDst destination
)
1565 ASSERT(destination
.m_offset
!= -1);
1567 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code
) + destination
.m_offset
);
1570 static int getDifferenceBetweenLabels(JmpDst src
, JmpDst dst
)
1572 return dst
.m_offset
- src
.m_offset
;
1575 static int getDifferenceBetweenLabels(JmpDst src
, JmpSrc dst
)
1577 return dst
.m_offset
- src
.m_offset
;
1580 static int getDifferenceBetweenLabels(JmpSrc src
, JmpDst dst
)
1582 return dst
.m_offset
- src
.m_offset
;
1585 void* executableCopy(ExecutablePool
* allocator
)
1587 void* copy
= m_formatter
.executableCopy(allocator
);
// Store 'value' into the pointer-sized slot ENDING at 'where' (note the [-1]:
// 'where' points just past the immediate, as recorded by the label machinery).
static void setPointer(void* where, void* value)
{
    reinterpret_cast<void**>(where)[-1] = value;
}
// Store 'value' into the 32-bit slot ENDING at 'where' (see setPointer for the [-1] convention).
static void setInt32(void* where, int32_t value)
{
    reinterpret_cast<int32_t*>(where)[-1] = value;
}
1604 static void setRel32(void* from
, void* to
)
1606 intptr_t offset
= reinterpret_cast<intptr_t>(to
) - reinterpret_cast<intptr_t>(from
);
1607 ASSERT(offset
== static_cast<int32_t>(offset
));
1609 setInt32(from
, offset
);
1612 class X86InstructionFormatter
{
1614 static const int maxInstructionSize
= 16;
1618 // Legacy prefix bytes:
1620 // These are emmitted prior to the instruction.
1622 void prefix(OneByteOpcodeID pre
)
1624 m_buffer
.putByte(pre
);
1627 // Word-sized operands / no operand instruction formatters.
1629 // In addition to the opcode, the following operand permutations are supported:
1630 // * None - instruction takes no operands.
1631 // * One register - the low three bits of the RegisterID are added into the opcode.
1632 // * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
1633 // * Three argument ModRM - a register, and a register and an offset describing a memory operand.
1634 // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
1636 // For 32-bit x86 targets, the address operand may also be provided as a void*.
1637 // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
1639 // The twoByteOp methods plant two-byte Intel instructions sequences (first opcode byte 0x0F).
1641 void oneByteOp(OneByteOpcodeID opcode
)
1643 m_buffer
.ensureSpace(maxInstructionSize
);
1644 m_buffer
.putByteUnchecked(opcode
);
1647 void oneByteOp(OneByteOpcodeID opcode
, RegisterID reg
)
1649 m_buffer
.ensureSpace(maxInstructionSize
);
1650 emitRexIfNeeded(0, 0, reg
);
1651 m_buffer
.putByteUnchecked(opcode
+ (reg
& 7));
1654 void oneByteOp(OneByteOpcodeID opcode
, int reg
, RegisterID rm
)
1656 m_buffer
.ensureSpace(maxInstructionSize
);
1657 emitRexIfNeeded(reg
, 0, rm
);
1658 m_buffer
.putByteUnchecked(opcode
);
1659 registerModRM(reg
, rm
);
1662 void oneByteOp(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1664 m_buffer
.ensureSpace(maxInstructionSize
);
1665 emitRexIfNeeded(reg
, 0, base
);
1666 m_buffer
.putByteUnchecked(opcode
);
1667 memoryModRM(reg
, base
, offset
);
1670 void oneByteOp_disp32(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1672 m_buffer
.ensureSpace(maxInstructionSize
);
1673 emitRexIfNeeded(reg
, 0, base
);
1674 m_buffer
.putByteUnchecked(opcode
);
1675 memoryModRM_disp32(reg
, base
, offset
);
1678 void oneByteOp(OneByteOpcodeID opcode
, int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
1680 m_buffer
.ensureSpace(maxInstructionSize
);
1681 emitRexIfNeeded(reg
, index
, base
);
1682 m_buffer
.putByteUnchecked(opcode
);
1683 memoryModRM(reg
, base
, index
, scale
, offset
);
1686 #if !PLATFORM(X86_64)
1687 void oneByteOp(OneByteOpcodeID opcode
, int reg
, void* address
)
1689 m_buffer
.ensureSpace(maxInstructionSize
);
1690 m_buffer
.putByteUnchecked(opcode
);
1691 memoryModRM(reg
, address
);
1695 void twoByteOp(TwoByteOpcodeID opcode
)
1697 m_buffer
.ensureSpace(maxInstructionSize
);
1698 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1699 m_buffer
.putByteUnchecked(opcode
);
1702 void twoByteOp(TwoByteOpcodeID opcode
, int reg
, RegisterID rm
)
1704 m_buffer
.ensureSpace(maxInstructionSize
);
1705 emitRexIfNeeded(reg
, 0, rm
);
1706 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1707 m_buffer
.putByteUnchecked(opcode
);
1708 registerModRM(reg
, rm
);
1711 void twoByteOp(TwoByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1713 m_buffer
.ensureSpace(maxInstructionSize
);
1714 emitRexIfNeeded(reg
, 0, base
);
1715 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1716 m_buffer
.putByteUnchecked(opcode
);
1717 memoryModRM(reg
, base
, offset
);
1720 void twoByteOp(TwoByteOpcodeID opcode
, int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
1722 m_buffer
.ensureSpace(maxInstructionSize
);
1723 emitRexIfNeeded(reg
, index
, base
);
1724 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1725 m_buffer
.putByteUnchecked(opcode
);
1726 memoryModRM(reg
, base
, index
, scale
, offset
);
1729 #if !PLATFORM(X86_64)
1730 void twoByteOp(TwoByteOpcodeID opcode
, int reg
, void* address
)
1732 m_buffer
.ensureSpace(maxInstructionSize
);
1733 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1734 m_buffer
.putByteUnchecked(opcode
);
1735 memoryModRM(reg
, address
);
1739 #if PLATFORM(X86_64)
1740 // Quad-word-sized operands:
1742 // Used to format 64-bit operantions, planting a REX.w prefix.
1743 // When planting d64 or f64 instructions, not requiring a REX.w prefix,
1744 // the normal (non-'64'-postfixed) formatters should be used.
1746 void oneByteOp64(OneByteOpcodeID opcode
)
1748 m_buffer
.ensureSpace(maxInstructionSize
);
1750 m_buffer
.putByteUnchecked(opcode
);
1753 void oneByteOp64(OneByteOpcodeID opcode
, RegisterID reg
)
1755 m_buffer
.ensureSpace(maxInstructionSize
);
1756 emitRexW(0, 0, reg
);
1757 m_buffer
.putByteUnchecked(opcode
+ (reg
& 7));
1760 void oneByteOp64(OneByteOpcodeID opcode
, int reg
, RegisterID rm
)
1762 m_buffer
.ensureSpace(maxInstructionSize
);
1763 emitRexW(reg
, 0, rm
);
1764 m_buffer
.putByteUnchecked(opcode
);
1765 registerModRM(reg
, rm
);
1768 void oneByteOp64(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1770 m_buffer
.ensureSpace(maxInstructionSize
);
1771 emitRexW(reg
, 0, base
);
1772 m_buffer
.putByteUnchecked(opcode
);
1773 memoryModRM(reg
, base
, offset
);
1776 void oneByteOp64_disp32(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1778 m_buffer
.ensureSpace(maxInstructionSize
);
1779 emitRexW(reg
, 0, base
);
1780 m_buffer
.putByteUnchecked(opcode
);
1781 memoryModRM_disp32(reg
, base
, offset
);
1784 void oneByteOp64(OneByteOpcodeID opcode
, int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
1786 m_buffer
.ensureSpace(maxInstructionSize
);
1787 emitRexW(reg
, index
, base
);
1788 m_buffer
.putByteUnchecked(opcode
);
1789 memoryModRM(reg
, base
, index
, scale
, offset
);
1792 void twoByteOp64(TwoByteOpcodeID opcode
, int reg
, RegisterID rm
)
1794 m_buffer
.ensureSpace(maxInstructionSize
);
1795 emitRexW(reg
, 0, rm
);
1796 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1797 m_buffer
.putByteUnchecked(opcode
);
1798 registerModRM(reg
, rm
);
1804 // These methods format byte operations. Byte operations differ from the normal
1805 // formatters in the circumstances under which they will decide to emit REX prefixes.
1806 // These should be used where any register operand signifies a byte register.
1808 // The disctinction is due to the handling of register numbers in the range 4..7 on
1809 // x86-64. These register numbers may either represent the second byte of the first
1810 // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
1812 // Since ah..bh cannot be used in all permutations of operands (specifically cannot
1813 // be accessed where a REX prefix is present), these are likely best treated as
1814 // deprecated. In order to ensure the correct registers spl..dil are selected a
1815 // REX prefix will be emitted for any byte register operand in the range 4..15.
1817 // These formatters may be used in instructions where a mix of operand sizes, in which
1818 // case an unnecessary REX will be emitted, for example:
1820 // In this case a REX will be planted since edi is 7 (and were this a byte operand
1821 // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
1822 // be silently ignored by the processor.
1824 // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
1825 // is provided to check byte register operands.
1827 void oneByteOp8(OneByteOpcodeID opcode
, GroupOpcodeID groupOp
, RegisterID rm
)
1829 m_buffer
.ensureSpace(maxInstructionSize
);
1830 emitRexIf(byteRegRequiresRex(rm
), 0, 0, rm
);
1831 m_buffer
.putByteUnchecked(opcode
);
1832 registerModRM(groupOp
, rm
);
1835 void twoByteOp8(TwoByteOpcodeID opcode
, RegisterID reg
, RegisterID rm
)
1837 m_buffer
.ensureSpace(maxInstructionSize
);
1838 emitRexIf(byteRegRequiresRex(reg
)|byteRegRequiresRex(rm
), reg
, 0, rm
);
1839 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1840 m_buffer
.putByteUnchecked(opcode
);
1841 registerModRM(reg
, rm
);
1844 void twoByteOp8(TwoByteOpcodeID opcode
, GroupOpcodeID groupOp
, RegisterID rm
)
1846 m_buffer
.ensureSpace(maxInstructionSize
);
1847 emitRexIf(byteRegRequiresRex(rm
), 0, 0, rm
);
1848 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1849 m_buffer
.putByteUnchecked(opcode
);
1850 registerModRM(groupOp
, rm
);
1855 // An immedaite should be appended where appropriate after an op has been emitted.
1856 // The writes are unchecked since the opcode formatters above will have ensured space.
1858 void immediate8(int imm
)
1860 m_buffer
.putByteUnchecked(imm
);
1863 void immediate16(int imm
)
1865 m_buffer
.putShortUnchecked(imm
);
1868 void immediate32(int imm
)
1870 m_buffer
.putIntUnchecked(imm
);
1873 void immediate64(int64_t imm
)
1875 m_buffer
.putInt64Unchecked(imm
);
1878 JmpSrc
immediateRel32()
1880 m_buffer
.putIntUnchecked(0);
1881 return JmpSrc(m_buffer
.size());
1884 // Administrative methods:
1886 size_t size() const { return m_buffer
.size(); }
1887 bool isAligned(int alignment
) const { return m_buffer
.isAligned(alignment
); }
1888 void* data() const { return m_buffer
.data(); }
1889 void* executableCopy(ExecutablePool
* allocator
) { return m_buffer
.executableCopy(allocator
); }
1893 // Internals; ModRm and REX formatters.
1895 static const RegisterID noBase
= X86::ebp
;
1896 static const RegisterID hasSib
= X86::esp
;
1897 static const RegisterID noIndex
= X86::esp
;
1898 #if PLATFORM(X86_64)
1899 static const RegisterID noBase2
= X86::r13
;
1900 static const RegisterID hasSib2
= X86::r12
;
1902 // Registers r8 & above require a REX prefixe.
1903 inline bool regRequiresRex(int reg
)
1905 return (reg
>= X86::r8
);
1908 // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
1909 inline bool byteRegRequiresRex(int reg
)
1911 return (reg
>= X86::esp
);
1914 // Format a REX prefix byte.
1915 inline void emitRex(bool w
, int r
, int x
, int b
)
1917 m_buffer
.putByteUnchecked(PRE_REX
| ((int)w
<< 3) | ((r
>>3)<<2) | ((x
>>3)<<1) | (b
>>3));
1920 // Used to plant a REX byte with REX.w set (for 64-bit operations).
1921 inline void emitRexW(int r
, int x
, int b
)
1923 emitRex(true, r
, x
, b
);
1926 // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
1927 // regRequiresRex() to check other registers (i.e. address base & index).
1928 inline void emitRexIf(bool condition
, int r
, int x
, int b
)
1930 if (condition
) emitRex(false, r
, x
, b
);
1933 // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
1934 inline void emitRexIfNeeded(int r
, int x
, int b
)
1936 emitRexIf(regRequiresRex(r
) || regRequiresRex(x
) || regRequiresRex(b
), r
, x
, b
);
1939 // No REX prefix bytes on 32-bit x86.
1940 inline bool regRequiresRex(int) { return false; }
1941 inline bool byteRegRequiresRex(int) { return false; }
1942 inline void emitRexIf(bool, int, int, int) {}
1943 inline void emitRexIfNeeded(int, int, int) {}
1953 void putModRm(ModRmMode mode
, int reg
, RegisterID rm
)
1955 m_buffer
.putByteUnchecked((mode
<< 6) | ((reg
& 7) << 3) | (rm
& 7));
1958 void putModRmSib(ModRmMode mode
, int reg
, RegisterID base
, RegisterID index
, int scale
)
1960 ASSERT(mode
!= ModRmRegister
);
1962 putModRm(mode
, reg
, hasSib
);
1963 m_buffer
.putByteUnchecked((scale
<< 6) | ((index
& 7) << 3) | (base
& 7));
1966 void registerModRM(int reg
, RegisterID rm
)
1968 putModRm(ModRmRegister
, reg
, rm
);
1971 void memoryModRM(int reg
, RegisterID base
, int offset
)
1973 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
1974 #if PLATFORM(X86_64)
1975 if ((base
== hasSib
) || (base
== hasSib2
)) {
1977 if (base
== hasSib
) {
1979 if (!offset
) // No need to check if the base is noBase, since we know it is hasSib!
1980 putModRmSib(ModRmMemoryNoDisp
, reg
, base
, noIndex
, 0);
1981 else if (CAN_SIGN_EXTEND_8_32(offset
)) {
1982 putModRmSib(ModRmMemoryDisp8
, reg
, base
, noIndex
, 0);
1983 m_buffer
.putByteUnchecked(offset
);
1985 putModRmSib(ModRmMemoryDisp32
, reg
, base
, noIndex
, 0);
1986 m_buffer
.putIntUnchecked(offset
);
1989 #if PLATFORM(X86_64)
1990 if (!offset
&& (base
!= noBase
) && (base
!= noBase2
))
1992 if (!offset
&& (base
!= noBase
))
1994 putModRm(ModRmMemoryNoDisp
, reg
, base
);
1995 else if (CAN_SIGN_EXTEND_8_32(offset
)) {
1996 putModRm(ModRmMemoryDisp8
, reg
, base
);
1997 m_buffer
.putByteUnchecked(offset
);
1999 putModRm(ModRmMemoryDisp32
, reg
, base
);
2000 m_buffer
.putIntUnchecked(offset
);
2005 void memoryModRM_disp32(int reg
, RegisterID base
, int offset
)
2007 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2008 #if PLATFORM(X86_64)
2009 if ((base
== hasSib
) || (base
== hasSib2
)) {
2011 if (base
== hasSib
) {
2013 putModRmSib(ModRmMemoryDisp32
, reg
, base
, noIndex
, 0);
2014 m_buffer
.putIntUnchecked(offset
);
2016 putModRm(ModRmMemoryDisp32
, reg
, base
);
2017 m_buffer
.putIntUnchecked(offset
);
2021 void memoryModRM(int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
2023 ASSERT(index
!= noIndex
);
2025 #if PLATFORM(X86_64)
2026 if (!offset
&& (base
!= noBase
) && (base
!= noBase2
))
2028 if (!offset
&& (base
!= noBase
))
2030 putModRmSib(ModRmMemoryNoDisp
, reg
, base
, index
, scale
);
2031 else if (CAN_SIGN_EXTEND_8_32(offset
)) {
2032 putModRmSib(ModRmMemoryDisp8
, reg
, base
, index
, scale
);
2033 m_buffer
.putByteUnchecked(offset
);
2035 putModRmSib(ModRmMemoryDisp32
, reg
, base
, index
, scale
);
2036 m_buffer
.putIntUnchecked(offset
);
2040 #if !PLATFORM(X86_64)
2041 void memoryModRM(int reg
, void* address
)
2043 // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
2044 putModRm(ModRmMemoryNoDisp
, reg
, noBase
);
2045 m_buffer
.putIntUnchecked(reinterpret_cast<int32_t>(address
));
2049 AssemblerBuffer m_buffer
;
#endif // ENABLE(ASSEMBLER) && PLATFORM(X86)

#endif // X86Assembler_h