/*
 * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef X86Assembler_h
#define X86Assembler_h

#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))

#include "AssemblerBuffer.h"
#include "JITCompilationEffort.h"

#include <wtf/Assertions.h>
#include <wtf/Vector.h>
inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
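// Illustrative note (added annotation, not original logic): this predicate is what the
// emitters below use to pick between the sign-extended 8-bit immediate form (Ib) and the
// full 32-bit immediate form (Iz) of the group-1 ALU opcodes. A minimal sketch of the idea,
// assuming a hypothetical emit()/emit8()/emit32() helper:
//
//     if (CAN_SIGN_EXTEND_8_32(imm)) {        // e.g. imm == -1 or 127
//         emit(OP_GROUP1_EvIb); emit8(imm);   // 0x83 /r ib
//     } else {                                // e.g. imm == 0x12345678
//         emit(OP_GROUP1_EvIz); emit32(imm);  // 0x81 /r id
//     }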
namespace X86Registers {

typedef X86Registers::RegisterID RegisterID;
typedef X86Registers::XMMRegisterID XMMRegisterID;
typedef XMMRegisterID FPRegisterID;

        ConditionC  = ConditionB,
        ConditionNC = ConditionAE,
        OP_2BYTE_ESCAPE = 0x0F,
        PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
        OP_MOVSXD_GvEv = 0x63,
        PRE_OPERAND_SIZE = 0x66,
        OP_IMUL_GvEvIz = 0x69,
        OP_GROUP1_EbIb = 0x80,
        OP_GROUP1_EvIz = 0x81,
        OP_GROUP1_EvIb = 0x83,
        OP_GROUP1A_Ev = 0x8F,
        OP_GROUP2_EvIb = 0xC1,
        OP_GROUP11_EvIb = 0xC6,
        OP_GROUP11_EvIz = 0xC7,
        OP_GROUP2_Ev1 = 0xD1,
        OP_GROUP2_EvCL = 0xD3,
        OP_CALL_rel32 = 0xE8,
        OP_GROUP3_EbIb = 0xF6,
        OP_GROUP3_EvIz = 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.

        OP2_MOVSD_VsdWsd = 0x10,
        OP2_MOVSD_WsdVsd = 0x11,
        OP2_MOVSS_VsdWsd = 0x10,
        OP2_MOVSS_WsdVsd = 0x11,
        OP2_CVTSI2SD_VsdEd = 0x2A,
        OP2_CVTTSD2SI_GdWsd = 0x2C,
        OP2_UCOMISD_VsdWsd = 0x2E,
        OP2_ADDSD_VsdWsd = 0x58,
        OP2_MULSD_VsdWsd = 0x59,
        OP2_CVTSD2SS_VsdWsd = 0x5A,
        OP2_CVTSS2SD_VsdWsd = 0x5A,
        OP2_SUBSD_VsdWsd = 0x5C,
        OP2_DIVSD_VsdWsd = 0x5E,
        OP2_SQRTSD_VsdWsd = 0x51,
        OP2_ANDNPD_VpdWpd = 0x55,
        OP2_XORPD_VpdWpd = 0x57,
        OP2_MOVD_VdEd = 0x6E,
        OP2_MOVD_EdVd = 0x7E,
        OP2_JCC_rel32 = 0x80,
        OP2_IMUL_GvEv = 0xAF,
        OP2_MOVZX_GvEb = 0xB6,
        OP2_MOVSX_GvEb = 0xBE,
        OP2_MOVZX_GvEw = 0xB7,
        OP2_MOVSX_GvEw = 0xBF,
        OP2_PEXTRW_GdUdIb = 0xC5,
        OP2_PSLLQ_UdqIb = 0x73,
        OP2_PSRLQ_UdqIb = 0x73,
        OP2_POR_VdqWdq = 0xEB,
    TwoByteOpcodeID jccRel32(Condition cond)
    {
        return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
    }

    TwoByteOpcodeID setccOpcode(Condition cond)
    {
        return (TwoByteOpcodeID)(OP_SETCC + cond);
    }
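    // Illustrative example (added annotation, not original code): the Jcc and SETcc opcodes
    // are laid out so the condition code can simply be added to a base opcode. Assuming the
    // usual x86 condition numbering (ConditionE == 4):
    //
    //     jccRel32(ConditionE)    == 0x84   // 0F 84 rel32, i.e. JE/JZ
    //     setccOpcode(ConditionE) == 0x94   // 0F 94 /0,    i.e. SETE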
        GROUP14_OP_PSLLQ = 6,
        GROUP14_OP_PSRLQ = 2,

        ESCAPE_DD_FSTP_doubleReal = 3,

    class X86InstructionFormatter;

public:

    X86Assembler()
        : m_indexOfLastWatchpoint(INT_MIN)
        , m_indexOfTailOfLastWatchpoint(INT_MIN)
    {
    }
    void push_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_PUSH_EAX, reg);
    }

    void pop_r(RegisterID reg)
    {
        m_formatter.oneByteOp(OP_POP_EAX, reg);
    }

    void push_i32(int imm)
    {
        m_formatter.oneByteOp(OP_PUSH_Iz);
        m_formatter.immediate32(imm);
    }

    void push_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
    }

    void pop_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
    }
    // Arithmetic operations:

    void adcl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
            m_formatter.immediate32(imm);
        }
    }
    void addl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
    }

    void addl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
    }

    void addl_mr(const void* addr, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_ADD_GvEv, dst, addr);
    }

    void addl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
    }

    void addl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    void addl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }
    void addq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
    }

    void addq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_ADD_GvEv, dst, base, offset);
    }

    void addq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
            m_formatter.immediate32(imm);
        }
    }

    void addq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
            m_formatter.immediate32(imm);
        }
    }
    void addl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, addr);
            m_formatter.immediate32(imm);
        }
    }
    void andl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
    }

    void andl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
    }

    void andl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
    }

    void andl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }

    void andl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
            m_formatter.immediate32(imm);
        }
    }
    void andq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
    }

    void andq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_AND, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_AND, dst);
            m_formatter.immediate32(imm);
        }
    }

    void andl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
            m_formatter.immediate32(imm);
        }
    }
    void negl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
    }

    void negq_r(RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
    }

    void negl_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
    }

    void notl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
    }

    void notl_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
    }
    void orl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
    }

    void orl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
    }

    void orl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
    }

    void orl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }

    void orl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void orq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
    }

    void orq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
            m_formatter.immediate32(imm);
        }
    }

    void orl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
            m_formatter.immediate32(imm);
        }
    }

    void orl_rm(RegisterID src, const void* addr)
    {
        m_formatter.oneByteOp(OP_OR_EvGv, src, addr);
    }
    void subl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_EvGv, src, dst);
    }

    void subl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
    }

    void subl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
    }

    void subl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }

    void subl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void subq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
    }

    void subq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
            m_formatter.immediate32(imm);
        }
    }

    void subl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, addr);
            m_formatter.immediate32(imm);
        }
    }
    void xorl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
    }

    void xorl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
    }

    void xorl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
    }

    void xorl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void xorl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }
    void xorq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
    }

    void xorq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
            m_formatter.immediate32(imm);
        }
    }

    void xorq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_XOR_EvGv, src, base, offset);
    }

    void rorq_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_ROR, dst);
        else {
            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_ROR, dst);
            m_formatter.immediate8(imm);
        }
    }
    void sarl_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }

    void sarl_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    void shrl_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
            m_formatter.immediate8(imm);
        }
    }

    void shrl_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
    }

    void shll_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
        else {
            m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
            m_formatter.immediate8(imm);
        }
    }

    void shll_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
    }

    void sarq_CLr(RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
    }

    void sarq_i8r(int imm, RegisterID dst)
    {
        if (imm == 1)
            m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SAR, dst);
        else {
            m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SAR, dst);
            m_formatter.immediate8(imm);
        }
    }
    void imull_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
    }

    void imull_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
    }

    void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
        m_formatter.immediate32(value);
    }

    void idivl_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_IDIV, dst);
    }
    void cmpl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, dst);
    }

    void cmpl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, offset);
    }

    void cmpl_mr(int offset, RegisterID base, RegisterID src)
    {
        m_formatter.oneByteOp(OP_CMP_GvEv, src, base, offset);
    }

    void cmpl_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    void cmpl_ir_force32(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
        m_formatter.immediate32(imm);
    }

    void cmpl_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpb_im(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset);
        m_formatter.immediate8(imm);
    }

    void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset);
        m_formatter.immediate8(imm);
    }

    void cmpb_im(int imm, const void* addr)
    {
        m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, addr);
        m_formatter.immediate8(imm);
    }

    void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpl_im_force32(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
        m_formatter.immediate32(imm);
    }
    void cmpq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
    }

    void cmpq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
    }

    void cmpq_mr(int offset, RegisterID base, RegisterID src)
    {
        m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
    }

    void cmpq_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, offset);
            m_formatter.immediate32(imm);
        }
    }

    void cmpq_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate32(imm);
        }
    }
    void cmpl_rm(RegisterID reg, const void* addr)
    {
        m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
    }

    void cmpl_im(int imm, const void* addr)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, addr);
            m_formatter.immediate32(imm);
        }
    }
    void cmpw_ir(int imm, RegisterID dst)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
            m_formatter.immediate16(imm);
        }
    }

    void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
    }

    void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        if (CAN_SIGN_EXTEND_8_32(imm)) {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate8(imm);
        } else {
            m_formatter.prefix(PRE_OPERAND_SIZE);
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
            m_formatter.immediate16(imm);
        }
    }
    void testl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    void testl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    void testl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    void testb_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp8(OP_TEST_EbGb, src, dst);
    }

    void testb_im(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate8(imm);
    }

    void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate8(imm);
    }

    void testb_im(int imm, const void* addr)
    {
        m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, addr);
        m_formatter.immediate8(imm);
    }

    void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }
    void testq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
    }

    void testq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_TEST_EvGv, src, base, offset);
    }

    void testq_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
        m_formatter.immediate32(imm);
    }

    void testq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
        m_formatter.immediate32(imm);
    }

    void testq_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }
    void testw_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
    }

    void testb_i8r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
        m_formatter.immediate8(imm);
    }
    void setCC_r(Condition cond, RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
    }

    void sete_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
    }

    void setz_r(RegisterID dst)
    {
        sete_r(dst);
    }

    void setne_r(RegisterID dst)
    {
        m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
    }

    void setnz_r(RegisterID dst)
    {
        setne_r(dst);
    }
    // Various move ops:

    void cdq()
    {
        m_formatter.oneByteOp(OP_CDQ);
    }
    void fstpl(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_ESCAPE_DD, ESCAPE_DD_FSTP_doubleReal, base, offset);
    }

    void xchgl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
    }

    void xchgq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
    }
    void movl_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, offset);
    }

    void movl_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_EvGv, src, base, offset);
    }

    void movl_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
    }
    void movl_mEAX(const void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_EAXOv);
#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }
    void movl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, offset);
    }

    void movl_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    void movl_mr_disp8(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp_disp8(OP_MOV_GvEv, dst, base, offset);
    }

    void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_GvEv, dst, base, index, scale, offset);
    }
    void movl_i32r(int imm, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
        m_formatter.immediate32(imm);
    }

    void movl_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    void movl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, index, scale, offset);
        m_formatter.immediate32(imm);
    }
    void movb_i8m(int imm, const void* addr)
    {
        ASSERT(-128 <= imm && imm < 128);
        m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, addr);
        m_formatter.immediate8(imm);
    }

    void movb_i8m(int imm, int offset, RegisterID base)
    {
        ASSERT(-128 <= imm && imm < 128);
        m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, offset);
        m_formatter.immediate8(imm);
    }

    void movb_i8m(int imm, int offset, RegisterID base, RegisterID index, int scale)
    {
        ASSERT(-128 <= imm && imm < 128);
        m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, index, scale, offset);
        m_formatter.immediate8(imm);
    }
    void movb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset);
    }

    void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_OPERAND_SIZE);
        m_formatter.oneByteOp8(OP_MOV_EvGv, src, base, index, scale, offset);
    }
    void movl_EAXm(const void* addr)
    {
        m_formatter.oneByteOp(OP_MOV_OvEAX);
#if CPU(X86_64)
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
        m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
    }
    void movq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
    }

    void movq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
    }

    void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
    }

    void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
    }

    void movq_mEAX(const void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXOv);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    void movq_EAXm(const void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_OvEAX);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    void movq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
    }

    void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    void movq_mr_disp8(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp8(OP_MOV_GvEv, dst, base, offset);
    }

    void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
    }
    void movq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    void movq_i64r(int64_t imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
        m_formatter.immediate64(imm);
    }

    void movsxd_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
    }
    void movl_rm(RegisterID src, const void* addr)
    {
        if (src == X86Registers::eax)
            movl_EAXm(addr);
        else
            m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
    }

    void movl_mr(const void* addr, RegisterID dst)
    {
        if (dst == X86Registers::eax)
            movl_mEAX(addr);
        else
            m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
    }

    void movl_i32m(int imm, const void* addr)
    {
        m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
        m_formatter.immediate32(imm);
    }
    void movzwl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
    }

    void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
    }

    void movswl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, offset);
    }

    void movswl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, index, scale, offset);
    }

    void movzbl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, offset);
    }

    void movzbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, index, scale, offset);
    }

    void movsbl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, offset);
    }

    void movsbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, index, scale, offset);
    }
    void movzbl_rr(RegisterID src, RegisterID dst)
    {
        // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
        // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
        // REX prefixes are defined to be silently ignored by the processor.
        m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
    }
    void leal_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_LEA, dst, base, offset);
    }

    void leaq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
    }
    AssemblerLabel call()
    {
        m_formatter.oneByteOp(OP_CALL_rel32);
        return m_formatter.immediateRel32();
    }

    AssemblerLabel call(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
        return m_formatter.label();
    }

    void call_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
    }

    AssemblerLabel jmp()
    {
        m_formatter.oneByteOp(OP_JMP_rel32);
        return m_formatter.immediateRel32();
    }
    // Return an AssemblerLabel so we have a label to the jump, so that we can use this
    // to make a tail recursive call on x86-64. The MacroAssembler
    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
    AssemblerLabel jmp_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
        return m_formatter.label();
    }

    void jmp_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
    }

    void jmp_m(const void* address)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, address);
    }
    AssemblerLabel jne()
    {
        m_formatter.twoByteOp(jccRel32(ConditionNE));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jnz()
    {
        return jne();
    }

    AssemblerLabel je()
    {
        m_formatter.twoByteOp(jccRel32(ConditionE));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jl()
    {
        m_formatter.twoByteOp(jccRel32(ConditionL));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jb()
    {
        m_formatter.twoByteOp(jccRel32(ConditionB));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jle()
    {
        m_formatter.twoByteOp(jccRel32(ConditionLE));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jbe()
    {
        m_formatter.twoByteOp(jccRel32(ConditionBE));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jge()
    {
        m_formatter.twoByteOp(jccRel32(ConditionGE));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jg()
    {
        m_formatter.twoByteOp(jccRel32(ConditionG));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel ja()
    {
        m_formatter.twoByteOp(jccRel32(ConditionA));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jae()
    {
        m_formatter.twoByteOp(jccRel32(ConditionAE));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jo()
    {
        m_formatter.twoByteOp(jccRel32(ConditionO));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jnp()
    {
        m_formatter.twoByteOp(jccRel32(ConditionNP));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jp()
    {
        m_formatter.twoByteOp(jccRel32(ConditionP));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel js()
    {
        m_formatter.twoByteOp(jccRel32(ConditionS));
        return m_formatter.immediateRel32();
    }

    AssemblerLabel jCC(Condition cond)
    {
        m_formatter.twoByteOp(jccRel32(cond));
        return m_formatter.immediateRel32();
    }
    void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void addsd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, address);
    }

    void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
    }

    void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
    }

    void cvtsi2sd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
    }

    void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }

    void cvtsd2ss_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, (RegisterID)src);
    }

    void cvtss2sd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F3);
        m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, (RegisterID)src);
    }

    void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp64(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }
    void movd_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    void movd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }

    void movq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    void movq_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }

    void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
    void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
    }

    void movsd_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
    }

    void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_SSE_F3);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
    }

    void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void movsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
    }

    void movss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F3);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
    }

    void movsd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
    }

    void movsd_rm(XMMRegisterID src, const void* address)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address);
    }
    void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
        m_formatter.immediate8(whichWord);
    }

    void psllq_i8r(int imm, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp8(OP2_PSLLQ_UdqIb, GROUP14_OP_PSLLQ, (RegisterID)dst);
        m_formatter.immediate8(imm);
    }

    void psrlq_i8r(int imm, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp8(OP2_PSRLQ_UdqIb, GROUP14_OP_PSRLQ, (RegisterID)dst);
        m_formatter.immediate8(imm);
    }

    void por_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_POR_VdqWdq, (RegisterID)dst, (RegisterID)src);
    }
    void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
    }

    void andnpd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_ANDNPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
    }

    void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
    // Misc instructions:

    void int3()
    {
        m_formatter.oneByteOp(OP_INT3);
    }

    void ret()
    {
        m_formatter.oneByteOp(OP_RET);
    }

    void predictNotTaken()
    {
        m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
    }
    // Assembler admin methods:

    size_t codeSize() const
    {
        return m_formatter.codeSize();
    }

    AssemblerLabel labelForWatchpoint()
    {
        AssemblerLabel result = m_formatter.label();
        if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
            result = label();
        m_indexOfLastWatchpoint = result.m_offset;
        m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
        return result;
    }
    AssemblerLabel labelIgnoringWatchpoints()
    {
        return m_formatter.label();
    }

    AssemblerLabel label()
    {
        AssemblerLabel result = m_formatter.label();
        while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
            nop();
            result = m_formatter.label();
        }
        return result;
    }

    AssemblerLabel align(int alignment)
    {
        while (!m_formatter.isAligned(alignment))
            m_formatter.oneByteOp(OP_HLT);
        return label();
    }
1801 // Linking & patching:
1803 // 'link' and 'patch' methods are for use on unprotected code - such as the code
1804 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
1805 // code has been finalized it is (platform support permitting) within a non-
1806 // writable region of memory; to modify the code in an execute-only execuable
1807 // pool the 'repatch' and 'relink' methods should be used.
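    // Illustrative sketch (added annotation, not original code) of the intended flow,
    // assuming a typical LinkBuffer-style client: emit and link while the buffer is still
    // writable, then only relink/repatch the protected copy through the entry points below.
    //
    //     X86Assembler jit;
    //     AssemblerLabel target = jit.label();
    //     AssemblerLabel branch = jit.jne();
    //     jit.linkJump(branch, target);          // fine: buffer still writable
    //     // ... after executableCopy(), only relinkJump()/repatchInt32()-style
    //     // methods may be used on the finalized code.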
    void linkJump(AssemblerLabel from, AssemblerLabel to)
    {
        ASSERT(from.isSet());

        char* code = reinterpret_cast<char*>(m_formatter.data());
        ASSERT(!reinterpret_cast<int32_t*>(code + from.m_offset)[-1]);
        setRel32(code + from.m_offset, code + to.m_offset);
    }

    static void linkJump(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());

        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }

    static void linkCall(void* code, AssemblerLabel from, void* to)
    {
        ASSERT(from.isSet());

        setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
    }
    static void linkPointer(void* code, AssemblerLabel where, void* value)
    {
        ASSERT(where.isSet());

        setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
    }

    static void relinkJump(void* from, void* to)
    {
        setRel32(from, to);
    }

    static void relinkCall(void* from, void* to)
    {
        setRel32(from, to);
    }
    static void repatchCompact(void* where, int32_t value)
    {
        ASSERT(value >= std::numeric_limits<int8_t>::min());
        ASSERT(value <= std::numeric_limits<int8_t>::max());
        setInt8(where, value);
    }

    static void repatchInt32(void* where, int32_t value)
    {
        setInt32(where, value);
    }

    static void repatchPointer(void* where, void* value)
    {
        setPointer(where, value);
    }

    static void* readPointer(void* where)
    {
        return reinterpret_cast<void**>(where)[-1];
    }
    static void replaceWithJump(void* instructionStart, void* to)
    {
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
        uint8_t* dstPtr = reinterpret_cast<uint8_t*>(to);
        intptr_t distance = (intptr_t)(dstPtr - (ptr + 5));
        ptr[0] = static_cast<uint8_t>(OP_JMP_rel32);
        *reinterpret_cast<int32_t*>(ptr + 1) = static_cast<int32_t>(distance);
    }

    static ptrdiff_t maxJumpReplacementSize()
    {
        return 5;
    }
    static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst)
    {
        const int rexBytes = 1;
        const int opcodeBytes = 1;
        ASSERT(rexBytes + opcodeBytes <= maxJumpReplacementSize());
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
        ptr[0] = PRE_REX | (1 << 3) | (dst >> 3);
        ptr[1] = OP_MOV_EAXIv | (dst & 7);

        union {
            uint64_t asWord;
            uint8_t asBytes[8];
        } u;
        u.asWord = imm;
        for (unsigned i = rexBytes + opcodeBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
            ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
    }
    static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst)
    {
        const int opcodeBytes = 1;
        const int modRMBytes = 1;
        ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
        ptr[0] = OP_GROUP1_EvIz;
        ptr[1] = (X86InstructionFormatter::ModRmRegister << 6) | (GROUP1_OP_CMP << 3) | dst;

        union {
            uint32_t asWord;
            uint8_t asBytes[4];
        } u;
        u.asWord = imm;
        for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
            ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
    }
    static void revertJumpTo_cmpl_im_force32(void* instructionStart, int32_t imm, int offset, RegisterID dst)
    {
        ASSERT_UNUSED(offset, !offset);
        const int opcodeBytes = 1;
        const int modRMBytes = 1;
        ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
        ptr[0] = OP_GROUP1_EvIz;
        ptr[1] = (X86InstructionFormatter::ModRmMemoryNoDisp << 6) | (GROUP1_OP_CMP << 3) | dst;

        union {
            uint32_t asWord;
            uint8_t asBytes[4];
        } u;
        u.asWord = imm;
        for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
            ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
    }
    static void replaceWithLoad(void* instructionStart)
    {
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
        if ((*ptr & ~15) == PRE_REX)
            ptr++;
        switch (*ptr) {
        case OP_MOV_GvEv:
            break;
        case OP_LEA:
            *ptr = OP_MOV_GvEv;
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    static void replaceWithAddressComputation(void* instructionStart)
    {
        uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
        if ((*ptr & ~15) == PRE_REX)
            ptr++;
        switch (*ptr) {
        case OP_MOV_GvEv:
            *ptr = OP_LEA;
            break;
        case OP_LEA:
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }
    static unsigned getCallReturnOffset(AssemblerLabel call)
    {
        ASSERT(call.isSet());
        return call.m_offset;
    }

    static void* getRelocatedAddress(void* code, AssemblerLabel label)
    {
        ASSERT(label.isSet());
        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
    }

    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
    {
        return b.m_offset - a.m_offset;
    }

    PassRefPtr<ExecutableMemoryHandle> executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort)
    {
        return m_formatter.executableCopy(vm, ownerUID, effort);
    }

    unsigned debugOffset() { return m_formatter.debugOffset(); }
    void nop()
    {
        m_formatter.oneByteOp(OP_NOP);
    }

    // This is a no-op on x86
    ALWAYS_INLINE static void cacheFlush(void*, size_t) { }
    static void setPointer(void* where, void* value)
    {
        reinterpret_cast<void**>(where)[-1] = value;
    }

    static void setInt32(void* where, int32_t value)
    {
        reinterpret_cast<int32_t*>(where)[-1] = value;
    }

    static void setInt8(void* where, int8_t value)
    {
        reinterpret_cast<int8_t*>(where)[-1] = value;
    }

    static void setRel32(void* from, void* to)
    {
        intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
        ASSERT(offset == static_cast<int32_t>(offset));

        setInt32(from, offset);
    }
    class X86InstructionFormatter {

        static const int maxInstructionSize = 16;

        // Legacy prefix bytes:
        //
        // These are emitted prior to the instruction.

        void prefix(OneByteOpcodeID pre)
        {
            m_buffer.putByte(pre);
        }
        // Word-sized operands / no operand instruction formatters.
        //
        // In addition to the opcode, the following operand permutations are supported:
        //     * None - instruction takes no operands.
        //     * One register - the low three bits of the RegisterID are added into the opcode.
        //     * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
        //     * Three argument ModRM - a register, and a register and an offset describing a memory operand.
        //     * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
        //
        // For 32-bit x86 targets, the address operand may also be provided as a void*.
        // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
        //
        // The twoByteOp methods plant two-byte instruction sequences (the first opcode byte is 0x0F).
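        // Illustrative encodings (added annotation, not original code), assuming the standard
        // x86 ModRM layout used by the helpers below:
        //
        //     oneByteOp(OP_ADD_EvGv, src, dst)          -> 01 /r             (register form)
        //     oneByteOp(OP_ADD_EvGv, src, base, offset) -> 01 /r disp8/32    (memory form)
        //     oneByteOp64(OP_ADD_EvGv, src, dst)        -> REX.W 01 /r       (64-bit form)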
        void oneByteOp(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
        }

        void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp8(reg, base, offset);
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
        void twoByteOp(TwoByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, 0, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIfNeeded(reg, index, base);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, address);
        }
        // Quad-word-sized operands:
        //
        // Used to format 64-bit operations, planting a REX.w prefix.
        // When planting d64 or f64 instructions, not requiring a REX.w prefix,
        // the normal (non-'64'-postfixed) formatters should be used.
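        // Illustrative example (added annotation, not original code): a 64-bit register-register
        // ADD of rax into r8 would be emitted as
        //
        //     REX.WB 01 /r   ->   0x49 0x01 0xC0   (add r8, rax)
        //
        // whereas the plain 32-bit formatter would emit 0x41 0x01 0xC0 (add r8d, eax).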
        void oneByteOp64(OneByteOpcodeID opcode)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, 0);
            m_buffer.putByteUnchecked(opcode);
        }

        void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(0, 0, reg);
            m_buffer.putByteUnchecked(opcode + (reg & 7));
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, offset);
        }

        void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp32(reg, base, offset);
        }

        void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM_disp8(reg, base, offset);
        }

        void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexW(reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }
        // These methods format byte operations. Byte operations differ from the normal
        // formatters in the circumstances under which they will decide to emit REX prefixes.
        // These should be used where any register operand signifies a byte register.
        //
        // The distinction is due to the handling of register numbers in the range 4..7 on
        // x86-64. These register numbers may either represent the second byte of the first
        // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
        //
        // Since ah..bh cannot be used in all permutations of operands (specifically cannot
        // be accessed where a REX prefix is present), these are likely best treated as
        // deprecated. In order to ensure the correct registers spl..dil are selected a
        // REX prefix will be emitted for any byte register operand in the range 4..15.
        //
        // These formatters may be used in instructions with a mix of operand sizes, in which
        // case an unnecessary REX will be emitted - for example, a movzbl whose word-sized
        // destination is edi. In this case a REX will be planted since edi is 7 (and were this
        // a byte operand a REX would be required to specify dil instead of bh). Unneeded REX
        // prefixes will be silently ignored by the processor.
        //
        // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
        // is provided to check byte register operands.
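        // Illustrative example (added annotation, not original code), assuming the usual
        // register numbering (eax..ebx = 0..3, esp..edi = 4..7, r8..r15 = 8..15):
        //
        //     byteRegRequiresRex(X86Registers::ecx)  -> false  // cl is encodable without REX
        //     byteRegRequiresRex(X86Registers::esi)  -> true   // REX needed to select sil, not dh
        //
        // whereas regRequiresRex() only fires for r8 and above.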
        void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }

        void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index) || regRequiresRex(base), reg, index, base);
            m_buffer.putByteUnchecked(opcode);
            memoryModRM(reg, base, index, scale, offset);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(reg, rm);
        }

        void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
        {
            m_buffer.ensureSpace(maxInstructionSize);
            emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
            m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
            m_buffer.putByteUnchecked(opcode);
            registerModRM(groupOp, rm);
        }

        // An immediate should be appended where appropriate after an op has been emitted.
        // The writes are unchecked since the opcode formatters above will have ensured space.

        void immediate8(int imm)
        {
            m_buffer.putByteUnchecked(imm);
        }

        void immediate16(int imm)
        {
            m_buffer.putShortUnchecked(imm);
        }

        void immediate32(int imm)
        {
            m_buffer.putIntUnchecked(imm);
        }

        void immediate64(int64_t imm)
        {
            m_buffer.putInt64Unchecked(imm);
        }

        AssemblerLabel immediateRel32()
        {
            m_buffer.putIntUnchecked(0);
            return label();
        }

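        // Note (illustrative): immediateRel32() plants a zeroed 4-byte placeholder and returns the
        // label of the byte that follows it, so a caller that has just emitted a rel32 branch opcode
        // (e.g. OP_JMP_rel32) can later link the branch by rewriting those four bytes relative to the
        // returned label.
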
        // Administrative methods:

        size_t codeSize() const { return m_buffer.codeSize(); }
        AssemblerLabel label() const { return m_buffer.label(); }
        bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
        void* data() const { return m_buffer.data(); }

        PassRefPtr<ExecutableMemoryHandle> executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort)
        {
            return m_buffer.executableCopy(vm, ownerUID, effort);
        }

        unsigned debugOffset() { return m_buffer.debugOffset(); }

        // Internals; ModRm and REX formatters.

        static const RegisterID noBase = X86Registers::ebp;
        static const RegisterID hasSib = X86Registers::esp;
        static const RegisterID noIndex = X86Registers::esp;
#if CPU(X86_64)
        static const RegisterID noBase2 = X86Registers::r13;
        static const RegisterID hasSib2 = X86Registers::r12;

        // Registers r8 & above require a REX prefix.
        inline bool regRequiresRex(int reg)
        {
            return (reg >= X86Registers::r8);
        }

        // Byte operand registers spl & above require a REX prefix (to prevent the 'H' registers being accessed).
        inline bool byteRegRequiresRex(int reg)
        {
            return (reg >= X86Registers::esp);
        }

        // Format a REX prefix byte.
        inline void emitRex(bool w, int r, int x, int b)
        {
            m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r >> 3) << 2) | ((x >> 3) << 1) | (b >> 3));
        }

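        // Note (illustrative): the REX byte is 0b0100WRXB, i.e. 0x40 | (W << 3) | (R << 2) | (X << 1) | B,
        // where W selects 64-bit operand size and R, X, B supply bit 3 of the ModRM reg field, the SIB
        // index and the ModRM rm / SIB base respectively. For example
        // emitRex(true, X86Registers::r9, 0, X86Registers::eax) emits 0x4C (W and R set).
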
        // Used to plant a REX byte with REX.w set (for 64-bit operations).
        inline void emitRexW(int r, int x, int b)
        {
            emitRex(true, r, x, b);
        }

        // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
        // regRequiresRex() to check other registers (i.e. address base & index).
        inline void emitRexIf(bool condition, int r, int x, int b)
        {
            if (condition)
                emitRex(false, r, x, b);
        }

        // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
        inline void emitRexIfNeeded(int r, int x, int b)
        {
            emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);
        }

#else
        // No REX prefix bytes on 32-bit x86.
        inline bool regRequiresRex(int) { return false; }
        inline bool byteRegRequiresRex(int) { return false; }
        inline void emitRexIf(bool, int, int, int) {}
        inline void emitRexIfNeeded(int, int, int) {}
#endif

        void putModRm(ModRmMode mode, int reg, RegisterID rm)
        {
            m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
        }

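        // Note (illustrative): the ModRM byte packs mod (2 bits), reg (3 bits) and rm (3 bits) from
        // the top down. For example putModRm(ModRmRegister, X86Registers::edx, X86Registers::ebx)
        // emits 0xD3 (mod=11, reg=010, rm=011).
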
        void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
        {
            ASSERT(mode != ModRmRegister);

            putModRm(mode, reg, hasSib);
            m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
        }

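        // Note (illustrative): the SIB byte packs scale (2 bits), index (3 bits) and base (3 bits);
        // the preceding ModRM byte uses rm == hasSib (100b) to signal that a SIB byte follows. For
        // example putModRmSib(ModRmMemoryDisp8, 0, X86Registers::ebx, X86Registers::ecx, 2) emits
        // ModRM 0x44 followed by SIB 0x8B (scale field 2, i.e. *4; index=ecx, base=ebx).
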
        void registerModRM(int reg, RegisterID rm)
        {
            putModRm(ModRmRegister, reg, rm);
        }

        void memoryModRM(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if CPU(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
                    putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                    m_buffer.putIntUnchecked(offset);
                }
            } else {
#if CPU(X86_64)
                if (!offset && (base != noBase) && (base != noBase2))
#else
                if (!offset && (base != noBase))
#endif
                    putModRm(ModRmMemoryNoDisp, reg, base);
                else if (CAN_SIGN_EXTEND_8_32(offset)) {
                    putModRm(ModRmMemoryDisp8, reg, base);
                    m_buffer.putByteUnchecked(offset);
                } else {
                    putModRm(ModRmMemoryDisp32, reg, base);
                    m_buffer.putIntUnchecked(offset);
                }
            }
        }

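        // Illustrative example: memoryModRM(0 /* eax */, X86Registers::ebp, 8) takes the non-SIB path
        // and, since 8 fits in a signed byte, emits ModRM 0x45 (mod=01, reg=000, rm=101) plus the
        // disp8 0x08 - with the 0x8B MOV opcode this loads 8(%ebp) into eax. A zero offset with an
        // ebp/r13 base still uses the disp8 form, because mod=00 with rm=101 instead selects an
        // absolute (or RIP-relative) disp32 operand.
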
        void memoryModRM_disp8(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
            ASSERT(CAN_SIGN_EXTEND_8_32(offset));
#if CPU(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp8, reg, base);
                m_buffer.putByteUnchecked(offset);
            }
        }

        void memoryModRM_disp32(int reg, RegisterID base, int offset)
        {
            // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
#if CPU(X86_64)
            if ((base == hasSib) || (base == hasSib2)) {
#else
            if (base == hasSib) {
#endif
                putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
                m_buffer.putIntUnchecked(offset);
            } else {
                putModRm(ModRmMemoryDisp32, reg, base);
                m_buffer.putIntUnchecked(offset);
            }
        }

        void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
        {
            ASSERT(index != noIndex);

#if CPU(X86_64)
            if (!offset && (base != noBase) && (base != noBase2))
#else
            if (!offset && (base != noBase))
#endif
                putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
            else if (CAN_SIGN_EXTEND_8_32(offset)) {
                putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
                m_buffer.putByteUnchecked(offset);
            } else {
                putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
                m_buffer.putIntUnchecked(offset);
            }
        }

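        // Illustrative example: memoryModRM(0 /* eax */, X86Registers::ebx, X86Registers::ecx, 2, 0x100)
        // cannot use the no-disp or disp8 forms (0x100 does not sign-extend from 8 bits), so it emits
        // ModRM 0x84 (mod=10, rm=100 signalling a SIB), SIB 0x8B (scale field 2, index=ecx, base=ebx)
        // and the little-endian disp32 00 01 00 00.
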
#if !CPU(X86_64)
        void memoryModRM(int reg, const void* address)
        {
            // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
            putModRm(ModRmMemoryNoDisp, reg, noBase);
            m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));
        }
#endif

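        // Note (illustrative): this 32-bit-only form relies on mod=00 with rm=101 (noBase/ebp) meaning
        // a bare disp32. For example memoryModRM(0 /* eax */, (const void*)0x1234) emits ModRM 0x05
        // followed by the little-endian address 34 12 00 00.
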
        AssemblerBuffer m_buffer;
    } m_formatter;
    int m_indexOfLastWatchpoint;
    int m_indexOfTailOfLastWatchpoint;
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))

#endif // X86Assembler_h