2 * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef X86Assembler_h
27 #define X86Assembler_h
29 #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
31 #include "AssemblerBuffer.h"
32 #include "JITCompilationEffort.h"
35 #include <wtf/Assertions.h>
36 #include <wtf/Vector.h>
// Returns true if |value| can be represented as a sign-extended 8-bit
// immediate, i.e. it round-trips through a narrowing to signed 8 bits.
// Used throughout the assembler to choose the short Ib immediate encodings
// over the full 32-bit Iz forms.
// (Idiom fix: named casts instead of C-style casts.)
inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == static_cast<int32_t>(static_cast<signed char>(value)); }
42 namespace X86Registers
{
44 #define FOR_EACH_CPU_REGISTER(V) \
45 FOR_EACH_CPU_GPREGISTER(V) \
46 FOR_EACH_CPU_SPECIAL_REGISTER(V) \
47 FOR_EACH_CPU_FPREGISTER(V)
49 // The following are defined as pairs of the following value:
50 // 1. type of the storage needed to save the register value by the JIT probe.
51 // 2. name of the register.
52 #define FOR_EACH_CPU_GPREGISTER(V) \
61 FOR_EACH_X86_64_CPU_GPREGISTER(V)
63 #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
// Note: the JITs only store double values in the FP registers.
68 #define FOR_EACH_CPU_FPREGISTER(V) \
77 FOR_EACH_X86_64_CPU_FPREGISTER(V)
81 #define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add.
82 #define FOR_EACH_X86_64_CPU_FPREGISTER(V) // Nothing to add.
86 #define FOR_EACH_X86_64_CPU_GPREGISTER(V) \
96 #define FOR_EACH_X86_64_CPU_FPREGISTER(V) \
106 #endif // CPU(X86_64)
109 #define DECLARE_REGISTER(_type, _regName) _regName,
110 FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER
)
111 #undef DECLARE_REGISTER
115 #define DECLARE_REGISTER(_type, _regName) _regName,
116 FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER
)
117 #undef DECLARE_REGISTER
} // namespace X86Registers
124 typedef X86Registers::RegisterID RegisterID
;
126 static RegisterID
firstRegister() { return X86Registers::eax
; }
127 static RegisterID
lastRegister()
130 return X86Registers::r15
;
132 return X86Registers::edi
;
136 typedef X86Registers::XMMRegisterID XMMRegisterID
;
137 typedef XMMRegisterID FPRegisterID
;
139 static FPRegisterID
firstFPRegister() { return X86Registers::xmm0
; }
140 static FPRegisterID
lastFPRegister()
143 return X86Registers::xmm15
;
145 return X86Registers::xmm7
;
167 ConditionC
= ConditionB
,
168 ConditionNC
= ConditionAE
,
179 OP_2BYTE_ESCAPE
= 0x0F,
185 PRE_PREDICT_BRANCH_NOT_TAKEN
= 0x2E,
198 OP_MOVSXD_GvEv
= 0x63,
200 PRE_OPERAND_SIZE
= 0x66,
203 OP_IMUL_GvEvIz
= 0x69,
204 OP_GROUP1_EbIb
= 0x80,
205 OP_GROUP1_EvIz
= 0x81,
206 OP_GROUP1_EvIb
= 0x83,
214 OP_GROUP1A_Ev
= 0x8F,
221 OP_TEST_EAXIv
= 0xA9,
223 OP_GROUP2_EvIb
= 0xC1,
225 OP_GROUP11_EvIb
= 0xC6,
226 OP_GROUP11_EvIz
= 0xC7,
228 OP_GROUP2_Ev1
= 0xD1,
229 OP_GROUP2_EvCL
= 0xD3,
231 OP_CALL_rel32
= 0xE8,
236 OP_GROUP3_EbIb
= 0xF6,
238 OP_GROUP3_EvIz
= 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
243 OP2_MOVSD_VsdWsd
= 0x10,
244 OP2_MOVSD_WsdVsd
= 0x11,
245 OP2_MOVSS_VsdWsd
= 0x10,
246 OP2_MOVSS_WsdVsd
= 0x11,
247 OP2_CVTSI2SD_VsdEd
= 0x2A,
248 OP2_CVTTSD2SI_GdWsd
= 0x2C,
249 OP2_UCOMISD_VsdWsd
= 0x2E,
250 OP2_ADDSD_VsdWsd
= 0x58,
251 OP2_MULSD_VsdWsd
= 0x59,
252 OP2_CVTSD2SS_VsdWsd
= 0x5A,
253 OP2_CVTSS2SD_VsdWsd
= 0x5A,
254 OP2_SUBSD_VsdWsd
= 0x5C,
255 OP2_DIVSD_VsdWsd
= 0x5E,
256 OP2_MOVMSKPD_VdEd
= 0x50,
257 OP2_SQRTSD_VsdWsd
= 0x51,
258 OP2_ANDNPD_VpdWpd
= 0x55,
259 OP2_XORPD_VpdWpd
= 0x57,
260 OP2_MOVD_VdEd
= 0x6E,
261 OP2_MOVD_EdVd
= 0x7E,
262 OP2_JCC_rel32
= 0x80,
264 OP2_3BYTE_ESCAPE
= 0xAE,
265 OP2_IMUL_GvEv
= 0xAF,
266 OP2_MOVZX_GvEb
= 0xB6,
268 OP2_MOVSX_GvEb
= 0xBE,
269 OP2_MOVZX_GvEw
= 0xB7,
270 OP2_MOVSX_GvEw
= 0xBF,
271 OP2_PEXTRW_GdUdIb
= 0xC5,
272 OP2_PSLLQ_UdqIb
= 0x73,
273 OP2_PSRLQ_UdqIb
= 0x73,
274 OP2_POR_VdqWdq
= 0XEB,
281 TwoByteOpcodeID
jccRel32(Condition cond
)
283 return (TwoByteOpcodeID
)(OP2_JCC_rel32
+ cond
);
286 TwoByteOpcodeID
setccOpcode(Condition cond
)
288 return (TwoByteOpcodeID
)(OP_SETCC
+ cond
);
322 GROUP14_OP_PSLLQ
= 6,
323 GROUP14_OP_PSRLQ
= 2,
325 ESCAPE_DD_FSTP_doubleReal
= 3,
328 class X86InstructionFormatter
;
332 : m_indexOfLastWatchpoint(INT_MIN
)
333 , m_indexOfTailOfLastWatchpoint(INT_MIN
)
337 AssemblerBuffer
& buffer() { return m_formatter
.m_buffer
; }
// Emit "push reg": push a general-purpose register onto the stack.
void push_r(RegisterID reg)
{
    m_formatter.oneByteOp(OP_PUSH_EAX, reg);
}
// Emit "pop reg": pop the top of the stack into a general-purpose register.
void pop_r(RegisterID reg)
{
    m_formatter.oneByteOp(OP_POP_EAX, reg);
}
// Emit "push imm32": push a 32-bit immediate onto the stack.
void push_i32(int imm)
{
    m_formatter.oneByteOp(OP_PUSH_Iz);
    m_formatter.immediate32(imm);
}
// Emit "push [base + offset]": push a memory operand onto the stack
// (group-5 opcode with the PUSH group extension).
void push_m(int offset, RegisterID base)
{
    m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_PUSH, base, offset);
}
// Emit "pop [base + offset]": pop the top of the stack into a memory operand
// (group-1A opcode with the POP group extension).
void pop_m(int offset, RegisterID base)
{
    m_formatter.oneByteOp(OP_GROUP1A_Ev, GROUP1A_OP_POP, base, offset);
}
367 // Arithmetic operations:
// Emit "adc [addr], imm": add-with-carry an immediate into a 32-bit value at
// an absolute address. Uses the short sign-extended 8-bit immediate encoding
// when the value fits, otherwise the full 32-bit immediate form.
void adcl_im(int imm, const void* addr)
{
    if (CAN_SIGN_EXTEND_8_32(imm)) {
        m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
        m_formatter.immediate8(imm);
    } else {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
        m_formatter.immediate32(imm);
    }
}
// Emit "add dst, src": 32-bit register-to-register add.
void addl_rr(RegisterID src, RegisterID dst)
{
    m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
}
// Emit "add dst, [base + offset]": 32-bit add of a memory operand into a register.
void addl_mr(int offset, RegisterID base, RegisterID dst)
{
    m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
}
// Emit "add dst, [addr]": 32-bit add from an absolute memory address into a
// register. NOTE(review): absolute-address forms in this file are typically
// guarded for 32-bit builds — confirm the surrounding #if when merging.
void addl_mr(const void* addr, RegisterID dst)
{
    m_formatter.oneByteOp(OP_ADD_GvEv, dst, addr);
}
// Emit "add [base + offset], src": 32-bit add of a register into memory.
void addl_rm(RegisterID src, int offset, RegisterID base)
{
    m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
}
// Emit "add dst, imm": 32-bit add of an immediate into a register.
// Chooses the shortest available encoding: sign-extended 8-bit immediate when
// the value fits; otherwise the one-byte eax-specific form when dst is eax,
// or the general group-1 form with a full 32-bit immediate.
void addl_ir(int imm, RegisterID dst)
{
    if (CAN_SIGN_EXTEND_8_32(imm)) {
        m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
        m_formatter.immediate8(imm);
    } else {
        if (dst == X86Registers::eax)
            m_formatter.oneByteOp(OP_ADD_EAXIv);
        else
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
        m_formatter.immediate32(imm);
    }
}
// Emit "add [base + offset], imm": 32-bit add of an immediate into memory.
// Uses the short sign-extended 8-bit immediate form when the value fits.
void addl_im(int imm, int offset, RegisterID base)
{
    if (CAN_SIGN_EXTEND_8_32(imm)) {
        m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
        m_formatter.immediate8(imm);
    } else {
        m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
        m_formatter.immediate32(imm);
    }
}
// Emit "add dst, src": 64-bit register-to-register add (REX.W-prefixed;
// x86-64 only — confirm the surrounding #if CPU(X86_64) guard when merging).
void addq_rr(RegisterID src, RegisterID dst)
{
    m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
}
435 void addq_mr(int offset
, RegisterID base
, RegisterID dst
)
437 m_formatter
.oneByteOp64(OP_ADD_GvEv
, dst
, base
, offset
);
440 void addq_ir(int imm
, RegisterID dst
)
442 if (CAN_SIGN_EXTEND_8_32(imm
)) {
443 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, dst
);
444 m_formatter
.immediate8(imm
);
446 if (dst
== X86Registers::eax
)
447 m_formatter
.oneByteOp64(OP_ADD_EAXIv
);
449 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, dst
);
450 m_formatter
.immediate32(imm
);
454 void addq_im(int imm
, int offset
, RegisterID base
)
456 if (CAN_SIGN_EXTEND_8_32(imm
)) {
457 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, base
, offset
);
458 m_formatter
.immediate8(imm
);
460 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, base
, offset
);
461 m_formatter
.immediate32(imm
);
465 void addl_im(int imm
, const void* addr
)
467 if (CAN_SIGN_EXTEND_8_32(imm
)) {
468 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, addr
);
469 m_formatter
.immediate8(imm
);
471 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, addr
);
472 m_formatter
.immediate32(imm
);
477 void andl_rr(RegisterID src
, RegisterID dst
)
479 m_formatter
.oneByteOp(OP_AND_EvGv
, src
, dst
);
482 void andl_mr(int offset
, RegisterID base
, RegisterID dst
)
484 m_formatter
.oneByteOp(OP_AND_GvEv
, dst
, base
, offset
);
487 void andl_rm(RegisterID src
, int offset
, RegisterID base
)
489 m_formatter
.oneByteOp(OP_AND_EvGv
, src
, base
, offset
);
492 void andl_ir(int imm
, RegisterID dst
)
494 if (CAN_SIGN_EXTEND_8_32(imm
)) {
495 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_AND
, dst
);
496 m_formatter
.immediate8(imm
);
498 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_AND
, dst
);
499 m_formatter
.immediate32(imm
);
503 void andl_im(int imm
, int offset
, RegisterID base
)
505 if (CAN_SIGN_EXTEND_8_32(imm
)) {
506 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_AND
, base
, offset
);
507 m_formatter
.immediate8(imm
);
509 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_AND
, base
, offset
);
510 m_formatter
.immediate32(imm
);
515 void andq_rr(RegisterID src
, RegisterID dst
)
517 m_formatter
.oneByteOp64(OP_AND_EvGv
, src
, dst
);
520 void andq_ir(int imm
, RegisterID dst
)
522 if (CAN_SIGN_EXTEND_8_32(imm
)) {
523 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_AND
, dst
);
524 m_formatter
.immediate8(imm
);
526 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_AND
, dst
);
527 m_formatter
.immediate32(imm
);
531 void andl_im(int imm
, const void* addr
)
533 if (CAN_SIGN_EXTEND_8_32(imm
)) {
534 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_AND
, addr
);
535 m_formatter
.immediate8(imm
);
537 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_AND
, addr
);
538 m_formatter
.immediate32(imm
);
543 void dec_r(RegisterID dst
)
545 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP1_OP_OR
, dst
);
549 void decq_r(RegisterID dst
)
551 m_formatter
.oneByteOp64(OP_GROUP5_Ev
, GROUP1_OP_OR
, dst
);
553 #endif // CPU(X86_64)
555 void inc_r(RegisterID dst
)
557 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP1_OP_ADD
, dst
);
561 void incq_r(RegisterID dst
)
563 m_formatter
.oneByteOp64(OP_GROUP5_Ev
, GROUP1_OP_ADD
, dst
);
566 void incq_m(int offset
, RegisterID base
)
568 m_formatter
.oneByteOp64(OP_GROUP5_Ev
, GROUP1_OP_ADD
, base
, offset
);
570 #endif // CPU(X86_64)
572 void negl_r(RegisterID dst
)
574 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NEG
, dst
);
578 void negq_r(RegisterID dst
)
580 m_formatter
.oneByteOp64(OP_GROUP3_Ev
, GROUP3_OP_NEG
, dst
);
584 void negl_m(int offset
, RegisterID base
)
586 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NEG
, base
, offset
);
589 void notl_r(RegisterID dst
)
591 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NOT
, dst
);
594 void notl_m(int offset
, RegisterID base
)
596 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NOT
, base
, offset
);
599 void orl_rr(RegisterID src
, RegisterID dst
)
601 m_formatter
.oneByteOp(OP_OR_EvGv
, src
, dst
);
604 void orl_mr(int offset
, RegisterID base
, RegisterID dst
)
606 m_formatter
.oneByteOp(OP_OR_GvEv
, dst
, base
, offset
);
609 void orl_rm(RegisterID src
, int offset
, RegisterID base
)
611 m_formatter
.oneByteOp(OP_OR_EvGv
, src
, base
, offset
);
614 void orl_ir(int imm
, RegisterID dst
)
616 if (CAN_SIGN_EXTEND_8_32(imm
)) {
617 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_OR
, dst
);
618 m_formatter
.immediate8(imm
);
620 if (dst
== X86Registers::eax
)
621 m_formatter
.oneByteOp(OP_OR_EAXIv
);
623 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_OR
, dst
);
624 m_formatter
.immediate32(imm
);
628 void orl_im(int imm
, int offset
, RegisterID base
)
630 if (CAN_SIGN_EXTEND_8_32(imm
)) {
631 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_OR
, base
, offset
);
632 m_formatter
.immediate8(imm
);
634 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_OR
, base
, offset
);
635 m_formatter
.immediate32(imm
);
640 void orq_rr(RegisterID src
, RegisterID dst
)
642 m_formatter
.oneByteOp64(OP_OR_EvGv
, src
, dst
);
645 void orq_ir(int imm
, RegisterID dst
)
647 if (CAN_SIGN_EXTEND_8_32(imm
)) {
648 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_OR
, dst
);
649 m_formatter
.immediate8(imm
);
651 if (dst
== X86Registers::eax
)
652 m_formatter
.oneByteOp64(OP_OR_EAXIv
);
654 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_OR
, dst
);
655 m_formatter
.immediate32(imm
);
659 void orl_im(int imm
, const void* addr
)
661 if (CAN_SIGN_EXTEND_8_32(imm
)) {
662 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_OR
, addr
);
663 m_formatter
.immediate8(imm
);
665 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_OR
, addr
);
666 m_formatter
.immediate32(imm
);
670 void orl_rm(RegisterID src
, const void* addr
)
672 m_formatter
.oneByteOp(OP_OR_EvGv
, src
, addr
);
676 void subl_rr(RegisterID src
, RegisterID dst
)
678 m_formatter
.oneByteOp(OP_SUB_EvGv
, src
, dst
);
681 void subl_mr(int offset
, RegisterID base
, RegisterID dst
)
683 m_formatter
.oneByteOp(OP_SUB_GvEv
, dst
, base
, offset
);
686 void subl_rm(RegisterID src
, int offset
, RegisterID base
)
688 m_formatter
.oneByteOp(OP_SUB_EvGv
, src
, base
, offset
);
691 void subl_ir(int imm
, RegisterID dst
)
693 if (CAN_SIGN_EXTEND_8_32(imm
)) {
694 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, dst
);
695 m_formatter
.immediate8(imm
);
697 if (dst
== X86Registers::eax
)
698 m_formatter
.oneByteOp(OP_SUB_EAXIv
);
700 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, dst
);
701 m_formatter
.immediate32(imm
);
705 void subl_im(int imm
, int offset
, RegisterID base
)
707 if (CAN_SIGN_EXTEND_8_32(imm
)) {
708 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, base
, offset
);
709 m_formatter
.immediate8(imm
);
711 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, base
, offset
);
712 m_formatter
.immediate32(imm
);
717 void subq_rr(RegisterID src
, RegisterID dst
)
719 m_formatter
.oneByteOp64(OP_SUB_EvGv
, src
, dst
);
722 void subq_ir(int imm
, RegisterID dst
)
724 if (CAN_SIGN_EXTEND_8_32(imm
)) {
725 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, dst
);
726 m_formatter
.immediate8(imm
);
728 if (dst
== X86Registers::eax
)
729 m_formatter
.oneByteOp64(OP_SUB_EAXIv
);
731 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, dst
);
732 m_formatter
.immediate32(imm
);
736 void subl_im(int imm
, const void* addr
)
738 if (CAN_SIGN_EXTEND_8_32(imm
)) {
739 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, addr
);
740 m_formatter
.immediate8(imm
);
742 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, addr
);
743 m_formatter
.immediate32(imm
);
748 void xorl_rr(RegisterID src
, RegisterID dst
)
750 m_formatter
.oneByteOp(OP_XOR_EvGv
, src
, dst
);
753 void xorl_mr(int offset
, RegisterID base
, RegisterID dst
)
755 m_formatter
.oneByteOp(OP_XOR_GvEv
, dst
, base
, offset
);
758 void xorl_rm(RegisterID src
, int offset
, RegisterID base
)
760 m_formatter
.oneByteOp(OP_XOR_EvGv
, src
, base
, offset
);
763 void xorl_im(int imm
, int offset
, RegisterID base
)
765 if (CAN_SIGN_EXTEND_8_32(imm
)) {
766 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_XOR
, base
, offset
);
767 m_formatter
.immediate8(imm
);
769 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_XOR
, base
, offset
);
770 m_formatter
.immediate32(imm
);
774 void xorl_ir(int imm
, RegisterID dst
)
776 if (CAN_SIGN_EXTEND_8_32(imm
)) {
777 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_XOR
, dst
);
778 m_formatter
.immediate8(imm
);
780 if (dst
== X86Registers::eax
)
781 m_formatter
.oneByteOp(OP_XOR_EAXIv
);
783 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_XOR
, dst
);
784 m_formatter
.immediate32(imm
);
789 void xorq_rr(RegisterID src
, RegisterID dst
)
791 m_formatter
.oneByteOp64(OP_XOR_EvGv
, src
, dst
);
794 void xorq_ir(int imm
, RegisterID dst
)
796 if (CAN_SIGN_EXTEND_8_32(imm
)) {
797 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_XOR
, dst
);
798 m_formatter
.immediate8(imm
);
800 if (dst
== X86Registers::eax
)
801 m_formatter
.oneByteOp64(OP_XOR_EAXIv
);
803 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_XOR
, dst
);
804 m_formatter
.immediate32(imm
);
808 void xorq_rm(RegisterID src
, int offset
, RegisterID base
)
810 m_formatter
.oneByteOp64(OP_XOR_EvGv
, src
, base
, offset
);
813 void rorq_i8r(int imm
, RegisterID dst
)
816 m_formatter
.oneByteOp64(OP_GROUP2_Ev1
, GROUP2_OP_ROR
, dst
);
818 m_formatter
.oneByteOp64(OP_GROUP2_EvIb
, GROUP2_OP_ROR
, dst
);
819 m_formatter
.immediate8(imm
);
825 void bsr_rr(RegisterID src
, RegisterID dst
)
827 m_formatter
.twoByteOp(OP2_BSR
, dst
, src
);
830 void sarl_i8r(int imm
, RegisterID dst
)
833 m_formatter
.oneByteOp(OP_GROUP2_Ev1
, GROUP2_OP_SAR
, dst
);
835 m_formatter
.oneByteOp(OP_GROUP2_EvIb
, GROUP2_OP_SAR
, dst
);
836 m_formatter
.immediate8(imm
);
840 void sarl_CLr(RegisterID dst
)
842 m_formatter
.oneByteOp(OP_GROUP2_EvCL
, GROUP2_OP_SAR
, dst
);
845 void shrl_i8r(int imm
, RegisterID dst
)
848 m_formatter
.oneByteOp(OP_GROUP2_Ev1
, GROUP2_OP_SHR
, dst
);
850 m_formatter
.oneByteOp(OP_GROUP2_EvIb
, GROUP2_OP_SHR
, dst
);
851 m_formatter
.immediate8(imm
);
855 void shrl_CLr(RegisterID dst
)
857 m_formatter
.oneByteOp(OP_GROUP2_EvCL
, GROUP2_OP_SHR
, dst
);
860 void shll_i8r(int imm
, RegisterID dst
)
863 m_formatter
.oneByteOp(OP_GROUP2_Ev1
, GROUP2_OP_SHL
, dst
);
865 m_formatter
.oneByteOp(OP_GROUP2_EvIb
, GROUP2_OP_SHL
, dst
);
866 m_formatter
.immediate8(imm
);
870 void shll_CLr(RegisterID dst
)
872 m_formatter
.oneByteOp(OP_GROUP2_EvCL
, GROUP2_OP_SHL
, dst
);
876 void sarq_CLr(RegisterID dst
)
878 m_formatter
.oneByteOp64(OP_GROUP2_EvCL
, GROUP2_OP_SAR
, dst
);
881 void sarq_i8r(int imm
, RegisterID dst
)
884 m_formatter
.oneByteOp64(OP_GROUP2_Ev1
, GROUP2_OP_SAR
, dst
);
886 m_formatter
.oneByteOp64(OP_GROUP2_EvIb
, GROUP2_OP_SAR
, dst
);
887 m_formatter
.immediate8(imm
);
891 void shrq_i8r(int imm
, RegisterID dst
)
894 m_formatter
.oneByteOp64(OP_GROUP2_Ev1
, GROUP2_OP_SHR
, dst
);
896 m_formatter
.oneByteOp64(OP_GROUP2_EvIb
, GROUP2_OP_SHR
, dst
);
897 m_formatter
.immediate8(imm
);
901 void shlq_i8r(int imm
, RegisterID dst
)
904 m_formatter
.oneByteOp64(OP_GROUP2_Ev1
, GROUP2_OP_SHL
, dst
);
906 m_formatter
.oneByteOp64(OP_GROUP2_EvIb
, GROUP2_OP_SHL
, dst
);
907 m_formatter
.immediate8(imm
);
910 #endif // CPU(X86_64)
912 void imull_rr(RegisterID src
, RegisterID dst
)
914 m_formatter
.twoByteOp(OP2_IMUL_GvEv
, dst
, src
);
918 void imulq_rr(RegisterID src
, RegisterID dst
)
920 m_formatter
.twoByteOp64(OP2_IMUL_GvEv
, dst
, src
);
922 #endif // CPU(X86_64)
924 void imull_mr(int offset
, RegisterID base
, RegisterID dst
)
926 m_formatter
.twoByteOp(OP2_IMUL_GvEv
, dst
, base
, offset
);
929 void imull_i32r(RegisterID src
, int32_t value
, RegisterID dst
)
931 m_formatter
.oneByteOp(OP_IMUL_GvEvIz
, dst
, src
);
932 m_formatter
.immediate32(value
);
935 void idivl_r(RegisterID dst
)
937 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_IDIV
, dst
);
942 void cmpl_rr(RegisterID src
, RegisterID dst
)
944 m_formatter
.oneByteOp(OP_CMP_EvGv
, src
, dst
);
947 void cmpl_rm(RegisterID src
, int offset
, RegisterID base
)
949 m_formatter
.oneByteOp(OP_CMP_EvGv
, src
, base
, offset
);
952 void cmpl_mr(int offset
, RegisterID base
, RegisterID src
)
954 m_formatter
.oneByteOp(OP_CMP_GvEv
, src
, base
, offset
);
// Emit "cmp dst, imm": 32-bit compare of a register against an immediate.
// Chooses the shortest encoding: sign-extended 8-bit immediate when it fits;
// otherwise the one-byte eax-specific form when dst is eax, or the general
// group-1 form with a full 32-bit immediate.
void cmpl_ir(int imm, RegisterID dst)
{
    if (CAN_SIGN_EXTEND_8_32(imm)) {
        m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
        m_formatter.immediate8(imm);
    } else {
        if (dst == X86Registers::eax)
            m_formatter.oneByteOp(OP_CMP_EAXIv);
        else
            m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
        m_formatter.immediate32(imm);
    }
}
971 void cmpl_ir_force32(int imm
, RegisterID dst
)
973 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, dst
);
974 m_formatter
.immediate32(imm
);
977 void cmpl_im(int imm
, int offset
, RegisterID base
)
979 if (CAN_SIGN_EXTEND_8_32(imm
)) {
980 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, offset
);
981 m_formatter
.immediate8(imm
);
983 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, offset
);
984 m_formatter
.immediate32(imm
);
988 void cmpb_im(int imm
, int offset
, RegisterID base
)
990 m_formatter
.oneByteOp(OP_GROUP1_EbIb
, GROUP1_OP_CMP
, base
, offset
);
991 m_formatter
.immediate8(imm
);
994 void cmpb_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
996 m_formatter
.oneByteOp(OP_GROUP1_EbIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
997 m_formatter
.immediate8(imm
);
1001 void cmpb_im(int imm
, const void* addr
)
1003 m_formatter
.oneByteOp(OP_GROUP1_EbIb
, GROUP1_OP_CMP
, addr
);
1004 m_formatter
.immediate8(imm
);
1008 void cmpl_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1010 if (CAN_SIGN_EXTEND_8_32(imm
)) {
1011 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
1012 m_formatter
.immediate8(imm
);
1014 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
1015 m_formatter
.immediate32(imm
);
1019 void cmpl_im_force32(int imm
, int offset
, RegisterID base
)
1021 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, offset
);
1022 m_formatter
.immediate32(imm
);
1026 void cmpq_rr(RegisterID src
, RegisterID dst
)
1028 m_formatter
.oneByteOp64(OP_CMP_EvGv
, src
, dst
);
1031 void cmpq_rm(RegisterID src
, int offset
, RegisterID base
)
1033 m_formatter
.oneByteOp64(OP_CMP_EvGv
, src
, base
, offset
);
1036 void cmpq_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1038 m_formatter
.oneByteOp64(OP_CMP_EvGv
, src
, base
, index
, scale
, offset
);
1041 void cmpq_mr(int offset
, RegisterID base
, RegisterID src
)
1043 m_formatter
.oneByteOp64(OP_CMP_GvEv
, src
, base
, offset
);
1046 void cmpq_ir(int imm
, RegisterID dst
)
1048 if (CAN_SIGN_EXTEND_8_32(imm
)) {
1049 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, dst
);
1050 m_formatter
.immediate8(imm
);
1052 if (dst
== X86Registers::eax
)
1053 m_formatter
.oneByteOp64(OP_CMP_EAXIv
);
1055 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, dst
);
1056 m_formatter
.immediate32(imm
);
1060 void cmpq_im(int imm
, int offset
, RegisterID base
)
1062 if (CAN_SIGN_EXTEND_8_32(imm
)) {
1063 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, offset
);
1064 m_formatter
.immediate8(imm
);
1066 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, offset
);
1067 m_formatter
.immediate32(imm
);
1071 void cmpq_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1073 if (CAN_SIGN_EXTEND_8_32(imm
)) {
1074 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
1075 m_formatter
.immediate8(imm
);
1077 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
1078 m_formatter
.immediate32(imm
);
1082 void cmpl_rm(RegisterID reg
, const void* addr
)
1084 m_formatter
.oneByteOp(OP_CMP_EvGv
, reg
, addr
);
1087 void cmpl_im(int imm
, const void* addr
)
1089 if (CAN_SIGN_EXTEND_8_32(imm
)) {
1090 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, addr
);
1091 m_formatter
.immediate8(imm
);
1093 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, addr
);
1094 m_formatter
.immediate32(imm
);
1099 void cmpw_ir(int imm
, RegisterID dst
)
1101 if (CAN_SIGN_EXTEND_8_32(imm
)) {
1102 m_formatter
.prefix(PRE_OPERAND_SIZE
);
1103 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, dst
);
1104 m_formatter
.immediate8(imm
);
1106 m_formatter
.prefix(PRE_OPERAND_SIZE
);
1107 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, dst
);
1108 m_formatter
.immediate16(imm
);
1112 void cmpw_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1114 m_formatter
.prefix(PRE_OPERAND_SIZE
);
1115 m_formatter
.oneByteOp(OP_CMP_EvGv
, src
, base
, index
, scale
, offset
);
1118 void cmpw_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1120 if (CAN_SIGN_EXTEND_8_32(imm
)) {
1121 m_formatter
.prefix(PRE_OPERAND_SIZE
);
1122 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
1123 m_formatter
.immediate8(imm
);
1125 m_formatter
.prefix(PRE_OPERAND_SIZE
);
1126 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
1127 m_formatter
.immediate16(imm
);
1131 void testl_rr(RegisterID src
, RegisterID dst
)
1133 m_formatter
.oneByteOp(OP_TEST_EvGv
, src
, dst
);
// Emit "test dst, imm32": 32-bit test (AND without writeback, sets flags).
// Uses the shorter eax-specific opcode when dst is eax; always emits a full
// 32-bit immediate.
void testl_i32r(int imm, RegisterID dst)
{
    if (dst == X86Registers::eax)
        m_formatter.oneByteOp(OP_TEST_EAXIv);
    else
        m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
    m_formatter.immediate32(imm);
}
1145 void testl_i32m(int imm
, int offset
, RegisterID base
)
1147 m_formatter
.oneByteOp(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, offset
);
1148 m_formatter
.immediate32(imm
);
1151 void testb_rr(RegisterID src
, RegisterID dst
)
1153 m_formatter
.oneByteOp8(OP_TEST_EbGb
, src
, dst
);
1156 void testb_im(int imm
, int offset
, RegisterID base
)
1158 m_formatter
.oneByteOp(OP_GROUP3_EbIb
, GROUP3_OP_TEST
, base
, offset
);
1159 m_formatter
.immediate8(imm
);
1162 void testb_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1164 m_formatter
.oneByteOp(OP_GROUP3_EbIb
, GROUP3_OP_TEST
, base
, index
, scale
, offset
);
1165 m_formatter
.immediate8(imm
);
1169 void testb_im(int imm
, const void* addr
)
1171 m_formatter
.oneByteOp(OP_GROUP3_EbIb
, GROUP3_OP_TEST
, addr
);
1172 m_formatter
.immediate8(imm
);
1176 void testl_i32m(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1178 m_formatter
.oneByteOp(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, index
, scale
, offset
);
1179 m_formatter
.immediate32(imm
);
1183 void testq_rr(RegisterID src
, RegisterID dst
)
1185 m_formatter
.oneByteOp64(OP_TEST_EvGv
, src
, dst
);
1188 void testq_rm(RegisterID src
, int offset
, RegisterID base
)
1190 m_formatter
.oneByteOp64(OP_TEST_EvGv
, src
, base
, offset
);
1193 void testq_i32r(int imm
, RegisterID dst
)
1195 if (dst
== X86Registers::eax
)
1196 m_formatter
.oneByteOp64(OP_TEST_EAXIv
);
1198 m_formatter
.oneByteOp64(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, dst
);
1199 m_formatter
.immediate32(imm
);
1202 void testq_i32m(int imm
, int offset
, RegisterID base
)
1204 m_formatter
.oneByteOp64(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, offset
);
1205 m_formatter
.immediate32(imm
);
1208 void testq_i32m(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1210 m_formatter
.oneByteOp64(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, index
, scale
, offset
);
1211 m_formatter
.immediate32(imm
);
1215 void testw_rr(RegisterID src
, RegisterID dst
)
1217 m_formatter
.prefix(PRE_OPERAND_SIZE
);
1218 m_formatter
.oneByteOp(OP_TEST_EvGv
, src
, dst
);
1221 void testb_i8r(int imm
, RegisterID dst
)
1223 if (dst
== X86Registers::eax
)
1224 m_formatter
.oneByteOp(OP_TEST_ALIb
);
1226 m_formatter
.oneByteOp8(OP_GROUP3_EbIb
, GROUP3_OP_TEST
, dst
);
1227 m_formatter
.immediate8(imm
);
// Emit "setcc dst": set the low byte of dst to 1 if |cond| holds, else 0.
// The condition selects the two-byte opcode via setccOpcode(); the ModRM reg
// field is unused by setcc, hence the zero group opcode.
void setCC_r(Condition cond, RegisterID dst)
{
    m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
}
// Emit "sete dst": set the low byte of dst to 1 if the zero flag is set.
void sete_r(RegisterID dst)
{
    m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
}
1240 void setz_r(RegisterID dst
)
// Emit "setne dst": set the low byte of dst to 1 if the zero flag is clear.
void setne_r(RegisterID dst)
{
    m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
}
1250 void setnz_r(RegisterID dst
)
1255 // Various move ops:
1259 m_formatter
.oneByteOp(OP_CDQ
);
// Emit x87 "fstp qword ptr [base + offset]": store ST(0) to memory as a
// double and pop the x87 stack (DD escape, FSTP double-real extension).
void fstpl(int offset, RegisterID base)
{
    m_formatter.oneByteOp(OP_ESCAPE_DD, ESCAPE_DD_FSTP_doubleReal, base, offset);
}
// Emit "xchg src, dst": 32-bit register exchange. Uses the one-byte
// eax-specific form when either operand is eax, otherwise the general
// two-register encoding.
void xchgl_rr(RegisterID src, RegisterID dst)
{
    if (src == X86Registers::eax)
        m_formatter.oneByteOp(OP_XCHG_EAX, dst);
    else if (dst == X86Registers::eax)
        m_formatter.oneByteOp(OP_XCHG_EAX, src);
    else
        m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
}
1278 void xchgq_rr(RegisterID src
, RegisterID dst
)
1280 if (src
== X86Registers::eax
)
1281 m_formatter
.oneByteOp64(OP_XCHG_EAX
, dst
);
1282 else if (dst
== X86Registers::eax
)
1283 m_formatter
.oneByteOp64(OP_XCHG_EAX
, src
);
1285 m_formatter
.oneByteOp64(OP_XCHG_EvGv
, src
, dst
);
// Emit "mov dst, src": 32-bit register-to-register move.
void movl_rr(RegisterID src, RegisterID dst)
{
    m_formatter.oneByteOp(OP_MOV_EvGv, src, dst);
}
1294 void movl_rm(RegisterID src
, int offset
, RegisterID base
)
1296 m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, base
, offset
);
1299 void movl_rm_disp32(RegisterID src
, int offset
, RegisterID base
)
1301 m_formatter
.oneByteOp_disp32(OP_MOV_EvGv
, src
, base
, offset
);
1304 void movl_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1306 m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, base
, index
, scale
, offset
);
1309 void movl_mEAX(const void* addr
)
1311 m_formatter
.oneByteOp(OP_MOV_EAXOv
);
1313 m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
));
1315 m_formatter
.immediate32(reinterpret_cast<int>(addr
));
1319 void movl_mr(int offset
, RegisterID base
, RegisterID dst
)
1321 m_formatter
.oneByteOp(OP_MOV_GvEv
, dst
, base
, offset
);
1324 void movl_mr_disp32(int offset
, RegisterID base
, RegisterID dst
)
1326 m_formatter
.oneByteOp_disp32(OP_MOV_GvEv
, dst
, base
, offset
);
1329 void movl_mr_disp8(int offset
, RegisterID base
, RegisterID dst
)
1331 m_formatter
.oneByteOp_disp8(OP_MOV_GvEv
, dst
, base
, offset
);
1334 void movl_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, RegisterID dst
)
1336 m_formatter
.oneByteOp(OP_MOV_GvEv
, dst
, base
, index
, scale
, offset
);
// Emit "mov dst, imm32": load a 32-bit immediate into a register using the
// short mov-immediate-to-register opcode family (OP_MOV_EAXIv base).
void movl_i32r(int imm, RegisterID dst)
{
    m_formatter.oneByteOp(OP_MOV_EAXIv, dst);
    m_formatter.immediate32(imm);
}
1345 void movl_i32m(int imm
, int offset
, RegisterID base
)
1347 m_formatter
.oneByteOp(OP_GROUP11_EvIz
, GROUP11_MOV
, base
, offset
);
1348 m_formatter
.immediate32(imm
);
1351 void movl_i32m(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1353 m_formatter
.oneByteOp(OP_GROUP11_EvIz
, GROUP11_MOV
, base
, index
, scale
, offset
);
1354 m_formatter
.immediate32(imm
);
1358 void movb_i8m(int imm
, const void* addr
)
1360 ASSERT(-128 <= imm
&& imm
< 128);
1361 m_formatter
.oneByteOp(OP_GROUP11_EvIb
, GROUP11_MOV
, addr
);
1362 m_formatter
.immediate8(imm
);
1366 void movb_i8m(int imm
, int offset
, RegisterID base
)
1368 ASSERT(-128 <= imm
&& imm
< 128);
1369 m_formatter
.oneByteOp(OP_GROUP11_EvIb
, GROUP11_MOV
, base
, offset
);
1370 m_formatter
.immediate8(imm
);
1373 void movb_i8m(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1375 ASSERT(-128 <= imm
&& imm
< 128);
1376 m_formatter
.oneByteOp(OP_GROUP11_EvIb
, GROUP11_MOV
, base
, index
, scale
, offset
);
1377 m_formatter
.immediate8(imm
);
1381 void movb_rm(RegisterID src
, const void* addr
)
1383 m_formatter
.oneByteOp(OP_MOV_EbGb
, src
, addr
);
1387 void movb_rm(RegisterID src
, int offset
, RegisterID base
)
1389 m_formatter
.oneByteOp8(OP_MOV_EbGb
, src
, base
, offset
);
1392 void movb_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1394 m_formatter
.oneByteOp8(OP_MOV_EbGb
, src
, base
, index
, scale
, offset
);
1397 void movw_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1399 m_formatter
.prefix(PRE_OPERAND_SIZE
);
1400 m_formatter
.oneByteOp8(OP_MOV_EvGv
, src
, base
, index
, scale
, offset
);
1403 void movl_EAXm(const void* addr
)
1405 m_formatter
.oneByteOp(OP_MOV_OvEAX
);
1407 m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
));
1409 m_formatter
.immediate32(reinterpret_cast<int>(addr
));
1414 void movq_rr(RegisterID src
, RegisterID dst
)
1416 m_formatter
.oneByteOp64(OP_MOV_EvGv
, src
, dst
);
1419 void movq_rm(RegisterID src
, int offset
, RegisterID base
)
1421 m_formatter
.oneByteOp64(OP_MOV_EvGv
, src
, base
, offset
);
1424 void movq_rm_disp32(RegisterID src
, int offset
, RegisterID base
)
1426 m_formatter
.oneByteOp64_disp32(OP_MOV_EvGv
, src
, base
, offset
);
1429 void movq_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1431 m_formatter
.oneByteOp64(OP_MOV_EvGv
, src
, base
, index
, scale
, offset
);
1434 void movq_mEAX(const void* addr
)
1436 m_formatter
.oneByteOp64(OP_MOV_EAXOv
);
1437 m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
));
1440 void movq_EAXm(const void* addr
)
1442 m_formatter
.oneByteOp64(OP_MOV_OvEAX
);
1443 m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
));
1446 void movq_mr(int offset
, RegisterID base
, RegisterID dst
)
1448 m_formatter
.oneByteOp64(OP_MOV_GvEv
, dst
, base
, offset
);
1451 void movq_mr_disp32(int offset
, RegisterID base
, RegisterID dst
)
1453 m_formatter
.oneByteOp64_disp32(OP_MOV_GvEv
, dst
, base
, offset
);
1456 void movq_mr_disp8(int offset
, RegisterID base
, RegisterID dst
)
1458 m_formatter
.oneByteOp64_disp8(OP_MOV_GvEv
, dst
, base
, offset
);
1461 void movq_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, RegisterID dst
)
1463 m_formatter
.oneByteOp64(OP_MOV_GvEv
, dst
, base
, index
, scale
, offset
);
1466 void movq_i32m(int imm
, int offset
, RegisterID base
)
1468 m_formatter
.oneByteOp64(OP_GROUP11_EvIz
, GROUP11_MOV
, base
, offset
);
1469 m_formatter
.immediate32(imm
);
1472 void movq_i64r(int64_t imm
, RegisterID dst
)
1474 m_formatter
.oneByteOp64(OP_MOV_EAXIv
, dst
);
1475 m_formatter
.immediate64(imm
);
1478 void movsxd_rr(RegisterID src
, RegisterID dst
)
1480 m_formatter
.oneByteOp64(OP_MOVSXD_GvEv
, dst
, src
);
1485 void movl_rm(RegisterID src
, const void* addr
)
1487 if (src
== X86Registers::eax
)
1490 m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, addr
);
1493 void movl_mr(const void* addr
, RegisterID dst
)
1495 if (dst
== X86Registers::eax
)
1498 m_formatter
.oneByteOp(OP_MOV_GvEv
, dst
, addr
);
1501 void movl_i32m(int imm
, const void* addr
)
1503 m_formatter
.oneByteOp(OP_GROUP11_EvIz
, GROUP11_MOV
, addr
);
1504 m_formatter
.immediate32(imm
);
1508 void movzwl_mr(int offset
, RegisterID base
, RegisterID dst
)
1510 m_formatter
.twoByteOp(OP2_MOVZX_GvEw
, dst
, base
, offset
);
1513 void movzwl_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, RegisterID dst
)
1515 m_formatter
.twoByteOp(OP2_MOVZX_GvEw
, dst
, base
, index
, scale
, offset
);
1518 void movswl_mr(int offset
, RegisterID base
, RegisterID dst
)
1520 m_formatter
.twoByteOp(OP2_MOVSX_GvEw
, dst
, base
, offset
);
1523 void movswl_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, RegisterID dst
)
1525 m_formatter
.twoByteOp(OP2_MOVSX_GvEw
, dst
, base
, index
, scale
, offset
);
1528 void movzbl_mr(int offset
, RegisterID base
, RegisterID dst
)
1530 m_formatter
.twoByteOp(OP2_MOVZX_GvEb
, dst
, base
, offset
);
1533 void movzbl_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, RegisterID dst
)
1535 m_formatter
.twoByteOp(OP2_MOVZX_GvEb
, dst
, base
, index
, scale
, offset
);
1539 void movzbl_mr(const void* address
, RegisterID dst
)
1541 m_formatter
.twoByteOp(OP2_MOVZX_GvEb
, dst
, address
);
1545 void movsbl_mr(int offset
, RegisterID base
, RegisterID dst
)
1547 m_formatter
.twoByteOp(OP2_MOVSX_GvEb
, dst
, base
, offset
);
1550 void movsbl_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, RegisterID dst
)
1552 m_formatter
.twoByteOp(OP2_MOVSX_GvEb
, dst
, base
, index
, scale
, offset
);
1555 void movzbl_rr(RegisterID src
, RegisterID dst
)
1557 // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
1558 // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
1559 // REX prefixes are defined to be silently ignored by the processor.
1560 m_formatter
.twoByteOp8(OP2_MOVZX_GvEb
, dst
, src
);
1563 void leal_mr(int offset
, RegisterID base
, RegisterID dst
)
1565 m_formatter
.oneByteOp(OP_LEA
, dst
, base
, offset
);
1568 void leaq_mr(int offset
, RegisterID base
, RegisterID dst
)
1570 m_formatter
.oneByteOp64(OP_LEA
, dst
, base
, offset
);
1576 AssemblerLabel
call()
1578 m_formatter
.oneByteOp(OP_CALL_rel32
);
1579 return m_formatter
.immediateRel32();
1582 AssemblerLabel
call(RegisterID dst
)
1584 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_CALLN
, dst
);
1585 return m_formatter
.label();
1588 void call_m(int offset
, RegisterID base
)
1590 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_CALLN
, base
, offset
);
1593 AssemblerLabel
jmp()
1595 m_formatter
.oneByteOp(OP_JMP_rel32
);
1596 return m_formatter
.immediateRel32();
1599 // Return a AssemblerLabel so we have a label to the jump, so we can use this
1600 // To make a tail recursive call on x86-64. The MacroAssembler
1601 // really shouldn't wrap this as a Jump, since it can't be linked. :-/
1602 AssemblerLabel
jmp_r(RegisterID dst
)
1604 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_JMPN
, dst
);
1605 return m_formatter
.label();
1608 void jmp_m(int offset
, RegisterID base
)
1610 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_JMPN
, base
, offset
);
1614 void jmp_m(const void* address
)
1616 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_JMPN
, address
);
1620 AssemblerLabel
jne()
1622 m_formatter
.twoByteOp(jccRel32(ConditionNE
));
1623 return m_formatter
.immediateRel32();
1626 AssemblerLabel
jnz()
1633 m_formatter
.twoByteOp(jccRel32(ConditionE
));
1634 return m_formatter
.immediateRel32();
1644 m_formatter
.twoByteOp(jccRel32(ConditionL
));
1645 return m_formatter
.immediateRel32();
1650 m_formatter
.twoByteOp(jccRel32(ConditionB
));
1651 return m_formatter
.immediateRel32();
1654 AssemblerLabel
jle()
1656 m_formatter
.twoByteOp(jccRel32(ConditionLE
));
1657 return m_formatter
.immediateRel32();
1660 AssemblerLabel
jbe()
1662 m_formatter
.twoByteOp(jccRel32(ConditionBE
));
1663 return m_formatter
.immediateRel32();
1666 AssemblerLabel
jge()
1668 m_formatter
.twoByteOp(jccRel32(ConditionGE
));
1669 return m_formatter
.immediateRel32();
1674 m_formatter
.twoByteOp(jccRel32(ConditionG
));
1675 return m_formatter
.immediateRel32();
1680 m_formatter
.twoByteOp(jccRel32(ConditionA
));
1681 return m_formatter
.immediateRel32();
1684 AssemblerLabel
jae()
1686 m_formatter
.twoByteOp(jccRel32(ConditionAE
));
1687 return m_formatter
.immediateRel32();
1692 m_formatter
.twoByteOp(jccRel32(ConditionO
));
1693 return m_formatter
.immediateRel32();
1696 AssemblerLabel
jnp()
1698 m_formatter
.twoByteOp(jccRel32(ConditionNP
));
1699 return m_formatter
.immediateRel32();
1704 m_formatter
.twoByteOp(jccRel32(ConditionP
));
1705 return m_formatter
.immediateRel32();
1710 m_formatter
.twoByteOp(jccRel32(ConditionS
));
1711 return m_formatter
.immediateRel32();
1714 AssemblerLabel
jCC(Condition cond
)
1716 m_formatter
.twoByteOp(jccRel32(cond
));
1717 return m_formatter
.immediateRel32();
1722 void addsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1724 m_formatter
.prefix(PRE_SSE_F2
);
1725 m_formatter
.twoByteOp(OP2_ADDSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1728 void addsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1730 m_formatter
.prefix(PRE_SSE_F2
);
1731 m_formatter
.twoByteOp(OP2_ADDSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1735 void addsd_mr(const void* address
, XMMRegisterID dst
)
1737 m_formatter
.prefix(PRE_SSE_F2
);
1738 m_formatter
.twoByteOp(OP2_ADDSD_VsdWsd
, (RegisterID
)dst
, address
);
1742 void cvtsi2sd_rr(RegisterID src
, XMMRegisterID dst
)
1744 m_formatter
.prefix(PRE_SSE_F2
);
1745 m_formatter
.twoByteOp(OP2_CVTSI2SD_VsdEd
, (RegisterID
)dst
, src
);
1749 void cvtsi2sdq_rr(RegisterID src
, XMMRegisterID dst
)
1751 m_formatter
.prefix(PRE_SSE_F2
);
1752 m_formatter
.twoByteOp64(OP2_CVTSI2SD_VsdEd
, (RegisterID
)dst
, src
);
1756 void cvtsi2sd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1758 m_formatter
.prefix(PRE_SSE_F2
);
1759 m_formatter
.twoByteOp(OP2_CVTSI2SD_VsdEd
, (RegisterID
)dst
, base
, offset
);
1763 void cvtsi2sd_mr(const void* address
, XMMRegisterID dst
)
1765 m_formatter
.prefix(PRE_SSE_F2
);
1766 m_formatter
.twoByteOp(OP2_CVTSI2SD_VsdEd
, (RegisterID
)dst
, address
);
1770 void cvttsd2si_rr(XMMRegisterID src
, RegisterID dst
)
1772 m_formatter
.prefix(PRE_SSE_F2
);
1773 m_formatter
.twoByteOp(OP2_CVTTSD2SI_GdWsd
, dst
, (RegisterID
)src
);
1776 void cvtsd2ss_rr(XMMRegisterID src
, XMMRegisterID dst
)
1778 m_formatter
.prefix(PRE_SSE_F2
);
1779 m_formatter
.twoByteOp(OP2_CVTSD2SS_VsdWsd
, dst
, (RegisterID
)src
);
1782 void cvtss2sd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1784 m_formatter
.prefix(PRE_SSE_F3
);
1785 m_formatter
.twoByteOp(OP2_CVTSS2SD_VsdWsd
, dst
, (RegisterID
)src
);
1789 void cvttsd2siq_rr(XMMRegisterID src
, RegisterID dst
)
1791 m_formatter
.prefix(PRE_SSE_F2
);
1792 m_formatter
.twoByteOp64(OP2_CVTTSD2SI_GdWsd
, dst
, (RegisterID
)src
);
1796 void movd_rr(XMMRegisterID src
, RegisterID dst
)
1798 m_formatter
.prefix(PRE_SSE_66
);
1799 m_formatter
.twoByteOp(OP2_MOVD_EdVd
, (RegisterID
)src
, dst
);
1802 void movd_rr(RegisterID src
, XMMRegisterID dst
)
1804 m_formatter
.prefix(PRE_SSE_66
);
1805 m_formatter
.twoByteOp(OP2_MOVD_VdEd
, (RegisterID
)dst
, src
);
1809 void movmskpd_rr(XMMRegisterID src
, RegisterID dst
)
1811 m_formatter
.prefix(PRE_SSE_66
);
1812 m_formatter
.twoByteOp64(OP2_MOVMSKPD_VdEd
, dst
, (RegisterID
)src
);
1815 void movq_rr(XMMRegisterID src
, RegisterID dst
)
1817 m_formatter
.prefix(PRE_SSE_66
);
1818 m_formatter
.twoByteOp64(OP2_MOVD_EdVd
, (RegisterID
)src
, dst
);
1821 void movq_rr(RegisterID src
, XMMRegisterID dst
)
1823 m_formatter
.prefix(PRE_SSE_66
);
1824 m_formatter
.twoByteOp64(OP2_MOVD_VdEd
, (RegisterID
)dst
, src
);
1828 void movsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1830 m_formatter
.prefix(PRE_SSE_F2
);
1831 m_formatter
.twoByteOp(OP2_MOVSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1834 void movsd_rm(XMMRegisterID src
, int offset
, RegisterID base
)
1836 m_formatter
.prefix(PRE_SSE_F2
);
1837 m_formatter
.twoByteOp(OP2_MOVSD_WsdVsd
, (RegisterID
)src
, base
, offset
);
1840 void movsd_rm(XMMRegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1842 m_formatter
.prefix(PRE_SSE_F2
);
1843 m_formatter
.twoByteOp(OP2_MOVSD_WsdVsd
, (RegisterID
)src
, base
, index
, scale
, offset
);
1846 void movss_rm(XMMRegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1848 m_formatter
.prefix(PRE_SSE_F3
);
1849 m_formatter
.twoByteOp(OP2_MOVSD_WsdVsd
, (RegisterID
)src
, base
, index
, scale
, offset
);
1852 void movsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1854 m_formatter
.prefix(PRE_SSE_F2
);
1855 m_formatter
.twoByteOp(OP2_MOVSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1858 void movsd_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, XMMRegisterID dst
)
1860 m_formatter
.prefix(PRE_SSE_F2
);
1861 m_formatter
.twoByteOp(OP2_MOVSD_VsdWsd
, dst
, base
, index
, scale
, offset
);
1864 void movss_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, XMMRegisterID dst
)
1866 m_formatter
.prefix(PRE_SSE_F3
);
1867 m_formatter
.twoByteOp(OP2_MOVSD_VsdWsd
, dst
, base
, index
, scale
, offset
);
1871 void movsd_mr(const void* address
, XMMRegisterID dst
)
1873 m_formatter
.prefix(PRE_SSE_F2
);
1874 m_formatter
.twoByteOp(OP2_MOVSD_VsdWsd
, (RegisterID
)dst
, address
);
1876 void movsd_rm(XMMRegisterID src
, const void* address
)
1878 m_formatter
.prefix(PRE_SSE_F2
);
1879 m_formatter
.twoByteOp(OP2_MOVSD_WsdVsd
, (RegisterID
)src
, address
);
1883 void mulsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1885 m_formatter
.prefix(PRE_SSE_F2
);
1886 m_formatter
.twoByteOp(OP2_MULSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1889 void mulsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1891 m_formatter
.prefix(PRE_SSE_F2
);
1892 m_formatter
.twoByteOp(OP2_MULSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1895 void pextrw_irr(int whichWord
, XMMRegisterID src
, RegisterID dst
)
1897 m_formatter
.prefix(PRE_SSE_66
);
1898 m_formatter
.twoByteOp(OP2_PEXTRW_GdUdIb
, (RegisterID
)dst
, (RegisterID
)src
);
1899 m_formatter
.immediate8(whichWord
);
1902 void psllq_i8r(int imm
, XMMRegisterID dst
)
1904 m_formatter
.prefix(PRE_SSE_66
);
1905 m_formatter
.twoByteOp8(OP2_PSLLQ_UdqIb
, GROUP14_OP_PSLLQ
, (RegisterID
)dst
);
1906 m_formatter
.immediate8(imm
);
1909 void psrlq_i8r(int imm
, XMMRegisterID dst
)
1911 m_formatter
.prefix(PRE_SSE_66
);
1912 m_formatter
.twoByteOp8(OP2_PSRLQ_UdqIb
, GROUP14_OP_PSRLQ
, (RegisterID
)dst
);
1913 m_formatter
.immediate8(imm
);
1916 void por_rr(XMMRegisterID src
, XMMRegisterID dst
)
1918 m_formatter
.prefix(PRE_SSE_66
);
1919 m_formatter
.twoByteOp(OP2_POR_VdqWdq
, (RegisterID
)dst
, (RegisterID
)src
);
1922 void subsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1924 m_formatter
.prefix(PRE_SSE_F2
);
1925 m_formatter
.twoByteOp(OP2_SUBSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1928 void subsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1930 m_formatter
.prefix(PRE_SSE_F2
);
1931 m_formatter
.twoByteOp(OP2_SUBSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1934 void ucomisd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1936 m_formatter
.prefix(PRE_SSE_66
);
1937 m_formatter
.twoByteOp(OP2_UCOMISD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1940 void ucomisd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1942 m_formatter
.prefix(PRE_SSE_66
);
1943 m_formatter
.twoByteOp(OP2_UCOMISD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1946 void divsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1948 m_formatter
.prefix(PRE_SSE_F2
);
1949 m_formatter
.twoByteOp(OP2_DIVSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1952 void divsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1954 m_formatter
.prefix(PRE_SSE_F2
);
1955 m_formatter
.twoByteOp(OP2_DIVSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1958 void xorpd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1960 m_formatter
.prefix(PRE_SSE_66
);
1961 m_formatter
.twoByteOp(OP2_XORPD_VpdWpd
, (RegisterID
)dst
, (RegisterID
)src
);
1964 void andnpd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1966 m_formatter
.prefix(PRE_SSE_66
);
1967 m_formatter
.twoByteOp(OP2_ANDNPD_VpdWpd
, (RegisterID
)dst
, (RegisterID
)src
);
1970 void sqrtsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1972 m_formatter
.prefix(PRE_SSE_F2
);
1973 m_formatter
.twoByteOp(OP2_SQRTSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1976 // Misc instructions:
1980 m_formatter
.oneByteOp(OP_INT3
);
1985 m_formatter
.oneByteOp(OP_RET
);
1988 void predictNotTaken()
1990 m_formatter
.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN
);
1995 m_formatter
.threeByteOp(OP3_MFENCE
);
1998 // Assembler admin methods:
2000 size_t codeSize() const
2002 return m_formatter
.codeSize();
2005 AssemblerLabel
labelForWatchpoint()
2007 AssemblerLabel result
= m_formatter
.label();
2008 if (static_cast<int>(result
.m_offset
) != m_indexOfLastWatchpoint
)
2010 m_indexOfLastWatchpoint
= result
.m_offset
;
2011 m_indexOfTailOfLastWatchpoint
= result
.m_offset
+ maxJumpReplacementSize();
2015 AssemblerLabel
labelIgnoringWatchpoints()
2017 return m_formatter
.label();
2020 AssemblerLabel
label()
2022 AssemblerLabel result
= m_formatter
.label();
2023 while (UNLIKELY(static_cast<int>(result
.m_offset
) < m_indexOfTailOfLastWatchpoint
)) {
2025 result
= m_formatter
.label();
2030 AssemblerLabel
align(int alignment
)
2032 while (!m_formatter
.isAligned(alignment
))
2033 m_formatter
.oneByteOp(OP_HLT
);
2038 // Linking & patching:
2040 // 'link' and 'patch' methods are for use on unprotected code - such as the code
2041 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
2042 // code has been finalized it is (platform support permitting) within a non-
2043 // writable region of memory; to modify the code in an execute-only execuable
2044 // pool the 'repatch' and 'relink' methods should be used.
2046 void linkJump(AssemblerLabel from
, AssemblerLabel to
)
2048 ASSERT(from
.isSet());
2051 char* code
= reinterpret_cast<char*>(m_formatter
.data());
2052 ASSERT(!reinterpret_cast<int32_t*>(code
+ from
.m_offset
)[-1]);
2053 setRel32(code
+ from
.m_offset
, code
+ to
.m_offset
);
2056 static void linkJump(void* code
, AssemblerLabel from
, void* to
)
2058 ASSERT(from
.isSet());
2060 setRel32(reinterpret_cast<char*>(code
) + from
.m_offset
, to
);
2063 static void linkCall(void* code
, AssemblerLabel from
, void* to
)
2065 ASSERT(from
.isSet());
2067 setRel32(reinterpret_cast<char*>(code
) + from
.m_offset
, to
);
2070 static void linkPointer(void* code
, AssemblerLabel where
, void* value
)
2072 ASSERT(where
.isSet());
2074 setPointer(reinterpret_cast<char*>(code
) + where
.m_offset
, value
);
2077 static void relinkJump(void* from
, void* to
)
2082 static void relinkCall(void* from
, void* to
)
2087 static void repatchCompact(void* where
, int32_t value
)
2089 ASSERT(value
>= std::numeric_limits
<int8_t>::min());
2090 ASSERT(value
<= std::numeric_limits
<int8_t>::max());
2091 setInt8(where
, value
);
2094 static void repatchInt32(void* where
, int32_t value
)
2096 setInt32(where
, value
);
2099 static void repatchPointer(void* where
, void* value
)
2101 setPointer(where
, value
);
// Read back the pointer that ends at 'where' (i.e. the pointer-sized slot
// immediately before 'where') — the inverse of setPointer.
static void* readPointer(void* where)
{
    return reinterpret_cast<void**>(where)[-1];
}
2109 static void replaceWithJump(void* instructionStart
, void* to
)
2111 uint8_t* ptr
= reinterpret_cast<uint8_t*>(instructionStart
);
2112 uint8_t* dstPtr
= reinterpret_cast<uint8_t*>(to
);
2113 intptr_t distance
= (intptr_t)(dstPtr
- (ptr
+ 5));
2114 ptr
[0] = static_cast<uint8_t>(OP_JMP_rel32
);
2115 *reinterpret_cast<int32_t*>(ptr
+ 1) = static_cast<int32_t>(distance
);
2118 static ptrdiff_t maxJumpReplacementSize()
2124 static void revertJumpTo_movq_i64r(void* instructionStart
, int64_t imm
, RegisterID dst
)
2126 const unsigned instructionSize
= 10; // REX.W MOV IMM64
2127 const int rexBytes
= 1;
2128 const int opcodeBytes
= 1;
2129 uint8_t* ptr
= reinterpret_cast<uint8_t*>(instructionStart
);
2130 ptr
[0] = PRE_REX
| (1 << 3) | (dst
>> 3);
2131 ptr
[1] = OP_MOV_EAXIv
| (dst
& 7);
2138 for (unsigned i
= rexBytes
+ opcodeBytes
; i
< instructionSize
; ++i
)
2139 ptr
[i
] = u
.asBytes
[i
- rexBytes
- opcodeBytes
];
2142 static void revertJumpTo_movl_i32r(void* instructionStart
, int32_t imm
, RegisterID dst
)
2144 // We only revert jumps on inline caches, and inline caches always use the scratch register (r11).
2145 // FIXME: If the above is ever false then we need to make this smarter with respect to emitting
2147 ASSERT(dst
== X86Registers::r11
);
2148 const unsigned instructionSize
= 6; // REX MOV IMM32
2149 const int rexBytes
= 1;
2150 const int opcodeBytes
= 1;
2151 uint8_t* ptr
= reinterpret_cast<uint8_t*>(instructionStart
);
2152 ptr
[0] = PRE_REX
| (dst
>> 3);
2153 ptr
[1] = OP_MOV_EAXIv
| (dst
& 7);
2160 for (unsigned i
= rexBytes
+ opcodeBytes
; i
< instructionSize
; ++i
)
2161 ptr
[i
] = u
.asBytes
[i
- rexBytes
- opcodeBytes
];
2165 static void revertJumpTo_cmpl_ir_force32(void* instructionStart
, int32_t imm
, RegisterID dst
)
2167 const int opcodeBytes
= 1;
2168 const int modRMBytes
= 1;
2169 ASSERT(opcodeBytes
+ modRMBytes
<= maxJumpReplacementSize());
2170 uint8_t* ptr
= reinterpret_cast<uint8_t*>(instructionStart
);
2171 ptr
[0] = OP_GROUP1_EvIz
;
2172 ptr
[1] = (X86InstructionFormatter::ModRmRegister
<< 6) | (GROUP1_OP_CMP
<< 3) | dst
;
2178 for (unsigned i
= opcodeBytes
+ modRMBytes
; i
< static_cast<unsigned>(maxJumpReplacementSize()); ++i
)
2179 ptr
[i
] = u
.asBytes
[i
- opcodeBytes
- modRMBytes
];
2182 static void revertJumpTo_cmpl_im_force32(void* instructionStart
, int32_t imm
, int offset
, RegisterID dst
)
2184 ASSERT_UNUSED(offset
, !offset
);
2185 const int opcodeBytes
= 1;
2186 const int modRMBytes
= 1;
2187 ASSERT(opcodeBytes
+ modRMBytes
<= maxJumpReplacementSize());
2188 uint8_t* ptr
= reinterpret_cast<uint8_t*>(instructionStart
);
2189 ptr
[0] = OP_GROUP1_EvIz
;
2190 ptr
[1] = (X86InstructionFormatter::ModRmMemoryNoDisp
<< 6) | (GROUP1_OP_CMP
<< 3) | dst
;
2196 for (unsigned i
= opcodeBytes
+ modRMBytes
; i
< static_cast<unsigned>(maxJumpReplacementSize()); ++i
)
2197 ptr
[i
] = u
.asBytes
[i
- opcodeBytes
- modRMBytes
];
2200 static void replaceWithLoad(void* instructionStart
)
2202 uint8_t* ptr
= reinterpret_cast<uint8_t*>(instructionStart
);
2204 if ((*ptr
& ~15) == PRE_REX
)
2214 RELEASE_ASSERT_NOT_REACHED();
2218 static void replaceWithAddressComputation(void* instructionStart
)
2220 uint8_t* ptr
= reinterpret_cast<uint8_t*>(instructionStart
);
2222 if ((*ptr
& ~15) == PRE_REX
)
2232 RELEASE_ASSERT_NOT_REACHED();
2236 static unsigned getCallReturnOffset(AssemblerLabel call
)
2238 ASSERT(call
.isSet());
2239 return call
.m_offset
;
2242 static void* getRelocatedAddress(void* code
, AssemblerLabel label
)
2244 ASSERT(label
.isSet());
2245 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code
) + label
.m_offset
);
2248 static int getDifferenceBetweenLabels(AssemblerLabel a
, AssemblerLabel b
)
2250 return b
.m_offset
- a
.m_offset
;
2253 unsigned debugOffset() { return m_formatter
.debugOffset(); }
2257 m_formatter
.oneByteOp(OP_NOP
);
2260 static void fillNops(void* base
, size_t size
)
2263 static const uint8_t nops
[10][10] = {
2271 {0x0f, 0x1f, 0x40, 0x08},
2272 // nopl 8(%[re]ax,%[re]ax,1)
2273 {0x0f, 0x1f, 0x44, 0x00, 0x08},
2274 // nopw 8(%[re]ax,%[re]ax,1)
2275 {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x08},
2276 // nopl 512(%[re]ax)
2277 {0x0f, 0x1f, 0x80, 0x00, 0x02, 0x00, 0x00},
2278 // nopl 512(%[re]ax,%[re]ax,1)
2279 {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00},
2280 // nopw 512(%[re]ax,%[re]ax,1)
2281 {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00},
2282 // nopw %cs:512(%[re]ax,%[re]ax,1)
2283 {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00}
2286 uint8_t* where
= reinterpret_cast<uint8_t*>(base
);
2288 unsigned nopSize
= static_cast<unsigned>(std::min
<size_t>(size
, 15));
2289 unsigned numPrefixes
= nopSize
<= 10 ? 0 : nopSize
- 10;
2290 for (unsigned i
= 0; i
!= numPrefixes
; ++i
)
2293 unsigned nopRest
= nopSize
- numPrefixes
;
2294 for (unsigned i
= 0; i
!= nopRest
; ++i
)
2295 *where
++ = nops
[nopRest
-1][i
];
2300 memset(base
, OP_NOP
, size
);
2304 // This is a no-op on x86
2305 ALWAYS_INLINE
static void cacheFlush(void*, size_t) { }
// Write 'value' into the pointer-sized slot immediately before 'where' —
// pointer immediates are addressed by their end, matching how labels mark
// the position after an instruction.
static void setPointer(void* where, void* value)
{
    reinterpret_cast<void**>(where)[-1] = value;
}
// Write a 32-bit value into the slot immediately before 'where' (the
// immediate/displacement ends at 'where').
static void setInt32(void* where, int32_t value)
{
    reinterpret_cast<int32_t*>(where)[-1] = value;
}
// Write an 8-bit value into the byte immediately before 'where'.
static void setInt8(void* where, int8_t value)
{
    reinterpret_cast<int8_t*>(where)[-1] = value;
}
2324 static void setRel32(void* from
, void* to
)
2326 intptr_t offset
= reinterpret_cast<intptr_t>(to
) - reinterpret_cast<intptr_t>(from
);
2327 ASSERT(offset
== static_cast<int32_t>(offset
));
2329 setInt32(from
, offset
);
2332 class X86InstructionFormatter
{
2334 static const int maxInstructionSize
= 16;
2345 // Legacy prefix bytes:
2347 // These are emmitted prior to the instruction.
2349 void prefix(OneByteOpcodeID pre
)
2351 m_buffer
.putByte(pre
);
2354 // Word-sized operands / no operand instruction formatters.
2356 // In addition to the opcode, the following operand permutations are supported:
2357 // * None - instruction takes no operands.
2358 // * One register - the low three bits of the RegisterID are added into the opcode.
2359 // * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
2360 // * Three argument ModRM - a register, and a register and an offset describing a memory operand.
2361 // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
2363 // For 32-bit x86 targets, the address operand may also be provided as a void*.
2364 // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
2366 // The twoByteOp methods plant two-byte Intel instructions sequences (first opcode byte 0x0F).
2368 void oneByteOp(OneByteOpcodeID opcode
)
2370 m_buffer
.ensureSpace(maxInstructionSize
);
2371 m_buffer
.putByteUnchecked(opcode
);
2374 void oneByteOp(OneByteOpcodeID opcode
, RegisterID reg
)
2376 m_buffer
.ensureSpace(maxInstructionSize
);
2377 emitRexIfNeeded(0, 0, reg
);
2378 m_buffer
.putByteUnchecked(opcode
+ (reg
& 7));
2381 void oneByteOp(OneByteOpcodeID opcode
, int reg
, RegisterID rm
)
2383 m_buffer
.ensureSpace(maxInstructionSize
);
2384 emitRexIfNeeded(reg
, 0, rm
);
2385 m_buffer
.putByteUnchecked(opcode
);
2386 registerModRM(reg
, rm
);
2389 void oneByteOp(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
2391 m_buffer
.ensureSpace(maxInstructionSize
);
2392 emitRexIfNeeded(reg
, 0, base
);
2393 m_buffer
.putByteUnchecked(opcode
);
2394 memoryModRM(reg
, base
, offset
);
2397 void oneByteOp_disp32(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
2399 m_buffer
.ensureSpace(maxInstructionSize
);
2400 emitRexIfNeeded(reg
, 0, base
);
2401 m_buffer
.putByteUnchecked(opcode
);
2402 memoryModRM_disp32(reg
, base
, offset
);
2405 void oneByteOp_disp8(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
2407 m_buffer
.ensureSpace(maxInstructionSize
);
2408 emitRexIfNeeded(reg
, 0, base
);
2409 m_buffer
.putByteUnchecked(opcode
);
2410 memoryModRM_disp8(reg
, base
, offset
);
2413 void oneByteOp(OneByteOpcodeID opcode
, int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
2415 m_buffer
.ensureSpace(maxInstructionSize
);
2416 emitRexIfNeeded(reg
, index
, base
);
2417 m_buffer
.putByteUnchecked(opcode
);
2418 memoryModRM(reg
, base
, index
, scale
, offset
);
2422 void oneByteOp(OneByteOpcodeID opcode
, int reg
, const void* address
)
2424 m_buffer
.ensureSpace(maxInstructionSize
);
2425 m_buffer
.putByteUnchecked(opcode
);
2426 memoryModRM(reg
, address
);
2430 void twoByteOp(TwoByteOpcodeID opcode
)
2432 m_buffer
.ensureSpace(maxInstructionSize
);
2433 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
2434 m_buffer
.putByteUnchecked(opcode
);
2437 void twoByteOp(TwoByteOpcodeID opcode
, int reg
, RegisterID rm
)
2439 m_buffer
.ensureSpace(maxInstructionSize
);
2440 emitRexIfNeeded(reg
, 0, rm
);
2441 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
2442 m_buffer
.putByteUnchecked(opcode
);
2443 registerModRM(reg
, rm
);
2446 void twoByteOp(TwoByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
2448 m_buffer
.ensureSpace(maxInstructionSize
);
2449 emitRexIfNeeded(reg
, 0, base
);
2450 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
2451 m_buffer
.putByteUnchecked(opcode
);
2452 memoryModRM(reg
, base
, offset
);
2455 void twoByteOp(TwoByteOpcodeID opcode
, int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
2457 m_buffer
.ensureSpace(maxInstructionSize
);
2458 emitRexIfNeeded(reg
, index
, base
);
2459 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
2460 m_buffer
.putByteUnchecked(opcode
);
2461 memoryModRM(reg
, base
, index
, scale
, offset
);
2465 void twoByteOp(TwoByteOpcodeID opcode
, int reg
, const void* address
)
2467 m_buffer
.ensureSpace(maxInstructionSize
);
2468 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
2469 m_buffer
.putByteUnchecked(opcode
);
2470 memoryModRM(reg
, address
);
2474 void threeByteOp(ThreeByteOpcodeID opcode
)
2476 m_buffer
.ensureSpace(maxInstructionSize
);
2477 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
2478 m_buffer
.putByteUnchecked(OP2_3BYTE_ESCAPE
);
2479 m_buffer
.putByteUnchecked(opcode
);
2483 // Quad-word-sized operands:
2485 // Used to format 64-bit operantions, planting a REX.w prefix.
2486 // When planting d64 or f64 instructions, not requiring a REX.w prefix,
2487 // the normal (non-'64'-postfixed) formatters should be used.
2489 void oneByteOp64(OneByteOpcodeID opcode
)
2491 m_buffer
.ensureSpace(maxInstructionSize
);
2493 m_buffer
.putByteUnchecked(opcode
);
2496 void oneByteOp64(OneByteOpcodeID opcode
, RegisterID reg
)
2498 m_buffer
.ensureSpace(maxInstructionSize
);
2499 emitRexW(0, 0, reg
);
2500 m_buffer
.putByteUnchecked(opcode
+ (reg
& 7));
2503 void oneByteOp64(OneByteOpcodeID opcode
, int reg
, RegisterID rm
)
2505 m_buffer
.ensureSpace(maxInstructionSize
);
2506 emitRexW(reg
, 0, rm
);
2507 m_buffer
.putByteUnchecked(opcode
);
2508 registerModRM(reg
, rm
);
2511 void oneByteOp64(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
2513 m_buffer
.ensureSpace(maxInstructionSize
);
2514 emitRexW(reg
, 0, base
);
2515 m_buffer
.putByteUnchecked(opcode
);
2516 memoryModRM(reg
, base
, offset
);
2519 void oneByteOp64_disp32(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
2521 m_buffer
.ensureSpace(maxInstructionSize
);
2522 emitRexW(reg
, 0, base
);
2523 m_buffer
.putByteUnchecked(opcode
);
2524 memoryModRM_disp32(reg
, base
, offset
);
2527 void oneByteOp64_disp8(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
2529 m_buffer
.ensureSpace(maxInstructionSize
);
2530 emitRexW(reg
, 0, base
);
2531 m_buffer
.putByteUnchecked(opcode
);
2532 memoryModRM_disp8(reg
, base
, offset
);
2535 void oneByteOp64(OneByteOpcodeID opcode
, int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
2537 m_buffer
.ensureSpace(maxInstructionSize
);
2538 emitRexW(reg
, index
, base
);
2539 m_buffer
.putByteUnchecked(opcode
);
2540 memoryModRM(reg
, base
, index
, scale
, offset
);
2543 void twoByteOp64(TwoByteOpcodeID opcode
, int reg
, RegisterID rm
)
2545 m_buffer
.ensureSpace(maxInstructionSize
);
2546 emitRexW(reg
, 0, rm
);
2547 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
2548 m_buffer
.putByteUnchecked(opcode
);
2549 registerModRM(reg
, rm
);
2555 // These methods format byte operations. Byte operations differ from the normal
2556 // formatters in the circumstances under which they will decide to emit REX prefixes.
2557 // These should be used where any register operand signifies a byte register.
2559 // The disctinction is due to the handling of register numbers in the range 4..7 on
2560 // x86-64. These register numbers may either represent the second byte of the first
2561 // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
2563 // Since ah..bh cannot be used in all permutations of operands (specifically cannot
2564 // be accessed where a REX prefix is present), these are likely best treated as
2565 // deprecated. In order to ensure the correct registers spl..dil are selected a
2566 // REX prefix will be emitted for any byte register operand in the range 4..15.
2568 // These formatters may be used in instructions where a mix of operand sizes, in which
2569 // case an unnecessary REX will be emitted, for example:
2571 // In this case a REX will be planted since edi is 7 (and were this a byte operand
2572 // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
2573 // be silently ignored by the processor.
2575 // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
2576 // is provided to check byte register operands.
2578 void oneByteOp8(OneByteOpcodeID opcode
, GroupOpcodeID groupOp
, RegisterID rm
)
2580 m_buffer
.ensureSpace(maxInstructionSize
);
2581 emitRexIf(byteRegRequiresRex(rm
), 0, 0, rm
);
2582 m_buffer
.putByteUnchecked(opcode
);
2583 registerModRM(groupOp
, rm
);
2586 void oneByteOp8(OneByteOpcodeID opcode
, int reg
, RegisterID rm
)
2588 m_buffer
.ensureSpace(maxInstructionSize
);
2589 emitRexIf(byteRegRequiresRex(reg
) || byteRegRequiresRex(rm
), reg
, 0, rm
);
2590 m_buffer
.putByteUnchecked(opcode
);
2591 registerModRM(reg
, rm
);
2594 void oneByteOp8(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
2596 m_buffer
.ensureSpace(maxInstructionSize
);
2597 emitRexIf(byteRegRequiresRex(reg
) || byteRegRequiresRex(base
), reg
, 0, base
);
2598 m_buffer
.putByteUnchecked(opcode
);
2599 memoryModRM(reg
, base
, offset
);
2602 void oneByteOp8(OneByteOpcodeID opcode
, int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
2604 m_buffer
.ensureSpace(maxInstructionSize
);
2605 emitRexIf(byteRegRequiresRex(reg
) || regRequiresRex(index
) || regRequiresRex(base
), reg
, index
, base
);
2606 m_buffer
.putByteUnchecked(opcode
);
2607 memoryModRM(reg
, base
, index
, scale
, offset
);
2610 void twoByteOp8(TwoByteOpcodeID opcode
, RegisterID reg
, RegisterID rm
)
2612 m_buffer
.ensureSpace(maxInstructionSize
);
2613 emitRexIf(byteRegRequiresRex(reg
)|byteRegRequiresRex(rm
), reg
, 0, rm
);
2614 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
2615 m_buffer
.putByteUnchecked(opcode
);
2616 registerModRM(reg
, rm
);
2619 void twoByteOp8(TwoByteOpcodeID opcode
, GroupOpcodeID groupOp
, RegisterID rm
)
2621 m_buffer
.ensureSpace(maxInstructionSize
);
2622 emitRexIf(byteRegRequiresRex(rm
), 0, 0, rm
);
2623 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
2624 m_buffer
.putByteUnchecked(opcode
);
2625 registerModRM(groupOp
, rm
);
// An immediate should be appended where appropriate after an op has been emitted.
// The writes are unchecked since the opcode formatters above will have ensured space.
2633 void immediate8(int imm
)
2635 m_buffer
.putByteUnchecked(imm
);
2638 void immediate16(int imm
)
2640 m_buffer
.putShortUnchecked(imm
);
2643 void immediate32(int imm
)
2645 m_buffer
.putIntUnchecked(imm
);
2648 void immediate64(int64_t imm
)
2650 m_buffer
.putInt64Unchecked(imm
);
2653 AssemblerLabel
immediateRel32()
2655 m_buffer
.putIntUnchecked(0);
// Administrative methods:
// Thin pass-throughs to the underlying AssemblerBuffer.

size_t codeSize() const { return m_buffer.codeSize(); } // Number of bytes emitted so far.
AssemblerLabel label() const { return m_buffer.label(); } // Label at the current write position.
bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); } // Is the write position alignment-aligned?
void* data() const { return m_buffer.data(); } // Pointer to the start of the emitted code.

unsigned debugOffset() { return m_buffer.debugOffset(); } // Buffer-supplied offset for debugging only.
2670 // Internals; ModRm and REX formatters.
2672 static const RegisterID noBase
= X86Registers::ebp
;
2673 static const RegisterID hasSib
= X86Registers::esp
;
2674 static const RegisterID noIndex
= X86Registers::esp
;
2676 static const RegisterID noBase2
= X86Registers::r13
;
2677 static const RegisterID hasSib2
= X86Registers::r12
;
2679 // Registers r8 & above require a REX prefixe.
2680 inline bool regRequiresRex(int reg
)
2682 return (reg
>= X86Registers::r8
);
2685 // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
2686 inline bool byteRegRequiresRex(int reg
)
2688 return (reg
>= X86Registers::esp
);
2691 // Format a REX prefix byte.
2692 inline void emitRex(bool w
, int r
, int x
, int b
)
2697 m_buffer
.putByteUnchecked(PRE_REX
| ((int)w
<< 3) | ((r
>>3)<<2) | ((x
>>3)<<1) | (b
>>3));
2700 // Used to plant a REX byte with REX.w set (for 64-bit operations).
2701 inline void emitRexW(int r
, int x
, int b
)
2703 emitRex(true, r
, x
, b
);
2706 // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
2707 // regRequiresRex() to check other registers (i.e. address base & index).
2708 inline void emitRexIf(bool condition
, int r
, int x
, int b
)
2710 if (condition
) emitRex(false, r
, x
, b
);
2713 // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
2714 inline void emitRexIfNeeded(int r
, int x
, int b
)
2716 emitRexIf(regRequiresRex(r
) || regRequiresRex(x
) || regRequiresRex(b
), r
, x
, b
);
2719 // No REX prefix bytes on 32-bit x86.
2720 inline bool regRequiresRex(int) { return false; }
2721 inline bool byteRegRequiresRex(int) { return false; }
2722 inline void emitRexIf(bool, int, int, int) {}
2723 inline void emitRexIfNeeded(int, int, int) {}
2726 void putModRm(ModRmMode mode
, int reg
, RegisterID rm
)
2728 m_buffer
.putByteUnchecked((mode
<< 6) | ((reg
& 7) << 3) | (rm
& 7));
2731 void putModRmSib(ModRmMode mode
, int reg
, RegisterID base
, RegisterID index
, int scale
)
2733 ASSERT(mode
!= ModRmRegister
);
2735 putModRm(mode
, reg
, hasSib
);
2736 m_buffer
.putByteUnchecked((scale
<< 6) | ((index
& 7) << 3) | (base
& 7));
2739 void registerModRM(int reg
, RegisterID rm
)
2741 putModRm(ModRmRegister
, reg
, rm
);
2744 void memoryModRM(int reg
, RegisterID base
, int offset
)
2746 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2748 if ((base
== hasSib
) || (base
== hasSib2
)) {
2750 if (base
== hasSib
) {
2752 if (!offset
) // No need to check if the base is noBase, since we know it is hasSib!
2753 putModRmSib(ModRmMemoryNoDisp
, reg
, base
, noIndex
, 0);
2754 else if (CAN_SIGN_EXTEND_8_32(offset
)) {
2755 putModRmSib(ModRmMemoryDisp8
, reg
, base
, noIndex
, 0);
2756 m_buffer
.putByteUnchecked(offset
);
2758 putModRmSib(ModRmMemoryDisp32
, reg
, base
, noIndex
, 0);
2759 m_buffer
.putIntUnchecked(offset
);
2763 if (!offset
&& (base
!= noBase
) && (base
!= noBase2
))
2765 if (!offset
&& (base
!= noBase
))
2767 putModRm(ModRmMemoryNoDisp
, reg
, base
);
2768 else if (CAN_SIGN_EXTEND_8_32(offset
)) {
2769 putModRm(ModRmMemoryDisp8
, reg
, base
);
2770 m_buffer
.putByteUnchecked(offset
);
2772 putModRm(ModRmMemoryDisp32
, reg
, base
);
2773 m_buffer
.putIntUnchecked(offset
);
2778 void memoryModRM_disp8(int reg
, RegisterID base
, int offset
)
2780 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2781 ASSERT(CAN_SIGN_EXTEND_8_32(offset
));
2783 if ((base
== hasSib
) || (base
== hasSib2
)) {
2785 if (base
== hasSib
) {
2787 putModRmSib(ModRmMemoryDisp8
, reg
, base
, noIndex
, 0);
2788 m_buffer
.putByteUnchecked(offset
);
2790 putModRm(ModRmMemoryDisp8
, reg
, base
);
2791 m_buffer
.putByteUnchecked(offset
);
2795 void memoryModRM_disp32(int reg
, RegisterID base
, int offset
)
2797 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2799 if ((base
== hasSib
) || (base
== hasSib2
)) {
2801 if (base
== hasSib
) {
2803 putModRmSib(ModRmMemoryDisp32
, reg
, base
, noIndex
, 0);
2804 m_buffer
.putIntUnchecked(offset
);
2806 putModRm(ModRmMemoryDisp32
, reg
, base
);
2807 m_buffer
.putIntUnchecked(offset
);
2811 void memoryModRM(int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
2813 ASSERT(index
!= noIndex
);
2816 if (!offset
&& (base
!= noBase
) && (base
!= noBase2
))
2818 if (!offset
&& (base
!= noBase
))
2820 putModRmSib(ModRmMemoryNoDisp
, reg
, base
, index
, scale
);
2821 else if (CAN_SIGN_EXTEND_8_32(offset
)) {
2822 putModRmSib(ModRmMemoryDisp8
, reg
, base
, index
, scale
);
2823 m_buffer
.putByteUnchecked(offset
);
2825 putModRmSib(ModRmMemoryDisp32
, reg
, base
, index
, scale
);
2826 m_buffer
.putIntUnchecked(offset
);
2831 void memoryModRM(int reg
, const void* address
)
2833 // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
2834 putModRm(ModRmMemoryNoDisp
, reg
, noBase
);
2835 m_buffer
.putIntUnchecked(reinterpret_cast<int32_t>(address
));
AssemblerBuffer m_buffer; // Backing storage into which all bytes are emitted.

// NOTE(review): these appear to record buffer offsets bracketing the most
// recently emitted watchpoint region — confirm against the label/watchpoint
// methods elsewhere in this file; their use is not visible in this chunk.
int m_indexOfLastWatchpoint;
int m_indexOfTailOfLastWatchpoint;
#endif // ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
2850 #endif // X86Assembler_h