2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 #ifndef X86Assembler_h
27 #define X86Assembler_h
29 #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
31 #include "AssemblerBuffer.h"
33 #include <wtf/Assertions.h>
34 #include <wtf/Vector.h>
// Returns true when |value| can be encoded as a sign-extended 8-bit
// immediate, i.e. it lies in the representable range of a signed byte.
inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value >= -128 && value <= 127; }
40 namespace X86Registers
{
77 typedef X86Registers::RegisterID RegisterID
;
78 typedef X86Registers::XMMRegisterID XMMRegisterID
;
79 typedef XMMRegisterID FPRegisterID
;
99 ConditionC
= ConditionB
,
100 ConditionNC
= ConditionAE
,
109 OP_2BYTE_ESCAPE
= 0x0F,
114 PRE_PREDICT_BRANCH_NOT_TAKEN
= 0x2E,
125 OP_MOVSXD_GvEv
= 0x63,
127 PRE_OPERAND_SIZE
= 0x66,
130 OP_IMUL_GvEvIz
= 0x69,
131 OP_GROUP1_EbIb
= 0x80,
132 OP_GROUP1_EvIz
= 0x81,
133 OP_GROUP1_EvIb
= 0x83,
140 OP_GROUP1A_Ev
= 0x8F,
146 OP_GROUP2_EvIb
= 0xC1,
148 OP_GROUP11_EvIz
= 0xC7,
150 OP_GROUP2_Ev1
= 0xD1,
151 OP_GROUP2_EvCL
= 0xD3,
152 OP_CALL_rel32
= 0xE8,
156 OP_GROUP3_EbIb
= 0xF6,
158 OP_GROUP3_EvIz
= 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.
163 OP2_MOVSD_VsdWsd
= 0x10,
164 OP2_MOVSD_WsdVsd
= 0x11,
165 OP2_CVTSI2SD_VsdEd
= 0x2A,
166 OP2_CVTTSD2SI_GdWsd
= 0x2C,
167 OP2_UCOMISD_VsdWsd
= 0x2E,
168 OP2_ADDSD_VsdWsd
= 0x58,
169 OP2_MULSD_VsdWsd
= 0x59,
170 OP2_SUBSD_VsdWsd
= 0x5C,
171 OP2_DIVSD_VsdWsd
= 0x5E,
172 OP2_SQRTSD_VsdWsd
= 0x51,
173 OP2_XORPD_VpdWpd
= 0x57,
174 OP2_MOVD_VdEd
= 0x6E,
175 OP2_MOVD_EdVd
= 0x7E,
176 OP2_JCC_rel32
= 0x80,
178 OP2_IMUL_GvEv
= 0xAF,
179 OP2_MOVZX_GvEb
= 0xB6,
180 OP2_MOVZX_GvEw
= 0xB7,
181 OP2_PEXTRW_GdUdIb
= 0xC5,
184 TwoByteOpcodeID
jccRel32(Condition cond
)
186 return (TwoByteOpcodeID
)(OP2_JCC_rel32
+ cond
);
189 TwoByteOpcodeID
setccOpcode(Condition cond
)
191 return (TwoByteOpcodeID
)(OP_SETCC
+ cond
);
221 class X86InstructionFormatter
;
230 void push_r(RegisterID reg
)
232 m_formatter
.oneByteOp(OP_PUSH_EAX
, reg
);
235 void pop_r(RegisterID reg
)
237 m_formatter
.oneByteOp(OP_POP_EAX
, reg
);
240 void push_i32(int imm
)
242 m_formatter
.oneByteOp(OP_PUSH_Iz
);
243 m_formatter
.immediate32(imm
);
246 void push_m(int offset
, RegisterID base
)
248 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_PUSH
, base
, offset
);
251 void pop_m(int offset
, RegisterID base
)
253 m_formatter
.oneByteOp(OP_GROUP1A_Ev
, GROUP1A_OP_POP
, base
, offset
);
256 // Arithmetic operations:
259 void adcl_im(int imm
, const void* addr
)
261 if (CAN_SIGN_EXTEND_8_32(imm
)) {
262 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADC
, addr
);
263 m_formatter
.immediate8(imm
);
265 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADC
, addr
);
266 m_formatter
.immediate32(imm
);
271 void addl_rr(RegisterID src
, RegisterID dst
)
273 m_formatter
.oneByteOp(OP_ADD_EvGv
, src
, dst
);
276 void addl_mr(int offset
, RegisterID base
, RegisterID dst
)
278 m_formatter
.oneByteOp(OP_ADD_GvEv
, dst
, base
, offset
);
281 void addl_rm(RegisterID src
, int offset
, RegisterID base
)
283 m_formatter
.oneByteOp(OP_ADD_EvGv
, src
, base
, offset
);
286 void addl_ir(int imm
, RegisterID dst
)
288 if (CAN_SIGN_EXTEND_8_32(imm
)) {
289 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, dst
);
290 m_formatter
.immediate8(imm
);
292 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, dst
);
293 m_formatter
.immediate32(imm
);
297 void addl_im(int imm
, int offset
, RegisterID base
)
299 if (CAN_SIGN_EXTEND_8_32(imm
)) {
300 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, base
, offset
);
301 m_formatter
.immediate8(imm
);
303 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, base
, offset
);
304 m_formatter
.immediate32(imm
);
309 void addq_rr(RegisterID src
, RegisterID dst
)
311 m_formatter
.oneByteOp64(OP_ADD_EvGv
, src
, dst
);
314 void addq_ir(int imm
, RegisterID dst
)
316 if (CAN_SIGN_EXTEND_8_32(imm
)) {
317 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, dst
);
318 m_formatter
.immediate8(imm
);
320 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, dst
);
321 m_formatter
.immediate32(imm
);
325 void addq_im(int imm
, int offset
, RegisterID base
)
327 if (CAN_SIGN_EXTEND_8_32(imm
)) {
328 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, base
, offset
);
329 m_formatter
.immediate8(imm
);
331 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, base
, offset
);
332 m_formatter
.immediate32(imm
);
336 void addl_im(int imm
, const void* addr
)
338 if (CAN_SIGN_EXTEND_8_32(imm
)) {
339 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, addr
);
340 m_formatter
.immediate8(imm
);
342 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, addr
);
343 m_formatter
.immediate32(imm
);
348 void andl_rr(RegisterID src
, RegisterID dst
)
350 m_formatter
.oneByteOp(OP_AND_EvGv
, src
, dst
);
353 void andl_mr(int offset
, RegisterID base
, RegisterID dst
)
355 m_formatter
.oneByteOp(OP_AND_GvEv
, dst
, base
, offset
);
358 void andl_rm(RegisterID src
, int offset
, RegisterID base
)
360 m_formatter
.oneByteOp(OP_AND_EvGv
, src
, base
, offset
);
363 void andl_ir(int imm
, RegisterID dst
)
365 if (CAN_SIGN_EXTEND_8_32(imm
)) {
366 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_AND
, dst
);
367 m_formatter
.immediate8(imm
);
369 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_AND
, dst
);
370 m_formatter
.immediate32(imm
);
374 void andl_im(int imm
, int offset
, RegisterID base
)
376 if (CAN_SIGN_EXTEND_8_32(imm
)) {
377 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_AND
, base
, offset
);
378 m_formatter
.immediate8(imm
);
380 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_AND
, base
, offset
);
381 m_formatter
.immediate32(imm
);
386 void andq_rr(RegisterID src
, RegisterID dst
)
388 m_formatter
.oneByteOp64(OP_AND_EvGv
, src
, dst
);
391 void andq_ir(int imm
, RegisterID dst
)
393 if (CAN_SIGN_EXTEND_8_32(imm
)) {
394 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_AND
, dst
);
395 m_formatter
.immediate8(imm
);
397 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_AND
, dst
);
398 m_formatter
.immediate32(imm
);
402 void andl_im(int imm
, const void* addr
)
404 if (CAN_SIGN_EXTEND_8_32(imm
)) {
405 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_AND
, addr
);
406 m_formatter
.immediate8(imm
);
408 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_AND
, addr
);
409 m_formatter
.immediate32(imm
);
414 void negl_r(RegisterID dst
)
416 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NEG
, dst
);
419 void negl_m(int offset
, RegisterID base
)
421 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NEG
, base
, offset
);
424 void notl_r(RegisterID dst
)
426 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NOT
, dst
);
429 void notl_m(int offset
, RegisterID base
)
431 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NOT
, base
, offset
);
434 void orl_rr(RegisterID src
, RegisterID dst
)
436 m_formatter
.oneByteOp(OP_OR_EvGv
, src
, dst
);
439 void orl_mr(int offset
, RegisterID base
, RegisterID dst
)
441 m_formatter
.oneByteOp(OP_OR_GvEv
, dst
, base
, offset
);
444 void orl_rm(RegisterID src
, int offset
, RegisterID base
)
446 m_formatter
.oneByteOp(OP_OR_EvGv
, src
, base
, offset
);
449 void orl_ir(int imm
, RegisterID dst
)
451 if (CAN_SIGN_EXTEND_8_32(imm
)) {
452 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_OR
, dst
);
453 m_formatter
.immediate8(imm
);
455 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_OR
, dst
);
456 m_formatter
.immediate32(imm
);
460 void orl_im(int imm
, int offset
, RegisterID base
)
462 if (CAN_SIGN_EXTEND_8_32(imm
)) {
463 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_OR
, base
, offset
);
464 m_formatter
.immediate8(imm
);
466 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_OR
, base
, offset
);
467 m_formatter
.immediate32(imm
);
472 void orq_rr(RegisterID src
, RegisterID dst
)
474 m_formatter
.oneByteOp64(OP_OR_EvGv
, src
, dst
);
477 void orq_ir(int imm
, RegisterID dst
)
479 if (CAN_SIGN_EXTEND_8_32(imm
)) {
480 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_OR
, dst
);
481 m_formatter
.immediate8(imm
);
483 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_OR
, dst
);
484 m_formatter
.immediate32(imm
);
488 void orl_im(int imm
, const void* addr
)
490 if (CAN_SIGN_EXTEND_8_32(imm
)) {
491 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_OR
, addr
);
492 m_formatter
.immediate8(imm
);
494 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_OR
, addr
);
495 m_formatter
.immediate32(imm
);
500 void subl_rr(RegisterID src
, RegisterID dst
)
502 m_formatter
.oneByteOp(OP_SUB_EvGv
, src
, dst
);
505 void subl_mr(int offset
, RegisterID base
, RegisterID dst
)
507 m_formatter
.oneByteOp(OP_SUB_GvEv
, dst
, base
, offset
);
510 void subl_rm(RegisterID src
, int offset
, RegisterID base
)
512 m_formatter
.oneByteOp(OP_SUB_EvGv
, src
, base
, offset
);
515 void subl_ir(int imm
, RegisterID dst
)
517 if (CAN_SIGN_EXTEND_8_32(imm
)) {
518 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, dst
);
519 m_formatter
.immediate8(imm
);
521 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, dst
);
522 m_formatter
.immediate32(imm
);
526 void subl_im(int imm
, int offset
, RegisterID base
)
528 if (CAN_SIGN_EXTEND_8_32(imm
)) {
529 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, base
, offset
);
530 m_formatter
.immediate8(imm
);
532 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, base
, offset
);
533 m_formatter
.immediate32(imm
);
538 void subq_rr(RegisterID src
, RegisterID dst
)
540 m_formatter
.oneByteOp64(OP_SUB_EvGv
, src
, dst
);
543 void subq_ir(int imm
, RegisterID dst
)
545 if (CAN_SIGN_EXTEND_8_32(imm
)) {
546 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, dst
);
547 m_formatter
.immediate8(imm
);
549 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, dst
);
550 m_formatter
.immediate32(imm
);
554 void subl_im(int imm
, const void* addr
)
556 if (CAN_SIGN_EXTEND_8_32(imm
)) {
557 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, addr
);
558 m_formatter
.immediate8(imm
);
560 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, addr
);
561 m_formatter
.immediate32(imm
);
566 void xorl_rr(RegisterID src
, RegisterID dst
)
568 m_formatter
.oneByteOp(OP_XOR_EvGv
, src
, dst
);
571 void xorl_mr(int offset
, RegisterID base
, RegisterID dst
)
573 m_formatter
.oneByteOp(OP_XOR_GvEv
, dst
, base
, offset
);
576 void xorl_rm(RegisterID src
, int offset
, RegisterID base
)
578 m_formatter
.oneByteOp(OP_XOR_EvGv
, src
, base
, offset
);
581 void xorl_im(int imm
, int offset
, RegisterID base
)
583 if (CAN_SIGN_EXTEND_8_32(imm
)) {
584 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_XOR
, base
, offset
);
585 m_formatter
.immediate8(imm
);
587 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_XOR
, base
, offset
);
588 m_formatter
.immediate32(imm
);
592 void xorl_ir(int imm
, RegisterID dst
)
594 if (CAN_SIGN_EXTEND_8_32(imm
)) {
595 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_XOR
, dst
);
596 m_formatter
.immediate8(imm
);
598 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_XOR
, dst
);
599 m_formatter
.immediate32(imm
);
604 void xorq_rr(RegisterID src
, RegisterID dst
)
606 m_formatter
.oneByteOp64(OP_XOR_EvGv
, src
, dst
);
609 void xorq_ir(int imm
, RegisterID dst
)
611 if (CAN_SIGN_EXTEND_8_32(imm
)) {
612 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_XOR
, dst
);
613 m_formatter
.immediate8(imm
);
615 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_XOR
, dst
);
616 m_formatter
.immediate32(imm
);
621 void sarl_i8r(int imm
, RegisterID dst
)
624 m_formatter
.oneByteOp(OP_GROUP2_Ev1
, GROUP2_OP_SAR
, dst
);
626 m_formatter
.oneByteOp(OP_GROUP2_EvIb
, GROUP2_OP_SAR
, dst
);
627 m_formatter
.immediate8(imm
);
631 void sarl_CLr(RegisterID dst
)
633 m_formatter
.oneByteOp(OP_GROUP2_EvCL
, GROUP2_OP_SAR
, dst
);
636 void shrl_i8r(int imm
, RegisterID dst
)
639 m_formatter
.oneByteOp(OP_GROUP2_Ev1
, GROUP2_OP_SHR
, dst
);
641 m_formatter
.oneByteOp(OP_GROUP2_EvIb
, GROUP2_OP_SHR
, dst
);
642 m_formatter
.immediate8(imm
);
646 void shrl_CLr(RegisterID dst
)
648 m_formatter
.oneByteOp(OP_GROUP2_EvCL
, GROUP2_OP_SHR
, dst
);
651 void shll_i8r(int imm
, RegisterID dst
)
654 m_formatter
.oneByteOp(OP_GROUP2_Ev1
, GROUP2_OP_SHL
, dst
);
656 m_formatter
.oneByteOp(OP_GROUP2_EvIb
, GROUP2_OP_SHL
, dst
);
657 m_formatter
.immediate8(imm
);
661 void shll_CLr(RegisterID dst
)
663 m_formatter
.oneByteOp(OP_GROUP2_EvCL
, GROUP2_OP_SHL
, dst
);
667 void sarq_CLr(RegisterID dst
)
669 m_formatter
.oneByteOp64(OP_GROUP2_EvCL
, GROUP2_OP_SAR
, dst
);
672 void sarq_i8r(int imm
, RegisterID dst
)
675 m_formatter
.oneByteOp64(OP_GROUP2_Ev1
, GROUP2_OP_SAR
, dst
);
677 m_formatter
.oneByteOp64(OP_GROUP2_EvIb
, GROUP2_OP_SAR
, dst
);
678 m_formatter
.immediate8(imm
);
683 void imull_rr(RegisterID src
, RegisterID dst
)
685 m_formatter
.twoByteOp(OP2_IMUL_GvEv
, dst
, src
);
688 void imull_mr(int offset
, RegisterID base
, RegisterID dst
)
690 m_formatter
.twoByteOp(OP2_IMUL_GvEv
, dst
, base
, offset
);
693 void imull_i32r(RegisterID src
, int32_t value
, RegisterID dst
)
695 m_formatter
.oneByteOp(OP_IMUL_GvEvIz
, dst
, src
);
696 m_formatter
.immediate32(value
);
699 void idivl_r(RegisterID dst
)
701 m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_IDIV
, dst
);
706 void cmpl_rr(RegisterID src
, RegisterID dst
)
708 m_formatter
.oneByteOp(OP_CMP_EvGv
, src
, dst
);
711 void cmpl_rm(RegisterID src
, int offset
, RegisterID base
)
713 m_formatter
.oneByteOp(OP_CMP_EvGv
, src
, base
, offset
);
716 void cmpl_mr(int offset
, RegisterID base
, RegisterID src
)
718 m_formatter
.oneByteOp(OP_CMP_GvEv
, src
, base
, offset
);
721 void cmpl_ir(int imm
, RegisterID dst
)
723 if (CAN_SIGN_EXTEND_8_32(imm
)) {
724 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, dst
);
725 m_formatter
.immediate8(imm
);
727 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, dst
);
728 m_formatter
.immediate32(imm
);
732 void cmpl_ir_force32(int imm
, RegisterID dst
)
734 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, dst
);
735 m_formatter
.immediate32(imm
);
738 void cmpl_im(int imm
, int offset
, RegisterID base
)
740 if (CAN_SIGN_EXTEND_8_32(imm
)) {
741 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, offset
);
742 m_formatter
.immediate8(imm
);
744 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, offset
);
745 m_formatter
.immediate32(imm
);
749 void cmpb_im(int imm
, int offset
, RegisterID base
)
751 m_formatter
.oneByteOp(OP_GROUP1_EbIb
, GROUP1_OP_CMP
, base
, offset
);
752 m_formatter
.immediate8(imm
);
755 void cmpb_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
757 m_formatter
.oneByteOp(OP_GROUP1_EbIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
758 m_formatter
.immediate8(imm
);
761 void cmpl_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
763 if (CAN_SIGN_EXTEND_8_32(imm
)) {
764 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
765 m_formatter
.immediate8(imm
);
767 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
768 m_formatter
.immediate32(imm
);
772 void cmpl_im_force32(int imm
, int offset
, RegisterID base
)
774 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, offset
);
775 m_formatter
.immediate32(imm
);
779 void cmpq_rr(RegisterID src
, RegisterID dst
)
781 m_formatter
.oneByteOp64(OP_CMP_EvGv
, src
, dst
);
784 void cmpq_rm(RegisterID src
, int offset
, RegisterID base
)
786 m_formatter
.oneByteOp64(OP_CMP_EvGv
, src
, base
, offset
);
789 void cmpq_mr(int offset
, RegisterID base
, RegisterID src
)
791 m_formatter
.oneByteOp64(OP_CMP_GvEv
, src
, base
, offset
);
794 void cmpq_ir(int imm
, RegisterID dst
)
796 if (CAN_SIGN_EXTEND_8_32(imm
)) {
797 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, dst
);
798 m_formatter
.immediate8(imm
);
800 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, dst
);
801 m_formatter
.immediate32(imm
);
805 void cmpq_im(int imm
, int offset
, RegisterID base
)
807 if (CAN_SIGN_EXTEND_8_32(imm
)) {
808 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, offset
);
809 m_formatter
.immediate8(imm
);
811 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, offset
);
812 m_formatter
.immediate32(imm
);
816 void cmpq_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
818 if (CAN_SIGN_EXTEND_8_32(imm
)) {
819 m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
820 m_formatter
.immediate8(imm
);
822 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
823 m_formatter
.immediate32(imm
);
827 void cmpl_rm(RegisterID reg
, const void* addr
)
829 m_formatter
.oneByteOp(OP_CMP_EvGv
, reg
, addr
);
832 void cmpl_im(int imm
, const void* addr
)
834 if (CAN_SIGN_EXTEND_8_32(imm
)) {
835 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, addr
);
836 m_formatter
.immediate8(imm
);
838 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, addr
);
839 m_formatter
.immediate32(imm
);
844 void cmpw_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
846 m_formatter
.prefix(PRE_OPERAND_SIZE
);
847 m_formatter
.oneByteOp(OP_CMP_EvGv
, src
, base
, index
, scale
, offset
);
850 void cmpw_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
852 if (CAN_SIGN_EXTEND_8_32(imm
)) {
853 m_formatter
.prefix(PRE_OPERAND_SIZE
);
854 m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
855 m_formatter
.immediate8(imm
);
857 m_formatter
.prefix(PRE_OPERAND_SIZE
);
858 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, index
, scale
, offset
);
859 m_formatter
.immediate16(imm
);
863 void testl_rr(RegisterID src
, RegisterID dst
)
865 m_formatter
.oneByteOp(OP_TEST_EvGv
, src
, dst
);
868 void testl_i32r(int imm
, RegisterID dst
)
870 m_formatter
.oneByteOp(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, dst
);
871 m_formatter
.immediate32(imm
);
874 void testl_i32m(int imm
, int offset
, RegisterID base
)
876 m_formatter
.oneByteOp(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, offset
);
877 m_formatter
.immediate32(imm
);
880 void testb_rr(RegisterID src
, RegisterID dst
)
882 m_formatter
.oneByteOp(OP_TEST_EbGb
, src
, dst
);
885 void testb_im(int imm
, int offset
, RegisterID base
)
887 m_formatter
.oneByteOp(OP_GROUP3_EbIb
, GROUP3_OP_TEST
, base
, offset
);
888 m_formatter
.immediate8(imm
);
891 void testb_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
893 m_formatter
.oneByteOp(OP_GROUP3_EbIb
, GROUP3_OP_TEST
, base
, index
, scale
, offset
);
894 m_formatter
.immediate8(imm
);
897 void testl_i32m(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
899 m_formatter
.oneByteOp(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, index
, scale
, offset
);
900 m_formatter
.immediate32(imm
);
904 void testq_rr(RegisterID src
, RegisterID dst
)
906 m_formatter
.oneByteOp64(OP_TEST_EvGv
, src
, dst
);
909 void testq_i32r(int imm
, RegisterID dst
)
911 m_formatter
.oneByteOp64(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, dst
);
912 m_formatter
.immediate32(imm
);
915 void testq_i32m(int imm
, int offset
, RegisterID base
)
917 m_formatter
.oneByteOp64(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, offset
);
918 m_formatter
.immediate32(imm
);
921 void testq_i32m(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
)
923 m_formatter
.oneByteOp64(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, index
, scale
, offset
);
924 m_formatter
.immediate32(imm
);
928 void testw_rr(RegisterID src
, RegisterID dst
)
930 m_formatter
.prefix(PRE_OPERAND_SIZE
);
931 m_formatter
.oneByteOp(OP_TEST_EvGv
, src
, dst
);
934 void testb_i8r(int imm
, RegisterID dst
)
936 m_formatter
.oneByteOp8(OP_GROUP3_EbIb
, GROUP3_OP_TEST
, dst
);
937 m_formatter
.immediate8(imm
);
940 void setCC_r(Condition cond
, RegisterID dst
)
942 m_formatter
.twoByteOp8(setccOpcode(cond
), (GroupOpcodeID
)0, dst
);
945 void sete_r(RegisterID dst
)
947 m_formatter
.twoByteOp8(setccOpcode(ConditionE
), (GroupOpcodeID
)0, dst
);
950 void setz_r(RegisterID dst
)
955 void setne_r(RegisterID dst
)
957 m_formatter
.twoByteOp8(setccOpcode(ConditionNE
), (GroupOpcodeID
)0, dst
);
960 void setnz_r(RegisterID dst
)
969 m_formatter
.oneByteOp(OP_CDQ
);
972 void xchgl_rr(RegisterID src
, RegisterID dst
)
974 m_formatter
.oneByteOp(OP_XCHG_EvGv
, src
, dst
);
978 void xchgq_rr(RegisterID src
, RegisterID dst
)
980 m_formatter
.oneByteOp64(OP_XCHG_EvGv
, src
, dst
);
984 void movl_rr(RegisterID src
, RegisterID dst
)
986 m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, dst
);
989 void movl_rm(RegisterID src
, int offset
, RegisterID base
)
991 m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, base
, offset
);
994 void movl_rm_disp32(RegisterID src
, int offset
, RegisterID base
)
996 m_formatter
.oneByteOp_disp32(OP_MOV_EvGv
, src
, base
, offset
);
999 void movl_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1001 m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, base
, index
, scale
, offset
);
1004 void movl_mEAX(const void* addr
)
1006 m_formatter
.oneByteOp(OP_MOV_EAXOv
);
1008 m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
));
1010 m_formatter
.immediate32(reinterpret_cast<int>(addr
));
1014 void movl_mr(int offset
, RegisterID base
, RegisterID dst
)
1016 m_formatter
.oneByteOp(OP_MOV_GvEv
, dst
, base
, offset
);
1019 void movl_mr_disp32(int offset
, RegisterID base
, RegisterID dst
)
1021 m_formatter
.oneByteOp_disp32(OP_MOV_GvEv
, dst
, base
, offset
);
1024 void movl_mr_disp8(int offset
, RegisterID base
, RegisterID dst
)
1026 m_formatter
.oneByteOp_disp8(OP_MOV_GvEv
, dst
, base
, offset
);
1029 void movl_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, RegisterID dst
)
1031 m_formatter
.oneByteOp(OP_MOV_GvEv
, dst
, base
, index
, scale
, offset
);
1034 void movl_i32r(int imm
, RegisterID dst
)
1036 m_formatter
.oneByteOp(OP_MOV_EAXIv
, dst
);
1037 m_formatter
.immediate32(imm
);
1040 void movl_i32m(int imm
, int offset
, RegisterID base
)
1042 m_formatter
.oneByteOp(OP_GROUP11_EvIz
, GROUP11_MOV
, base
, offset
);
1043 m_formatter
.immediate32(imm
);
1046 void movl_EAXm(const void* addr
)
1048 m_formatter
.oneByteOp(OP_MOV_OvEAX
);
1050 m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
));
1052 m_formatter
.immediate32(reinterpret_cast<int>(addr
));
1057 void movq_rr(RegisterID src
, RegisterID dst
)
1059 m_formatter
.oneByteOp64(OP_MOV_EvGv
, src
, dst
);
1062 void movq_rm(RegisterID src
, int offset
, RegisterID base
)
1064 m_formatter
.oneByteOp64(OP_MOV_EvGv
, src
, base
, offset
);
1067 void movq_rm_disp32(RegisterID src
, int offset
, RegisterID base
)
1069 m_formatter
.oneByteOp64_disp32(OP_MOV_EvGv
, src
, base
, offset
);
1072 void movq_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
)
1074 m_formatter
.oneByteOp64(OP_MOV_EvGv
, src
, base
, index
, scale
, offset
);
1077 void movq_mEAX(const void* addr
)
1079 m_formatter
.oneByteOp64(OP_MOV_EAXOv
);
1080 m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
));
1083 void movq_EAXm(const void* addr
)
1085 m_formatter
.oneByteOp64(OP_MOV_OvEAX
);
1086 m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
));
1089 void movq_mr(int offset
, RegisterID base
, RegisterID dst
)
1091 m_formatter
.oneByteOp64(OP_MOV_GvEv
, dst
, base
, offset
);
1094 void movq_mr_disp32(int offset
, RegisterID base
, RegisterID dst
)
1096 m_formatter
.oneByteOp64_disp32(OP_MOV_GvEv
, dst
, base
, offset
);
1099 void movq_mr_disp8(int offset
, RegisterID base
, RegisterID dst
)
1101 m_formatter
.oneByteOp64_disp8(OP_MOV_GvEv
, dst
, base
, offset
);
1104 void movq_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, RegisterID dst
)
1106 m_formatter
.oneByteOp64(OP_MOV_GvEv
, dst
, base
, index
, scale
, offset
);
1109 void movq_i32m(int imm
, int offset
, RegisterID base
)
1111 m_formatter
.oneByteOp64(OP_GROUP11_EvIz
, GROUP11_MOV
, base
, offset
);
1112 m_formatter
.immediate32(imm
);
1115 void movq_i64r(int64_t imm
, RegisterID dst
)
1117 m_formatter
.oneByteOp64(OP_MOV_EAXIv
, dst
);
1118 m_formatter
.immediate64(imm
);
1121 void movsxd_rr(RegisterID src
, RegisterID dst
)
1123 m_formatter
.oneByteOp64(OP_MOVSXD_GvEv
, dst
, src
);
1128 void movl_rm(RegisterID src
, const void* addr
)
1130 if (src
== X86Registers::eax
)
1133 m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, addr
);
1136 void movl_mr(const void* addr
, RegisterID dst
)
1138 if (dst
== X86Registers::eax
)
1141 m_formatter
.oneByteOp(OP_MOV_GvEv
, dst
, addr
);
1144 void movl_i32m(int imm
, const void* addr
)
1146 m_formatter
.oneByteOp(OP_GROUP11_EvIz
, GROUP11_MOV
, addr
);
1147 m_formatter
.immediate32(imm
);
1151 void movzwl_mr(int offset
, RegisterID base
, RegisterID dst
)
1153 m_formatter
.twoByteOp(OP2_MOVZX_GvEw
, dst
, base
, offset
);
1156 void movzwl_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, RegisterID dst
)
1158 m_formatter
.twoByteOp(OP2_MOVZX_GvEw
, dst
, base
, index
, scale
, offset
);
1161 void movzbl_rr(RegisterID src
, RegisterID dst
)
1163 // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
1164 // is in the range ESP-EDI, and the src would not have required a REX). Unneeded
1165 // REX prefixes are defined to be silently ignored by the processor.
1166 m_formatter
.twoByteOp8(OP2_MOVZX_GvEb
, dst
, src
);
1169 void leal_mr(int offset
, RegisterID base
, RegisterID dst
)
1171 m_formatter
.oneByteOp(OP_LEA
, dst
, base
, offset
);
1174 void leaq_mr(int offset
, RegisterID base
, RegisterID dst
)
1176 m_formatter
.oneByteOp64(OP_LEA
, dst
, base
, offset
);
1182 AssemblerLabel
call()
1184 m_formatter
.oneByteOp(OP_CALL_rel32
);
1185 return m_formatter
.immediateRel32();
1188 AssemblerLabel
call(RegisterID dst
)
1190 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_CALLN
, dst
);
1191 return m_formatter
.label();
1194 void call_m(int offset
, RegisterID base
)
1196 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_CALLN
, base
, offset
);
1199 AssemblerLabel
jmp()
1201 m_formatter
.oneByteOp(OP_JMP_rel32
);
1202 return m_formatter
.immediateRel32();
1205 // Return a AssemblerLabel so we have a label to the jump, so we can use this
1206 // To make a tail recursive call on x86-64. The MacroAssembler
1207 // really shouldn't wrap this as a Jump, since it can't be linked. :-/
1208 AssemblerLabel
jmp_r(RegisterID dst
)
1210 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_JMPN
, dst
);
1211 return m_formatter
.label();
1214 void jmp_m(int offset
, RegisterID base
)
1216 m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_JMPN
, base
, offset
);
1219 AssemblerLabel
jne()
1221 m_formatter
.twoByteOp(jccRel32(ConditionNE
));
1222 return m_formatter
.immediateRel32();
1225 AssemblerLabel
jnz()
1232 m_formatter
.twoByteOp(jccRel32(ConditionE
));
1233 return m_formatter
.immediateRel32();
1243 m_formatter
.twoByteOp(jccRel32(ConditionL
));
1244 return m_formatter
.immediateRel32();
1249 m_formatter
.twoByteOp(jccRel32(ConditionB
));
1250 return m_formatter
.immediateRel32();
1253 AssemblerLabel
jle()
1255 m_formatter
.twoByteOp(jccRel32(ConditionLE
));
1256 return m_formatter
.immediateRel32();
1259 AssemblerLabel
jbe()
1261 m_formatter
.twoByteOp(jccRel32(ConditionBE
));
1262 return m_formatter
.immediateRel32();
1265 AssemblerLabel
jge()
1267 m_formatter
.twoByteOp(jccRel32(ConditionGE
));
1268 return m_formatter
.immediateRel32();
1273 m_formatter
.twoByteOp(jccRel32(ConditionG
));
1274 return m_formatter
.immediateRel32();
1279 m_formatter
.twoByteOp(jccRel32(ConditionA
));
1280 return m_formatter
.immediateRel32();
1283 AssemblerLabel
jae()
1285 m_formatter
.twoByteOp(jccRel32(ConditionAE
));
1286 return m_formatter
.immediateRel32();
1291 m_formatter
.twoByteOp(jccRel32(ConditionO
));
1292 return m_formatter
.immediateRel32();
1297 m_formatter
.twoByteOp(jccRel32(ConditionP
));
1298 return m_formatter
.immediateRel32();
1303 m_formatter
.twoByteOp(jccRel32(ConditionS
));
1304 return m_formatter
.immediateRel32();
1307 AssemblerLabel
jCC(Condition cond
)
1309 m_formatter
.twoByteOp(jccRel32(cond
));
1310 return m_formatter
.immediateRel32();
1315 void addsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1317 m_formatter
.prefix(PRE_SSE_F2
);
1318 m_formatter
.twoByteOp(OP2_ADDSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1321 void addsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1323 m_formatter
.prefix(PRE_SSE_F2
);
1324 m_formatter
.twoByteOp(OP2_ADDSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1327 void cvtsi2sd_rr(RegisterID src
, XMMRegisterID dst
)
1329 m_formatter
.prefix(PRE_SSE_F2
);
1330 m_formatter
.twoByteOp(OP2_CVTSI2SD_VsdEd
, (RegisterID
)dst
, src
);
1333 void cvtsi2sd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1335 m_formatter
.prefix(PRE_SSE_F2
);
1336 m_formatter
.twoByteOp(OP2_CVTSI2SD_VsdEd
, (RegisterID
)dst
, base
, offset
);
1340 void cvtsi2sd_mr(const void* address
, XMMRegisterID dst
)
1342 m_formatter
.prefix(PRE_SSE_F2
);
1343 m_formatter
.twoByteOp(OP2_CVTSI2SD_VsdEd
, (RegisterID
)dst
, address
);
1347 void cvttsd2si_rr(XMMRegisterID src
, RegisterID dst
)
1349 m_formatter
.prefix(PRE_SSE_F2
);
1350 m_formatter
.twoByteOp(OP2_CVTTSD2SI_GdWsd
, dst
, (RegisterID
)src
);
1353 void movd_rr(XMMRegisterID src
, RegisterID dst
)
1355 m_formatter
.prefix(PRE_SSE_66
);
1356 m_formatter
.twoByteOp(OP2_MOVD_EdVd
, (RegisterID
)src
, dst
);
1360 void movq_rr(XMMRegisterID src
, RegisterID dst
)
1362 m_formatter
.prefix(PRE_SSE_66
);
1363 m_formatter
.twoByteOp64(OP2_MOVD_EdVd
, (RegisterID
)src
, dst
);
1366 void movq_rr(RegisterID src
, XMMRegisterID dst
)
1368 m_formatter
.prefix(PRE_SSE_66
);
1369 m_formatter
.twoByteOp64(OP2_MOVD_VdEd
, (RegisterID
)dst
, src
);
1373 void movsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1375 m_formatter
.prefix(PRE_SSE_F2
);
1376 m_formatter
.twoByteOp(OP2_MOVSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1379 void movsd_rm(XMMRegisterID src
, int offset
, RegisterID base
)
1381 m_formatter
.prefix(PRE_SSE_F2
);
1382 m_formatter
.twoByteOp(OP2_MOVSD_WsdVsd
, (RegisterID
)src
, base
, offset
);
1385 void movsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1387 m_formatter
.prefix(PRE_SSE_F2
);
1388 m_formatter
.twoByteOp(OP2_MOVSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1392 void movsd_mr(const void* address
, XMMRegisterID dst
)
1394 m_formatter
.prefix(PRE_SSE_F2
);
1395 m_formatter
.twoByteOp(OP2_MOVSD_VsdWsd
, (RegisterID
)dst
, address
);
1399 void mulsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1401 m_formatter
.prefix(PRE_SSE_F2
);
1402 m_formatter
.twoByteOp(OP2_MULSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1405 void mulsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1407 m_formatter
.prefix(PRE_SSE_F2
);
1408 m_formatter
.twoByteOp(OP2_MULSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1411 void pextrw_irr(int whichWord
, XMMRegisterID src
, RegisterID dst
)
1413 m_formatter
.prefix(PRE_SSE_66
);
1414 m_formatter
.twoByteOp(OP2_PEXTRW_GdUdIb
, (RegisterID
)dst
, (RegisterID
)src
);
1415 m_formatter
.immediate8(whichWord
);
1418 void subsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1420 m_formatter
.prefix(PRE_SSE_F2
);
1421 m_formatter
.twoByteOp(OP2_SUBSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1424 void subsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1426 m_formatter
.prefix(PRE_SSE_F2
);
1427 m_formatter
.twoByteOp(OP2_SUBSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1430 void ucomisd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1432 m_formatter
.prefix(PRE_SSE_66
);
1433 m_formatter
.twoByteOp(OP2_UCOMISD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1436 void ucomisd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1438 m_formatter
.prefix(PRE_SSE_66
);
1439 m_formatter
.twoByteOp(OP2_UCOMISD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1442 void divsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1444 m_formatter
.prefix(PRE_SSE_F2
);
1445 m_formatter
.twoByteOp(OP2_DIVSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1448 void divsd_mr(int offset
, RegisterID base
, XMMRegisterID dst
)
1450 m_formatter
.prefix(PRE_SSE_F2
);
1451 m_formatter
.twoByteOp(OP2_DIVSD_VsdWsd
, (RegisterID
)dst
, base
, offset
);
1454 void xorpd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1456 m_formatter
.prefix(PRE_SSE_66
);
1457 m_formatter
.twoByteOp(OP2_XORPD_VpdWpd
, (RegisterID
)dst
, (RegisterID
)src
);
1460 void sqrtsd_rr(XMMRegisterID src
, XMMRegisterID dst
)
1462 m_formatter
.prefix(PRE_SSE_F2
);
1463 m_formatter
.twoByteOp(OP2_SQRTSD_VsdWsd
, (RegisterID
)dst
, (RegisterID
)src
);
1466 // Misc instructions:
1470 m_formatter
.oneByteOp(OP_INT3
);
1475 m_formatter
.oneByteOp(OP_RET
);
1478 void predictNotTaken()
1480 m_formatter
.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN
);
1483 // Assembler admin methods:
1485 size_t codeSize() const
1487 return m_formatter
.codeSize();
1490 AssemblerLabel
label()
1492 return m_formatter
.label();
1495 AssemblerLabel
align(int alignment
)
1497 while (!m_formatter
.isAligned(alignment
))
1498 m_formatter
.oneByteOp(OP_HLT
);
1503 // Linking & patching:
1505 // 'link' and 'patch' methods are for use on unprotected code - such as the code
1506 // within the AssemblerBuffer, and code being patched by the patch buffer. Once
1507 // code has been finalized it is (platform support permitting) within a non-
1508 // writable region of memory; to modify the code in an execute-only execuable
1509 // pool the 'repatch' and 'relink' methods should be used.
1511 void linkJump(AssemblerLabel from
, AssemblerLabel to
)
1513 ASSERT(from
.isSet());
1516 char* code
= reinterpret_cast<char*>(m_formatter
.data());
1517 ASSERT(!reinterpret_cast<int32_t*>(code
+ from
.m_offset
)[-1]);
1518 setRel32(code
+ from
.m_offset
, code
+ to
.m_offset
);
1521 static void linkJump(void* code
, AssemblerLabel from
, void* to
)
1523 ASSERT(from
.isSet());
1525 setRel32(reinterpret_cast<char*>(code
) + from
.m_offset
, to
);
1528 static void linkCall(void* code
, AssemblerLabel from
, void* to
)
1530 ASSERT(from
.isSet());
1532 setRel32(reinterpret_cast<char*>(code
) + from
.m_offset
, to
);
1535 static void linkPointer(void* code
, AssemblerLabel where
, void* value
)
1537 ASSERT(where
.isSet());
1539 setPointer(reinterpret_cast<char*>(code
) + where
.m_offset
, value
);
1542 static void relinkJump(void* from
, void* to
)
1547 static void relinkCall(void* from
, void* to
)
1552 static void repatchCompact(void* where
, int32_t value
)
1555 ASSERT(value
<= std::numeric_limits
<int8_t>::max());
1556 setInt8(where
, value
);
1559 static void repatchInt32(void* where
, int32_t value
)
1561 setInt32(where
, value
);
1564 static void repatchPointer(void* where
, void* value
)
1566 setPointer(where
, value
);
1569 static void* readPointer(void* where
)
1571 return reinterpret_cast<void**>(where
)[-1];
1574 static unsigned getCallReturnOffset(AssemblerLabel call
)
1576 ASSERT(call
.isSet());
1577 return call
.m_offset
;
1580 static void* getRelocatedAddress(void* code
, AssemblerLabel label
)
1582 ASSERT(label
.isSet());
1583 return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code
) + label
.m_offset
);
1586 static int getDifferenceBetweenLabels(AssemblerLabel a
, AssemblerLabel b
)
1588 return b
.m_offset
- a
.m_offset
;
1591 void* executableCopy(JSGlobalData
& globalData
, ExecutablePool
* allocator
)
1593 return m_formatter
.executableCopy(globalData
, allocator
);
1596 void rewindToLabel(AssemblerLabel rewindTo
) { m_formatter
.rewindToLabel(rewindTo
); }
1599 unsigned debugOffset() { return m_formatter
.debugOffset(); }
1604 m_formatter
.oneByteOp(OP_NOP
);
1609 static void setPointer(void* where
, void* value
)
1611 reinterpret_cast<void**>(where
)[-1] = value
;
1614 static void setInt32(void* where
, int32_t value
)
1616 reinterpret_cast<int32_t*>(where
)[-1] = value
;
1619 static void setInt8(void* where
, int8_t value
)
1621 reinterpret_cast<int8_t*>(where
)[-1] = value
;
1624 static void setRel32(void* from
, void* to
)
1626 intptr_t offset
= reinterpret_cast<intptr_t>(to
) - reinterpret_cast<intptr_t>(from
);
1627 ASSERT(offset
== static_cast<int32_t>(offset
));
1629 setInt32(from
, offset
);
1632 class X86InstructionFormatter
{
1634 static const int maxInstructionSize
= 16;
1638 // Legacy prefix bytes:
1640 // These are emmitted prior to the instruction.
1642 void prefix(OneByteOpcodeID pre
)
1644 m_buffer
.putByte(pre
);
1647 // Word-sized operands / no operand instruction formatters.
1649 // In addition to the opcode, the following operand permutations are supported:
1650 // * None - instruction takes no operands.
1651 // * One register - the low three bits of the RegisterID are added into the opcode.
1652 // * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
1653 // * Three argument ModRM - a register, and a register and an offset describing a memory operand.
1654 // * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
1656 // For 32-bit x86 targets, the address operand may also be provided as a void*.
1657 // On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
1659 // The twoByteOp methods plant two-byte Intel instructions sequences (first opcode byte 0x0F).
1661 void oneByteOp(OneByteOpcodeID opcode
)
1663 m_buffer
.ensureSpace(maxInstructionSize
);
1664 m_buffer
.putByteUnchecked(opcode
);
1667 void oneByteOp(OneByteOpcodeID opcode
, RegisterID reg
)
1669 m_buffer
.ensureSpace(maxInstructionSize
);
1670 emitRexIfNeeded(0, 0, reg
);
1671 m_buffer
.putByteUnchecked(opcode
+ (reg
& 7));
1674 void oneByteOp(OneByteOpcodeID opcode
, int reg
, RegisterID rm
)
1676 m_buffer
.ensureSpace(maxInstructionSize
);
1677 emitRexIfNeeded(reg
, 0, rm
);
1678 m_buffer
.putByteUnchecked(opcode
);
1679 registerModRM(reg
, rm
);
1682 void oneByteOp(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1684 m_buffer
.ensureSpace(maxInstructionSize
);
1685 emitRexIfNeeded(reg
, 0, base
);
1686 m_buffer
.putByteUnchecked(opcode
);
1687 memoryModRM(reg
, base
, offset
);
1690 void oneByteOp_disp32(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1692 m_buffer
.ensureSpace(maxInstructionSize
);
1693 emitRexIfNeeded(reg
, 0, base
);
1694 m_buffer
.putByteUnchecked(opcode
);
1695 memoryModRM_disp32(reg
, base
, offset
);
1698 void oneByteOp_disp8(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1700 m_buffer
.ensureSpace(maxInstructionSize
);
1701 emitRexIfNeeded(reg
, 0, base
);
1702 m_buffer
.putByteUnchecked(opcode
);
1703 memoryModRM_disp8(reg
, base
, offset
);
1706 void oneByteOp(OneByteOpcodeID opcode
, int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
1708 m_buffer
.ensureSpace(maxInstructionSize
);
1709 emitRexIfNeeded(reg
, index
, base
);
1710 m_buffer
.putByteUnchecked(opcode
);
1711 memoryModRM(reg
, base
, index
, scale
, offset
);
1715 void oneByteOp(OneByteOpcodeID opcode
, int reg
, const void* address
)
1717 m_buffer
.ensureSpace(maxInstructionSize
);
1718 m_buffer
.putByteUnchecked(opcode
);
1719 memoryModRM(reg
, address
);
1723 void twoByteOp(TwoByteOpcodeID opcode
)
1725 m_buffer
.ensureSpace(maxInstructionSize
);
1726 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1727 m_buffer
.putByteUnchecked(opcode
);
1730 void twoByteOp(TwoByteOpcodeID opcode
, int reg
, RegisterID rm
)
1732 m_buffer
.ensureSpace(maxInstructionSize
);
1733 emitRexIfNeeded(reg
, 0, rm
);
1734 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1735 m_buffer
.putByteUnchecked(opcode
);
1736 registerModRM(reg
, rm
);
1739 void twoByteOp(TwoByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1741 m_buffer
.ensureSpace(maxInstructionSize
);
1742 emitRexIfNeeded(reg
, 0, base
);
1743 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1744 m_buffer
.putByteUnchecked(opcode
);
1745 memoryModRM(reg
, base
, offset
);
1748 void twoByteOp(TwoByteOpcodeID opcode
, int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
1750 m_buffer
.ensureSpace(maxInstructionSize
);
1751 emitRexIfNeeded(reg
, index
, base
);
1752 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1753 m_buffer
.putByteUnchecked(opcode
);
1754 memoryModRM(reg
, base
, index
, scale
, offset
);
1758 void twoByteOp(TwoByteOpcodeID opcode
, int reg
, const void* address
)
1760 m_buffer
.ensureSpace(maxInstructionSize
);
1761 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1762 m_buffer
.putByteUnchecked(opcode
);
1763 memoryModRM(reg
, address
);
1768 // Quad-word-sized operands:
1770 // Used to format 64-bit operantions, planting a REX.w prefix.
1771 // When planting d64 or f64 instructions, not requiring a REX.w prefix,
1772 // the normal (non-'64'-postfixed) formatters should be used.
1774 void oneByteOp64(OneByteOpcodeID opcode
)
1776 m_buffer
.ensureSpace(maxInstructionSize
);
1778 m_buffer
.putByteUnchecked(opcode
);
1781 void oneByteOp64(OneByteOpcodeID opcode
, RegisterID reg
)
1783 m_buffer
.ensureSpace(maxInstructionSize
);
1784 emitRexW(0, 0, reg
);
1785 m_buffer
.putByteUnchecked(opcode
+ (reg
& 7));
1788 void oneByteOp64(OneByteOpcodeID opcode
, int reg
, RegisterID rm
)
1790 m_buffer
.ensureSpace(maxInstructionSize
);
1791 emitRexW(reg
, 0, rm
);
1792 m_buffer
.putByteUnchecked(opcode
);
1793 registerModRM(reg
, rm
);
1796 void oneByteOp64(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1798 m_buffer
.ensureSpace(maxInstructionSize
);
1799 emitRexW(reg
, 0, base
);
1800 m_buffer
.putByteUnchecked(opcode
);
1801 memoryModRM(reg
, base
, offset
);
1804 void oneByteOp64_disp32(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1806 m_buffer
.ensureSpace(maxInstructionSize
);
1807 emitRexW(reg
, 0, base
);
1808 m_buffer
.putByteUnchecked(opcode
);
1809 memoryModRM_disp32(reg
, base
, offset
);
1812 void oneByteOp64_disp8(OneByteOpcodeID opcode
, int reg
, RegisterID base
, int offset
)
1814 m_buffer
.ensureSpace(maxInstructionSize
);
1815 emitRexW(reg
, 0, base
);
1816 m_buffer
.putByteUnchecked(opcode
);
1817 memoryModRM_disp8(reg
, base
, offset
);
1820 void oneByteOp64(OneByteOpcodeID opcode
, int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
1822 m_buffer
.ensureSpace(maxInstructionSize
);
1823 emitRexW(reg
, index
, base
);
1824 m_buffer
.putByteUnchecked(opcode
);
1825 memoryModRM(reg
, base
, index
, scale
, offset
);
1828 void twoByteOp64(TwoByteOpcodeID opcode
, int reg
, RegisterID rm
)
1830 m_buffer
.ensureSpace(maxInstructionSize
);
1831 emitRexW(reg
, 0, rm
);
1832 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1833 m_buffer
.putByteUnchecked(opcode
);
1834 registerModRM(reg
, rm
);
1840 // These methods format byte operations. Byte operations differ from the normal
1841 // formatters in the circumstances under which they will decide to emit REX prefixes.
1842 // These should be used where any register operand signifies a byte register.
1844 // The disctinction is due to the handling of register numbers in the range 4..7 on
1845 // x86-64. These register numbers may either represent the second byte of the first
1846 // four registers (ah..bh) or the first byte of the second four registers (spl..dil).
1848 // Since ah..bh cannot be used in all permutations of operands (specifically cannot
1849 // be accessed where a REX prefix is present), these are likely best treated as
1850 // deprecated. In order to ensure the correct registers spl..dil are selected a
1851 // REX prefix will be emitted for any byte register operand in the range 4..15.
1853 // These formatters may be used in instructions where a mix of operand sizes, in which
1854 // case an unnecessary REX will be emitted, for example:
1856 // In this case a REX will be planted since edi is 7 (and were this a byte operand
1857 // a REX would be required to specify dil instead of bh). Unneeded REX prefixes will
1858 // be silently ignored by the processor.
1860 // Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
1861 // is provided to check byte register operands.
1863 void oneByteOp8(OneByteOpcodeID opcode
, GroupOpcodeID groupOp
, RegisterID rm
)
1865 m_buffer
.ensureSpace(maxInstructionSize
);
1866 emitRexIf(byteRegRequiresRex(rm
), 0, 0, rm
);
1867 m_buffer
.putByteUnchecked(opcode
);
1868 registerModRM(groupOp
, rm
);
1871 void twoByteOp8(TwoByteOpcodeID opcode
, RegisterID reg
, RegisterID rm
)
1873 m_buffer
.ensureSpace(maxInstructionSize
);
1874 emitRexIf(byteRegRequiresRex(reg
)|byteRegRequiresRex(rm
), reg
, 0, rm
);
1875 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1876 m_buffer
.putByteUnchecked(opcode
);
1877 registerModRM(reg
, rm
);
1880 void twoByteOp8(TwoByteOpcodeID opcode
, GroupOpcodeID groupOp
, RegisterID rm
)
1882 m_buffer
.ensureSpace(maxInstructionSize
);
1883 emitRexIf(byteRegRequiresRex(rm
), 0, 0, rm
);
1884 m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
);
1885 m_buffer
.putByteUnchecked(opcode
);
1886 registerModRM(groupOp
, rm
);
1891 // An immedaite should be appended where appropriate after an op has been emitted.
1892 // The writes are unchecked since the opcode formatters above will have ensured space.
1894 void immediate8(int imm
)
1896 m_buffer
.putByteUnchecked(imm
);
1899 void immediate16(int imm
)
1901 m_buffer
.putShortUnchecked(imm
);
1904 void immediate32(int imm
)
1906 m_buffer
.putIntUnchecked(imm
);
1909 void immediate64(int64_t imm
)
1911 m_buffer
.putInt64Unchecked(imm
);
1914 AssemblerLabel
immediateRel32()
1916 m_buffer
.putIntUnchecked(0);
1920 // Administrative methods:
1922 size_t codeSize() const { return m_buffer
.codeSize(); }
1923 AssemblerLabel
label() const { return m_buffer
.label(); }
1924 bool isAligned(int alignment
) const { return m_buffer
.isAligned(alignment
); }
1925 void* data() const { return m_buffer
.data(); }
1927 void* executableCopy(JSGlobalData
& globalData
, ExecutablePool
* allocator
)
1929 return m_buffer
.executableCopy(globalData
, allocator
);
1932 void rewindToLabel(AssemblerLabel rewindTo
) { m_buffer
.rewindToLabel(rewindTo
); }
1935 unsigned debugOffset() { return m_buffer
.debugOffset(); }
1940 // Internals; ModRm and REX formatters.
1942 static const RegisterID noBase
= X86Registers::ebp
;
1943 static const RegisterID hasSib
= X86Registers::esp
;
1944 static const RegisterID noIndex
= X86Registers::esp
;
1946 static const RegisterID noBase2
= X86Registers::r13
;
1947 static const RegisterID hasSib2
= X86Registers::r12
;
1949 // Registers r8 & above require a REX prefixe.
1950 inline bool regRequiresRex(int reg
)
1952 return (reg
>= X86Registers::r8
);
1955 // Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
1956 inline bool byteRegRequiresRex(int reg
)
1958 return (reg
>= X86Registers::esp
);
1961 // Format a REX prefix byte.
1962 inline void emitRex(bool w
, int r
, int x
, int b
)
1964 m_buffer
.putByteUnchecked(PRE_REX
| ((int)w
<< 3) | ((r
>>3)<<2) | ((x
>>3)<<1) | (b
>>3));
1967 // Used to plant a REX byte with REX.w set (for 64-bit operations).
1968 inline void emitRexW(int r
, int x
, int b
)
1970 emitRex(true, r
, x
, b
);
1973 // Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
1974 // regRequiresRex() to check other registers (i.e. address base & index).
1975 inline void emitRexIf(bool condition
, int r
, int x
, int b
)
1977 if (condition
) emitRex(false, r
, x
, b
);
1980 // Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
1981 inline void emitRexIfNeeded(int r
, int x
, int b
)
1983 emitRexIf(regRequiresRex(r
) || regRequiresRex(x
) || regRequiresRex(b
), r
, x
, b
);
1986 // No REX prefix bytes on 32-bit x86.
1987 inline bool regRequiresRex(int) { return false; }
1988 inline bool byteRegRequiresRex(int) { return false; }
1989 inline void emitRexIf(bool, int, int, int) {}
1990 inline void emitRexIfNeeded(int, int, int) {}
2000 void putModRm(ModRmMode mode
, int reg
, RegisterID rm
)
2002 m_buffer
.putByteUnchecked((mode
<< 6) | ((reg
& 7) << 3) | (rm
& 7));
2005 void putModRmSib(ModRmMode mode
, int reg
, RegisterID base
, RegisterID index
, int scale
)
2007 ASSERT(mode
!= ModRmRegister
);
2009 putModRm(mode
, reg
, hasSib
);
2010 m_buffer
.putByteUnchecked((scale
<< 6) | ((index
& 7) << 3) | (base
& 7));
2013 void registerModRM(int reg
, RegisterID rm
)
2015 putModRm(ModRmRegister
, reg
, rm
);
2018 void memoryModRM(int reg
, RegisterID base
, int offset
)
2020 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2022 if ((base
== hasSib
) || (base
== hasSib2
)) {
2024 if (base
== hasSib
) {
2026 if (!offset
) // No need to check if the base is noBase, since we know it is hasSib!
2027 putModRmSib(ModRmMemoryNoDisp
, reg
, base
, noIndex
, 0);
2028 else if (CAN_SIGN_EXTEND_8_32(offset
)) {
2029 putModRmSib(ModRmMemoryDisp8
, reg
, base
, noIndex
, 0);
2030 m_buffer
.putByteUnchecked(offset
);
2032 putModRmSib(ModRmMemoryDisp32
, reg
, base
, noIndex
, 0);
2033 m_buffer
.putIntUnchecked(offset
);
2037 if (!offset
&& (base
!= noBase
) && (base
!= noBase2
))
2039 if (!offset
&& (base
!= noBase
))
2041 putModRm(ModRmMemoryNoDisp
, reg
, base
);
2042 else if (CAN_SIGN_EXTEND_8_32(offset
)) {
2043 putModRm(ModRmMemoryDisp8
, reg
, base
);
2044 m_buffer
.putByteUnchecked(offset
);
2046 putModRm(ModRmMemoryDisp32
, reg
, base
);
2047 m_buffer
.putIntUnchecked(offset
);
2052 void memoryModRM_disp8(int reg
, RegisterID base
, int offset
)
2054 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2055 ASSERT(CAN_SIGN_EXTEND_8_32(offset
));
2057 if ((base
== hasSib
) || (base
== hasSib2
)) {
2059 if (base
== hasSib
) {
2061 putModRmSib(ModRmMemoryDisp8
, reg
, base
, noIndex
, 0);
2062 m_buffer
.putByteUnchecked(offset
);
2064 putModRm(ModRmMemoryDisp8
, reg
, base
);
2065 m_buffer
.putByteUnchecked(offset
);
2069 void memoryModRM_disp32(int reg
, RegisterID base
, int offset
)
2071 // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
2073 if ((base
== hasSib
) || (base
== hasSib2
)) {
2075 if (base
== hasSib
) {
2077 putModRmSib(ModRmMemoryDisp32
, reg
, base
, noIndex
, 0);
2078 m_buffer
.putIntUnchecked(offset
);
2080 putModRm(ModRmMemoryDisp32
, reg
, base
);
2081 m_buffer
.putIntUnchecked(offset
);
2085 void memoryModRM(int reg
, RegisterID base
, RegisterID index
, int scale
, int offset
)
2087 ASSERT(index
!= noIndex
);
2090 if (!offset
&& (base
!= noBase
) && (base
!= noBase2
))
2092 if (!offset
&& (base
!= noBase
))
2094 putModRmSib(ModRmMemoryNoDisp
, reg
, base
, index
, scale
);
2095 else if (CAN_SIGN_EXTEND_8_32(offset
)) {
2096 putModRmSib(ModRmMemoryDisp8
, reg
, base
, index
, scale
);
2097 m_buffer
.putByteUnchecked(offset
);
2099 putModRmSib(ModRmMemoryDisp32
, reg
, base
, index
, scale
);
2100 m_buffer
.putIntUnchecked(offset
);
2105 void memoryModRM(int reg
, const void* address
)
2107 // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
2108 putModRm(ModRmMemoryNoDisp
, reg
, noBase
);
2109 m_buffer
.putIntUnchecked(reinterpret_cast<int32_t>(address
));
2113 AssemblerBuffer m_buffer
;
2119 #endif // ENABLE(ASSEMBLER) && CPU(X86)
2121 #endif // X86Assembler_h