2  * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved. 
   4  * Redistribution and use in source and binary forms, with or without 
   5  * modification, are permitted provided that the following conditions 
   7  * 1. Redistributions of source code must retain the above copyright 
   8  *    notice, this list of conditions and the following disclaimer. 
   9  * 2. Redistributions in binary form must reproduce the above copyright 
  10  *    notice, this list of conditions and the following disclaimer in the 
  11  *    documentation and/or other materials provided with the distribution. 
  13  * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY 
  14  * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 
  15  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 
  16  * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL APPLE INC. OR 
  17  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 
  18  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 
  19  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 
  20  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY 
  21  * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 
  22  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 
  23  * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.  
  26 #ifndef X86Assembler_h 
  27 #define X86Assembler_h 
  29 #if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64)) 
  31 #include "AssemblerBuffer.h" 
  32 #include "JITCompilationEffort.h" 
  35 #include <wtf/Assertions.h> 
  36 #include <wtf/Vector.h> 
  39 #include <xmmintrin.h> 
  44 inline bool CAN_SIGN_EXTEND_8_32(int32_t value
) { return value 
== (int32_t)(signed char)value
; } 
  46 namespace X86Registers 
{ 
  92     #define FOR_EACH_CPU_REGISTER(V) \ 
  93         FOR_EACH_CPU_GPREGISTER(V) \ 
  94         FOR_EACH_CPU_SPECIAL_REGISTER(V) \ 
  95         FOR_EACH_CPU_FPREGISTER(V) 
  97     #define FOR_EACH_CPU_GPREGISTER(V) \ 
 106         FOR_EACH_X86_64_CPU_GPREGISTER(V) 
 108     #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \ 
 112     #define FOR_EACH_CPU_FPREGISTER(V) \ 
 123     #define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add. 
 125     #define FOR_EACH_X86_64_CPU_GPREGISTER(V) \ 
 134 #endif // CPU(X86_64) 
 135 #endif // USE(MASM_PROBE) 
 140     typedef X86Registers::RegisterID RegisterID
; 
 142     static RegisterID 
firstRegister() { return X86Registers::eax
; } 
 143     static RegisterID 
lastRegister() 
 146         return X86Registers::r15
; 
 148         return X86Registers::edi
; 
 152     typedef X86Registers::XMMRegisterID XMMRegisterID
; 
 153     typedef XMMRegisterID FPRegisterID
; 
 155     static FPRegisterID 
firstFPRegister() { return X86Registers::xmm0
; } 
 156     static FPRegisterID 
lastFPRegister() 
 159         return X86Registers::xmm15
; 
 161         return X86Registers::xmm7
; 
 183         ConditionC  
= ConditionB
, 
 184         ConditionNC 
= ConditionAE
, 
 195         OP_2BYTE_ESCAPE                 
= 0x0F, 
 201         PRE_PREDICT_BRANCH_NOT_TAKEN    
= 0x2E, 
 214         OP_MOVSXD_GvEv                  
= 0x63, 
 216         PRE_OPERAND_SIZE                
= 0x66, 
 219         OP_IMUL_GvEvIz                  
= 0x69, 
 220         OP_GROUP1_EbIb                  
= 0x80, 
 221         OP_GROUP1_EvIz                  
= 0x81, 
 222         OP_GROUP1_EvIb                  
= 0x83, 
 230         OP_GROUP1A_Ev                   
= 0x8F, 
 237         OP_TEST_EAXIv                   
= 0xA9, 
 239         OP_GROUP2_EvIb                  
= 0xC1, 
 241         OP_GROUP11_EvIb                 
= 0xC6, 
 242         OP_GROUP11_EvIz                 
= 0xC7, 
 244         OP_GROUP2_Ev1                   
= 0xD1, 
 245         OP_GROUP2_EvCL                  
= 0xD3, 
 247         OP_CALL_rel32                   
= 0xE8, 
 252         OP_GROUP3_EbIb                  
= 0xF6, 
 254         OP_GROUP3_EvIz                  
= 0xF7, // OP_GROUP3_Ev has an immediate, when instruction is a test.  
 259         OP2_MOVSD_VsdWsd    
= 0x10, 
 260         OP2_MOVSD_WsdVsd    
= 0x11, 
 261         OP2_MOVSS_VsdWsd    
= 0x10, 
 262         OP2_MOVSS_WsdVsd    
= 0x11, 
 263         OP2_CVTSI2SD_VsdEd  
= 0x2A, 
 264         OP2_CVTTSD2SI_GdWsd 
= 0x2C, 
 265         OP2_UCOMISD_VsdWsd  
= 0x2E, 
 266         OP2_ADDSD_VsdWsd    
= 0x58, 
 267         OP2_MULSD_VsdWsd    
= 0x59, 
 268         OP2_CVTSD2SS_VsdWsd 
= 0x5A, 
 269         OP2_CVTSS2SD_VsdWsd 
= 0x5A, 
 270         OP2_SUBSD_VsdWsd    
= 0x5C, 
 271         OP2_DIVSD_VsdWsd    
= 0x5E, 
 272         OP2_SQRTSD_VsdWsd   
= 0x51, 
 273         OP2_ANDNPD_VpdWpd   
= 0x55, 
 274         OP2_XORPD_VpdWpd    
= 0x57, 
 275         OP2_MOVD_VdEd       
= 0x6E, 
 276         OP2_MOVD_EdVd       
= 0x7E, 
 277         OP2_JCC_rel32       
= 0x80, 
 279         OP2_3BYTE_ESCAPE    
= 0xAE, 
 280         OP2_IMUL_GvEv       
= 0xAF, 
 281         OP2_MOVZX_GvEb      
= 0xB6, 
 282         OP2_MOVSX_GvEb      
= 0xBE, 
 283         OP2_MOVZX_GvEw      
= 0xB7, 
 284         OP2_MOVSX_GvEw      
= 0xBF, 
 285         OP2_PEXTRW_GdUdIb   
= 0xC5, 
 286         OP2_PSLLQ_UdqIb     
= 0x73, 
 287         OP2_PSRLQ_UdqIb     
= 0x73, 
 288         OP2_POR_VdqWdq      
= 0XEB, 
 295     TwoByteOpcodeID 
jccRel32(Condition cond
) 
 297         return (TwoByteOpcodeID
)(OP2_JCC_rel32 
+ cond
); 
 300     TwoByteOpcodeID 
setccOpcode(Condition cond
) 
 302         return (TwoByteOpcodeID
)(OP_SETCC 
+ cond
); 
 336         GROUP14_OP_PSLLQ 
= 6, 
 337         GROUP14_OP_PSRLQ 
= 2, 
 339         ESCAPE_DD_FSTP_doubleReal 
= 3, 
 342     class X86InstructionFormatter
; 
 346         : m_indexOfLastWatchpoint(INT_MIN
) 
 347         , m_indexOfTailOfLastWatchpoint(INT_MIN
) 
 351     AssemblerBuffer
& buffer() { return m_formatter
.m_buffer
; } 
 355     void push_r(RegisterID reg
) 
 357         m_formatter
.oneByteOp(OP_PUSH_EAX
, reg
); 
 360     void pop_r(RegisterID reg
) 
 362         m_formatter
.oneByteOp(OP_POP_EAX
, reg
); 
 365     void push_i32(int imm
) 
 367         m_formatter
.oneByteOp(OP_PUSH_Iz
); 
 368         m_formatter
.immediate32(imm
); 
 371     void push_m(int offset
, RegisterID base
) 
 373         m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP5_OP_PUSH
, base
, offset
); 
 376     void pop_m(int offset
, RegisterID base
) 
 378         m_formatter
.oneByteOp(OP_GROUP1A_Ev
, GROUP1A_OP_POP
, base
, offset
); 
 381     // Arithmetic operations: 
 384     void adcl_im(int imm
, const void* addr
) 
 386         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 387             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADC
, addr
); 
 388             m_formatter
.immediate8(imm
); 
 390             m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADC
, addr
); 
 391             m_formatter
.immediate32(imm
); 
 396     void addl_rr(RegisterID src
, RegisterID dst
) 
 398         m_formatter
.oneByteOp(OP_ADD_EvGv
, src
, dst
); 
 401     void addl_mr(int offset
, RegisterID base
, RegisterID dst
) 
 403         m_formatter
.oneByteOp(OP_ADD_GvEv
, dst
, base
, offset
); 
 407     void addl_mr(const void* addr
, RegisterID dst
) 
 409         m_formatter
.oneByteOp(OP_ADD_GvEv
, dst
, addr
); 
 413     void addl_rm(RegisterID src
, int offset
, RegisterID base
) 
 415         m_formatter
.oneByteOp(OP_ADD_EvGv
, src
, base
, offset
); 
 418     void addl_ir(int imm
, RegisterID dst
) 
 420         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 421             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, dst
); 
 422             m_formatter
.immediate8(imm
); 
 424             if (dst 
== X86Registers::eax
) 
 425                 m_formatter
.oneByteOp(OP_ADD_EAXIv
); 
 427                 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, dst
); 
 428             m_formatter
.immediate32(imm
); 
 432     void addl_im(int imm
, int offset
, RegisterID base
) 
 434         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 435             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, base
, offset
); 
 436             m_formatter
.immediate8(imm
); 
 438             m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, base
, offset
); 
 439             m_formatter
.immediate32(imm
); 
 444     void addq_rr(RegisterID src
, RegisterID dst
) 
 446         m_formatter
.oneByteOp64(OP_ADD_EvGv
, src
, dst
); 
 449     void addq_mr(int offset
, RegisterID base
, RegisterID dst
) 
 451         m_formatter
.oneByteOp64(OP_ADD_GvEv
, dst
, base
, offset
); 
 454     void addq_ir(int imm
, RegisterID dst
) 
 456         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 457             m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, dst
); 
 458             m_formatter
.immediate8(imm
); 
 460             if (dst 
== X86Registers::eax
) 
 461                 m_formatter
.oneByteOp64(OP_ADD_EAXIv
); 
 463                 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, dst
); 
 464             m_formatter
.immediate32(imm
); 
 468     void addq_im(int imm
, int offset
, RegisterID base
) 
 470         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 471             m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, base
, offset
); 
 472             m_formatter
.immediate8(imm
); 
 474             m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, base
, offset
); 
 475             m_formatter
.immediate32(imm
); 
 479     void addl_im(int imm
, const void* addr
) 
 481         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 482             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_ADD
, addr
); 
 483             m_formatter
.immediate8(imm
); 
 485             m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_ADD
, addr
); 
 486             m_formatter
.immediate32(imm
); 
 491     void andl_rr(RegisterID src
, RegisterID dst
) 
 493         m_formatter
.oneByteOp(OP_AND_EvGv
, src
, dst
); 
 496     void andl_mr(int offset
, RegisterID base
, RegisterID dst
) 
 498         m_formatter
.oneByteOp(OP_AND_GvEv
, dst
, base
, offset
); 
 501     void andl_rm(RegisterID src
, int offset
, RegisterID base
) 
 503         m_formatter
.oneByteOp(OP_AND_EvGv
, src
, base
, offset
); 
 506     void andl_ir(int imm
, RegisterID dst
) 
 508         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 509             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_AND
, dst
); 
 510             m_formatter
.immediate8(imm
); 
 512             m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_AND
, dst
); 
 513             m_formatter
.immediate32(imm
); 
 517     void andl_im(int imm
, int offset
, RegisterID base
) 
 519         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 520             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_AND
, base
, offset
); 
 521             m_formatter
.immediate8(imm
); 
 523             m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_AND
, base
, offset
); 
 524             m_formatter
.immediate32(imm
); 
 529     void andq_rr(RegisterID src
, RegisterID dst
) 
 531         m_formatter
.oneByteOp64(OP_AND_EvGv
, src
, dst
); 
 534     void andq_ir(int imm
, RegisterID dst
) 
 536         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 537             m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_AND
, dst
); 
 538             m_formatter
.immediate8(imm
); 
 540             m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_AND
, dst
); 
 541             m_formatter
.immediate32(imm
); 
 545     void andl_im(int imm
, const void* addr
) 
 547         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 548             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_AND
, addr
); 
 549             m_formatter
.immediate8(imm
); 
 551             m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_AND
, addr
); 
 552             m_formatter
.immediate32(imm
); 
 557     void dec_r(RegisterID dst
) 
 559         m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP1_OP_OR
, dst
); 
 563     void decq_r(RegisterID dst
) 
 565         m_formatter
.oneByteOp64(OP_GROUP5_Ev
, GROUP1_OP_OR
, dst
); 
 567 #endif // CPU(X86_64) 
 569     void inc_r(RegisterID dst
) 
 571         m_formatter
.oneByteOp(OP_GROUP5_Ev
, GROUP1_OP_ADD
, dst
); 
 575     void incq_r(RegisterID dst
) 
 577         m_formatter
.oneByteOp64(OP_GROUP5_Ev
, GROUP1_OP_ADD
, dst
); 
 579 #endif // CPU(X86_64) 
 581     void negl_r(RegisterID dst
) 
 583         m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NEG
, dst
); 
 587     void negq_r(RegisterID dst
) 
 589         m_formatter
.oneByteOp64(OP_GROUP3_Ev
, GROUP3_OP_NEG
, dst
); 
 593     void negl_m(int offset
, RegisterID base
) 
 595         m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NEG
, base
, offset
); 
 598     void notl_r(RegisterID dst
) 
 600         m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NOT
, dst
); 
 603     void notl_m(int offset
, RegisterID base
) 
 605         m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_NOT
, base
, offset
); 
 608     void orl_rr(RegisterID src
, RegisterID dst
) 
 610         m_formatter
.oneByteOp(OP_OR_EvGv
, src
, dst
); 
 613     void orl_mr(int offset
, RegisterID base
, RegisterID dst
) 
 615         m_formatter
.oneByteOp(OP_OR_GvEv
, dst
, base
, offset
); 
 618     void orl_rm(RegisterID src
, int offset
, RegisterID base
) 
 620         m_formatter
.oneByteOp(OP_OR_EvGv
, src
, base
, offset
); 
 623     void orl_ir(int imm
, RegisterID dst
) 
 625         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 626             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_OR
, dst
); 
 627             m_formatter
.immediate8(imm
); 
 629             if (dst 
== X86Registers::eax
) 
 630                 m_formatter
.oneByteOp(OP_OR_EAXIv
); 
 632                 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_OR
, dst
); 
 633             m_formatter
.immediate32(imm
); 
 637     void orl_im(int imm
, int offset
, RegisterID base
) 
 639         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 640             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_OR
, base
, offset
); 
 641             m_formatter
.immediate8(imm
); 
 643             m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_OR
, base
, offset
); 
 644             m_formatter
.immediate32(imm
); 
 649     void orq_rr(RegisterID src
, RegisterID dst
) 
 651         m_formatter
.oneByteOp64(OP_OR_EvGv
, src
, dst
); 
 654     void orq_ir(int imm
, RegisterID dst
) 
 656         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 657             m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_OR
, dst
); 
 658             m_formatter
.immediate8(imm
); 
 660             if (dst 
== X86Registers::eax
) 
 661                 m_formatter
.oneByteOp64(OP_OR_EAXIv
); 
 663                 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_OR
, dst
); 
 664             m_formatter
.immediate32(imm
); 
 668     void orl_im(int imm
, const void* addr
) 
 670         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 671             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_OR
, addr
); 
 672             m_formatter
.immediate8(imm
); 
 674             m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_OR
, addr
); 
 675             m_formatter
.immediate32(imm
); 
 679     void orl_rm(RegisterID src
, const void* addr
) 
 681         m_formatter
.oneByteOp(OP_OR_EvGv
, src
, addr
); 
 685     void subl_rr(RegisterID src
, RegisterID dst
) 
 687         m_formatter
.oneByteOp(OP_SUB_EvGv
, src
, dst
); 
 690     void subl_mr(int offset
, RegisterID base
, RegisterID dst
) 
 692         m_formatter
.oneByteOp(OP_SUB_GvEv
, dst
, base
, offset
); 
 695     void subl_rm(RegisterID src
, int offset
, RegisterID base
) 
 697         m_formatter
.oneByteOp(OP_SUB_EvGv
, src
, base
, offset
); 
 700     void subl_ir(int imm
, RegisterID dst
) 
 702         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 703             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, dst
); 
 704             m_formatter
.immediate8(imm
); 
 706             if (dst 
== X86Registers::eax
) 
 707                 m_formatter
.oneByteOp(OP_SUB_EAXIv
); 
 709                 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, dst
); 
 710             m_formatter
.immediate32(imm
); 
 714     void subl_im(int imm
, int offset
, RegisterID base
) 
 716         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 717             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, base
, offset
); 
 718             m_formatter
.immediate8(imm
); 
 720             m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, base
, offset
); 
 721             m_formatter
.immediate32(imm
); 
 726     void subq_rr(RegisterID src
, RegisterID dst
) 
 728         m_formatter
.oneByteOp64(OP_SUB_EvGv
, src
, dst
); 
 731     void subq_ir(int imm
, RegisterID dst
) 
 733         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 734             m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, dst
); 
 735             m_formatter
.immediate8(imm
); 
 737             if (dst 
== X86Registers::eax
) 
 738                 m_formatter
.oneByteOp64(OP_SUB_EAXIv
); 
 740                 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, dst
); 
 741             m_formatter
.immediate32(imm
); 
 745     void subl_im(int imm
, const void* addr
) 
 747         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 748             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_SUB
, addr
); 
 749             m_formatter
.immediate8(imm
); 
 751             m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_SUB
, addr
); 
 752             m_formatter
.immediate32(imm
); 
 757     void xorl_rr(RegisterID src
, RegisterID dst
) 
 759         m_formatter
.oneByteOp(OP_XOR_EvGv
, src
, dst
); 
 762     void xorl_mr(int offset
, RegisterID base
, RegisterID dst
) 
 764         m_formatter
.oneByteOp(OP_XOR_GvEv
, dst
, base
, offset
); 
 767     void xorl_rm(RegisterID src
, int offset
, RegisterID base
) 
 769         m_formatter
.oneByteOp(OP_XOR_EvGv
, src
, base
, offset
); 
 772     void xorl_im(int imm
, int offset
, RegisterID base
) 
 774         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 775             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_XOR
, base
, offset
); 
 776             m_formatter
.immediate8(imm
); 
 778             m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_XOR
, base
, offset
); 
 779             m_formatter
.immediate32(imm
); 
 783     void xorl_ir(int imm
, RegisterID dst
) 
 785         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 786             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_XOR
, dst
); 
 787             m_formatter
.immediate8(imm
); 
 789             if (dst 
== X86Registers::eax
) 
 790                 m_formatter
.oneByteOp(OP_XOR_EAXIv
); 
 792                 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_XOR
, dst
); 
 793             m_formatter
.immediate32(imm
); 
 798     void xorq_rr(RegisterID src
, RegisterID dst
) 
 800         m_formatter
.oneByteOp64(OP_XOR_EvGv
, src
, dst
); 
 803     void xorq_ir(int imm
, RegisterID dst
) 
 805         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 806             m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_XOR
, dst
); 
 807             m_formatter
.immediate8(imm
); 
 809             if (dst 
== X86Registers::eax
) 
 810                 m_formatter
.oneByteOp64(OP_XOR_EAXIv
); 
 812                 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_XOR
, dst
); 
 813             m_formatter
.immediate32(imm
); 
 817     void xorq_rm(RegisterID src
, int offset
, RegisterID base
) 
 819         m_formatter
.oneByteOp64(OP_XOR_EvGv
, src
, base
, offset
); 
 822     void rorq_i8r(int imm
, RegisterID dst
) 
 825             m_formatter
.oneByteOp64(OP_GROUP2_Ev1
, GROUP2_OP_ROR
, dst
); 
 827             m_formatter
.oneByteOp64(OP_GROUP2_EvIb
, GROUP2_OP_ROR
, dst
); 
 828             m_formatter
.immediate8(imm
); 
 834     void sarl_i8r(int imm
, RegisterID dst
) 
 837             m_formatter
.oneByteOp(OP_GROUP2_Ev1
, GROUP2_OP_SAR
, dst
); 
 839             m_formatter
.oneByteOp(OP_GROUP2_EvIb
, GROUP2_OP_SAR
, dst
); 
 840             m_formatter
.immediate8(imm
); 
 844     void sarl_CLr(RegisterID dst
) 
 846         m_formatter
.oneByteOp(OP_GROUP2_EvCL
, GROUP2_OP_SAR
, dst
); 
 849     void shrl_i8r(int imm
, RegisterID dst
) 
 852             m_formatter
.oneByteOp(OP_GROUP2_Ev1
, GROUP2_OP_SHR
, dst
); 
 854             m_formatter
.oneByteOp(OP_GROUP2_EvIb
, GROUP2_OP_SHR
, dst
); 
 855             m_formatter
.immediate8(imm
); 
 859     void shrl_CLr(RegisterID dst
) 
 861         m_formatter
.oneByteOp(OP_GROUP2_EvCL
, GROUP2_OP_SHR
, dst
); 
 864     void shll_i8r(int imm
, RegisterID dst
) 
 867             m_formatter
.oneByteOp(OP_GROUP2_Ev1
, GROUP2_OP_SHL
, dst
); 
 869             m_formatter
.oneByteOp(OP_GROUP2_EvIb
, GROUP2_OP_SHL
, dst
); 
 870             m_formatter
.immediate8(imm
); 
 874     void shll_CLr(RegisterID dst
) 
 876         m_formatter
.oneByteOp(OP_GROUP2_EvCL
, GROUP2_OP_SHL
, dst
); 
 880     void sarq_CLr(RegisterID dst
) 
 882         m_formatter
.oneByteOp64(OP_GROUP2_EvCL
, GROUP2_OP_SAR
, dst
); 
 885     void sarq_i8r(int imm
, RegisterID dst
) 
 888             m_formatter
.oneByteOp64(OP_GROUP2_Ev1
, GROUP2_OP_SAR
, dst
); 
 890             m_formatter
.oneByteOp64(OP_GROUP2_EvIb
, GROUP2_OP_SAR
, dst
); 
 891             m_formatter
.immediate8(imm
); 
 895     void shlq_i8r(int imm
, RegisterID dst
) 
 898             m_formatter
.oneByteOp64(OP_GROUP2_Ev1
, GROUP2_OP_SHL
, dst
); 
 900             m_formatter
.oneByteOp64(OP_GROUP2_EvIb
, GROUP2_OP_SHL
, dst
); 
 901             m_formatter
.immediate8(imm
); 
 904 #endif // CPU(X86_64) 
 906     void imull_rr(RegisterID src
, RegisterID dst
) 
 908         m_formatter
.twoByteOp(OP2_IMUL_GvEv
, dst
, src
); 
 912     void imulq_rr(RegisterID src
, RegisterID dst
) 
 914         m_formatter
.twoByteOp64(OP2_IMUL_GvEv
, dst
, src
); 
 916 #endif // CPU(X86_64) 
 918     void imull_mr(int offset
, RegisterID base
, RegisterID dst
) 
 920         m_formatter
.twoByteOp(OP2_IMUL_GvEv
, dst
, base
, offset
); 
 923     void imull_i32r(RegisterID src
, int32_t value
, RegisterID dst
) 
 925         m_formatter
.oneByteOp(OP_IMUL_GvEvIz
, dst
, src
); 
 926         m_formatter
.immediate32(value
); 
 929     void idivl_r(RegisterID dst
) 
 931         m_formatter
.oneByteOp(OP_GROUP3_Ev
, GROUP3_OP_IDIV
, dst
); 
 936     void cmpl_rr(RegisterID src
, RegisterID dst
) 
 938         m_formatter
.oneByteOp(OP_CMP_EvGv
, src
, dst
); 
 941     void cmpl_rm(RegisterID src
, int offset
, RegisterID base
) 
 943         m_formatter
.oneByteOp(OP_CMP_EvGv
, src
, base
, offset
); 
 946     void cmpl_mr(int offset
, RegisterID base
, RegisterID src
) 
 948         m_formatter
.oneByteOp(OP_CMP_GvEv
, src
, base
, offset
); 
 951     void cmpl_ir(int imm
, RegisterID dst
) 
 953         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 954             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, dst
); 
 955             m_formatter
.immediate8(imm
); 
 957             if (dst 
== X86Registers::eax
) 
 958                 m_formatter
.oneByteOp(OP_CMP_EAXIv
); 
 960                 m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, dst
); 
 961             m_formatter
.immediate32(imm
); 
 965     void cmpl_ir_force32(int imm
, RegisterID dst
) 
 967         m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, dst
); 
 968         m_formatter
.immediate32(imm
); 
 971     void cmpl_im(int imm
, int offset
, RegisterID base
) 
 973         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
 974             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, offset
); 
 975             m_formatter
.immediate8(imm
); 
 977             m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, offset
); 
 978             m_formatter
.immediate32(imm
); 
 982     void cmpb_im(int imm
, int offset
, RegisterID base
) 
 984         m_formatter
.oneByteOp(OP_GROUP1_EbIb
, GROUP1_OP_CMP
, base
, offset
); 
 985         m_formatter
.immediate8(imm
); 
 988     void cmpb_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
) 
 990         m_formatter
.oneByteOp(OP_GROUP1_EbIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
); 
 991         m_formatter
.immediate8(imm
); 
 995     void cmpb_im(int imm
, const void* addr
) 
 997         m_formatter
.oneByteOp(OP_GROUP1_EbIb
, GROUP1_OP_CMP
, addr
); 
 998         m_formatter
.immediate8(imm
); 
1002     void cmpl_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
) 
1004         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
1005             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
); 
1006             m_formatter
.immediate8(imm
); 
1008             m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, index
, scale
, offset
); 
1009             m_formatter
.immediate32(imm
); 
1013     void cmpl_im_force32(int imm
, int offset
, RegisterID base
) 
1015         m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, offset
); 
1016         m_formatter
.immediate32(imm
); 
1020     void cmpq_rr(RegisterID src
, RegisterID dst
) 
1022         m_formatter
.oneByteOp64(OP_CMP_EvGv
, src
, dst
); 
1025     void cmpq_rm(RegisterID src
, int offset
, RegisterID base
) 
1027         m_formatter
.oneByteOp64(OP_CMP_EvGv
, src
, base
, offset
); 
1030     void cmpq_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
) 
1032         m_formatter
.oneByteOp64(OP_CMP_EvGv
, src
, base
, index
, scale
, offset
); 
1035     void cmpq_mr(int offset
, RegisterID base
, RegisterID src
) 
1037         m_formatter
.oneByteOp64(OP_CMP_GvEv
, src
, base
, offset
); 
1040     void cmpq_ir(int imm
, RegisterID dst
) 
1042         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
1043             m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, dst
); 
1044             m_formatter
.immediate8(imm
); 
1046             if (dst 
== X86Registers::eax
) 
1047                 m_formatter
.oneByteOp64(OP_CMP_EAXIv
); 
1049                 m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, dst
); 
1050             m_formatter
.immediate32(imm
); 
1054     void cmpq_im(int imm
, int offset
, RegisterID base
) 
1056         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
1057             m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, offset
); 
1058             m_formatter
.immediate8(imm
); 
1060             m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, offset
); 
1061             m_formatter
.immediate32(imm
); 
1065     void cmpq_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
) 
1067         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
1068             m_formatter
.oneByteOp64(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
); 
1069             m_formatter
.immediate8(imm
); 
1071             m_formatter
.oneByteOp64(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, index
, scale
, offset
); 
1072             m_formatter
.immediate32(imm
); 
1076     void cmpl_rm(RegisterID reg
, const void* addr
) 
1078         m_formatter
.oneByteOp(OP_CMP_EvGv
, reg
, addr
); 
1081     void cmpl_im(int imm
, const void* addr
) 
1083         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
1084             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, addr
); 
1085             m_formatter
.immediate8(imm
); 
1087             m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, addr
); 
1088             m_formatter
.immediate32(imm
); 
1093     void cmpw_ir(int imm
, RegisterID dst
) 
1095         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
1096             m_formatter
.prefix(PRE_OPERAND_SIZE
); 
1097             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, dst
); 
1098             m_formatter
.immediate8(imm
); 
1100             m_formatter
.prefix(PRE_OPERAND_SIZE
); 
1101             m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, dst
); 
1102             m_formatter
.immediate16(imm
); 
1106     void cmpw_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
) 
1108         m_formatter
.prefix(PRE_OPERAND_SIZE
); 
1109         m_formatter
.oneByteOp(OP_CMP_EvGv
, src
, base
, index
, scale
, offset
); 
1112     void cmpw_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
) 
1114         if (CAN_SIGN_EXTEND_8_32(imm
)) { 
1115             m_formatter
.prefix(PRE_OPERAND_SIZE
); 
1116             m_formatter
.oneByteOp(OP_GROUP1_EvIb
, GROUP1_OP_CMP
, base
, index
, scale
, offset
); 
1117             m_formatter
.immediate8(imm
); 
1119             m_formatter
.prefix(PRE_OPERAND_SIZE
); 
1120             m_formatter
.oneByteOp(OP_GROUP1_EvIz
, GROUP1_OP_CMP
, base
, index
, scale
, offset
); 
1121             m_formatter
.immediate16(imm
); 
1125     void testl_rr(RegisterID src
, RegisterID dst
) 
1127         m_formatter
.oneByteOp(OP_TEST_EvGv
, src
, dst
); 
1130     void testl_i32r(int imm
, RegisterID dst
) 
1132         if (dst 
== X86Registers::eax
) 
1133             m_formatter
.oneByteOp(OP_TEST_EAXIv
); 
1135             m_formatter
.oneByteOp(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, dst
); 
1136         m_formatter
.immediate32(imm
); 
1139     void testl_i32m(int imm
, int offset
, RegisterID base
) 
1141         m_formatter
.oneByteOp(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, offset
); 
1142         m_formatter
.immediate32(imm
); 
1145     void testb_rr(RegisterID src
, RegisterID dst
) 
1147         m_formatter
.oneByteOp8(OP_TEST_EbGb
, src
, dst
); 
1150     void testb_im(int imm
, int offset
, RegisterID base
) 
1152         m_formatter
.oneByteOp(OP_GROUP3_EbIb
, GROUP3_OP_TEST
, base
, offset
); 
1153         m_formatter
.immediate8(imm
); 
1156     void testb_im(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
) 
1158         m_formatter
.oneByteOp(OP_GROUP3_EbIb
, GROUP3_OP_TEST
, base
, index
, scale
, offset
); 
1159         m_formatter
.immediate8(imm
); 
1163     void testb_im(int imm
, const void* addr
) 
1165         m_formatter
.oneByteOp(OP_GROUP3_EbIb
, GROUP3_OP_TEST
, addr
); 
1166         m_formatter
.immediate8(imm
); 
1170     void testl_i32m(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
) 
1172         m_formatter
.oneByteOp(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, index
, scale
, offset
); 
1173         m_formatter
.immediate32(imm
); 
1177     void testq_rr(RegisterID src
, RegisterID dst
) 
1179         m_formatter
.oneByteOp64(OP_TEST_EvGv
, src
, dst
); 
1182     void testq_rm(RegisterID src
, int offset
, RegisterID base
) 
1184         m_formatter
.oneByteOp64(OP_TEST_EvGv
, src
, base
, offset
); 
1187     void testq_i32r(int imm
, RegisterID dst
) 
1189         if (dst 
== X86Registers::eax
) 
1190             m_formatter
.oneByteOp64(OP_TEST_EAXIv
); 
1192             m_formatter
.oneByteOp64(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, dst
); 
1193         m_formatter
.immediate32(imm
); 
1196     void testq_i32m(int imm
, int offset
, RegisterID base
) 
1198         m_formatter
.oneByteOp64(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, offset
); 
1199         m_formatter
.immediate32(imm
); 
1202     void testq_i32m(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
) 
1204         m_formatter
.oneByteOp64(OP_GROUP3_EvIz
, GROUP3_OP_TEST
, base
, index
, scale
, offset
); 
1205         m_formatter
.immediate32(imm
); 
1209     void testw_rr(RegisterID src
, RegisterID dst
) 
1211         m_formatter
.prefix(PRE_OPERAND_SIZE
); 
1212         m_formatter
.oneByteOp(OP_TEST_EvGv
, src
, dst
); 
1215     void testb_i8r(int imm
, RegisterID dst
) 
1217         if (dst 
== X86Registers::eax
) 
1218             m_formatter
.oneByteOp(OP_TEST_ALIb
); 
1220             m_formatter
.oneByteOp8(OP_GROUP3_EbIb
, GROUP3_OP_TEST
, dst
); 
1221         m_formatter
.immediate8(imm
); 
1224     void setCC_r(Condition cond
, RegisterID dst
) 
1226         m_formatter
.twoByteOp8(setccOpcode(cond
), (GroupOpcodeID
)0, dst
); 
1229     void sete_r(RegisterID dst
) 
1231         m_formatter
.twoByteOp8(setccOpcode(ConditionE
), (GroupOpcodeID
)0, dst
); 
1234     void setz_r(RegisterID dst
) 
1239     void setne_r(RegisterID dst
) 
1241         m_formatter
.twoByteOp8(setccOpcode(ConditionNE
), (GroupOpcodeID
)0, dst
); 
1244     void setnz_r(RegisterID dst
) 
1249     // Various move ops: 
1253         m_formatter
.oneByteOp(OP_CDQ
); 
1256     void fstpl(int offset
, RegisterID base
) 
1258         m_formatter
.oneByteOp(OP_ESCAPE_DD
, ESCAPE_DD_FSTP_doubleReal
, base
, offset
); 
1261     void xchgl_rr(RegisterID src
, RegisterID dst
) 
1263         if (src 
== X86Registers::eax
) 
1264             m_formatter
.oneByteOp(OP_XCHG_EAX
, dst
); 
1265         else if (dst 
== X86Registers::eax
) 
1266             m_formatter
.oneByteOp(OP_XCHG_EAX
, src
); 
1268             m_formatter
.oneByteOp(OP_XCHG_EvGv
, src
, dst
); 
1272     void xchgq_rr(RegisterID src
, RegisterID dst
) 
1274         if (src 
== X86Registers::eax
) 
1275             m_formatter
.oneByteOp64(OP_XCHG_EAX
, dst
); 
1276         else if (dst 
== X86Registers::eax
) 
1277             m_formatter
.oneByteOp64(OP_XCHG_EAX
, src
); 
1279             m_formatter
.oneByteOp64(OP_XCHG_EvGv
, src
, dst
); 
1283     void movl_rr(RegisterID src
, RegisterID dst
) 
1285         m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, dst
); 
1288     void movl_rm(RegisterID src
, int offset
, RegisterID base
) 
1290         m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, base
, offset
); 
1293     void movl_rm_disp32(RegisterID src
, int offset
, RegisterID base
) 
1295         m_formatter
.oneByteOp_disp32(OP_MOV_EvGv
, src
, base
, offset
); 
1298     void movl_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
) 
1300         m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, base
, index
, scale
, offset
); 
1303     void movl_mEAX(const void* addr
) 
1305         m_formatter
.oneByteOp(OP_MOV_EAXOv
); 
1307         m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
)); 
1309         m_formatter
.immediate32(reinterpret_cast<int>(addr
)); 
1313     void movl_mr(int offset
, RegisterID base
, RegisterID dst
) 
1315         m_formatter
.oneByteOp(OP_MOV_GvEv
, dst
, base
, offset
); 
1318     void movl_mr_disp32(int offset
, RegisterID base
, RegisterID dst
) 
1320         m_formatter
.oneByteOp_disp32(OP_MOV_GvEv
, dst
, base
, offset
); 
1323     void movl_mr_disp8(int offset
, RegisterID base
, RegisterID dst
) 
1325         m_formatter
.oneByteOp_disp8(OP_MOV_GvEv
, dst
, base
, offset
); 
1328     void movl_mr(int offset
, RegisterID base
, RegisterID index
, int scale
, RegisterID dst
) 
1330         m_formatter
.oneByteOp(OP_MOV_GvEv
, dst
, base
, index
, scale
, offset
); 
1333     void movl_i32r(int imm
, RegisterID dst
) 
1335         m_formatter
.oneByteOp(OP_MOV_EAXIv
, dst
); 
1336         m_formatter
.immediate32(imm
); 
1339     void movl_i32m(int imm
, int offset
, RegisterID base
) 
1341         m_formatter
.oneByteOp(OP_GROUP11_EvIz
, GROUP11_MOV
, base
, offset
); 
1342         m_formatter
.immediate32(imm
); 
1345     void movl_i32m(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
) 
1347         m_formatter
.oneByteOp(OP_GROUP11_EvIz
, GROUP11_MOV
, base
, index
, scale
, offset
); 
1348         m_formatter
.immediate32(imm
); 
1352     void movb_i8m(int imm
, const void* addr
) 
1354         ASSERT(-128 <= imm 
&& imm 
< 128); 
1355         m_formatter
.oneByteOp(OP_GROUP11_EvIb
, GROUP11_MOV
, addr
); 
1356         m_formatter
.immediate8(imm
); 
1360     void movb_i8m(int imm
, int offset
, RegisterID base
) 
1362         ASSERT(-128 <= imm 
&& imm 
< 128); 
1363         m_formatter
.oneByteOp(OP_GROUP11_EvIb
, GROUP11_MOV
, base
, offset
); 
1364         m_formatter
.immediate8(imm
); 
1367     void movb_i8m(int imm
, int offset
, RegisterID base
, RegisterID index
, int scale
) 
1369         ASSERT(-128 <= imm 
&& imm 
< 128); 
1370         m_formatter
.oneByteOp(OP_GROUP11_EvIb
, GROUP11_MOV
, base
, index
, scale
, offset
); 
1371         m_formatter
.immediate8(imm
); 
1375     void movb_rm(RegisterID src
, const void* addr
) 
1377         m_formatter
.oneByteOp(OP_MOV_EbGb
, src
, addr
); 
1381     void movb_rm(RegisterID src
, int offset
, RegisterID base
) 
1383         m_formatter
.oneByteOp8(OP_MOV_EbGb
, src
, base
, offset
); 
1386     void movb_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
) 
1388         m_formatter
.oneByteOp8(OP_MOV_EbGb
, src
, base
, index
, scale
, offset
); 
1391     void movw_rm(RegisterID src
, int offset
, RegisterID base
, RegisterID index
, int scale
) 
1393         m_formatter
.prefix(PRE_OPERAND_SIZE
); 
1394         m_formatter
.oneByteOp8(OP_MOV_EvGv
, src
, base
, index
, scale
, offset
); 
1397     void movl_EAXm(const void* addr
) 
1399         m_formatter
.oneByteOp(OP_MOV_OvEAX
); 
1401         m_formatter
.immediate64(reinterpret_cast<int64_t>(addr
)); 
1403         m_formatter
.immediate32(reinterpret_cast<int>(addr
)); 
    // 64-bit moves (REX.W planted by the oneByteOp64 family).
    // NOTE(review): line numbering suggests this group sits inside an
    // #if CPU(X86_64) guard that the extraction dropped -- confirm upstream.
    void movq_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
    }

    // movq: store a 64-bit register to base + offset.
    void movq_rm(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, offset);
    }

    // Always emits a 32-bit displacement, so the offset can be repatched.
    void movq_rm_disp32(RegisterID src, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_EvGv, src, base, offset);
    }

    void movq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
    }

    // RAX <-> absolute address moffs forms with a 64-bit address immediate.
    void movq_mEAX(const void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXOv);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    void movq_EAXm(const void* addr)
    {
        m_formatter.oneByteOp64(OP_MOV_OvEAX);
        m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
    }

    // movq: load a 64-bit value from base + offset.
    void movq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
    }

    // Patchable loads with fixed-width displacements.
    void movq_mr_disp32(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
    }

    void movq_mr_disp8(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64_disp8(OP_MOV_GvEv, dst, base, offset);
    }

    void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
    }

    // movq: store imm32 to a 64-bit memory slot (group 11 /0; only a 32-bit
    // immediate is emitted here).
    void movq_i32m(int imm, int offset, RegisterID base)
    {
        m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
        m_formatter.immediate32(imm);
    }

    // movq: load a full 64-bit immediate into a register (REX.W + opcode+reg).
    void movq_i64r(int64_t imm, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
        m_formatter.immediate64(imm);
    }

    // movsxd: sign-extend a 32-bit register into a 64-bit register.
    void movsxd_rr(RegisterID src, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_MOVSXD_GvEv, dst, src);
    }
1479     void movl_rm(RegisterID src
, const void* addr
) 
1481         if (src 
== X86Registers::eax
) 
1484             m_formatter
.oneByteOp(OP_MOV_EvGv
, src
, addr
); 
1487     void movl_mr(const void* addr
, RegisterID dst
) 
1489         if (dst 
== X86Registers::eax
) 
1492             m_formatter
.oneByteOp(OP_MOV_GvEv
, dst
, addr
); 
1495     void movl_i32m(int imm
, const void* addr
) 
1497         m_formatter
.oneByteOp(OP_GROUP11_EvIz
, GROUP11_MOV
, addr
); 
1498         m_formatter
.immediate32(imm
); 
    // Zero/sign-extending loads (movzx/movsx) and address computations (lea).

    // movzwl: load 16 bits, zero-extend to 32.
    void movzwl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, offset);
    }

    void movzwl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
    }

    // movswl: load 16 bits, sign-extend to 32.
    void movswl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, offset);
    }

    void movswl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, index, scale, offset);
    }

    // movzbl: load 8 bits, zero-extend to 32.
    void movzbl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, offset);
    }

    void movzbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, index, scale, offset);
    }

    // NOTE(review): line-number gaps around this absolute-address form suggest
    // a CPU() preprocessor guard was dropped by the extraction -- confirm upstream.
    void movzbl_mr(const void* address, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, address);
    }

    // movsbl: load 8 bits, sign-extend to 32.
    void movsbl_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, offset);
    }

    void movsbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
    {
        m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, index, scale, offset);
    }

    void movzbl_rr(RegisterID src, RegisterID dst)
    {
        // In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
        // is in the range ESP-EDI, and the src would not have required a REX).  Unneeded
        // REX prefixes are defined to be silently ignored by the processor.
        m_formatter.twoByteOp8(OP2_MOVZX_GvEb, dst, src);
    }

    // lea: compute base + offset into dst without touching flags.
    void leal_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp(OP_LEA, dst, base, offset);
    }

    // 64-bit lea (REX.W planted by oneByteOp64).
    void leaq_mr(int offset, RegisterID base, RegisterID dst)
    {
        m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
    }
    // Flow control:

    // Near call with a rel32 to be linked later; returns a label over the rel32.
    AssemblerLabel call()
    {
        m_formatter.oneByteOp(OP_CALL_rel32);
        return m_formatter.immediateRel32();
    }

    // Indirect call through a register; the label marks the point just after
    // the call instruction.
    AssemblerLabel call(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
        return m_formatter.label();
    }

    // Indirect call through memory at base + offset.
    void call_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
    }

    // Unconditional near jump with a linkable rel32.
    AssemblerLabel jmp()
    {
        m_formatter.oneByteOp(OP_JMP_rel32);
        return m_formatter.immediateRel32();
    }

    // Return an AssemblerLabel so we have a label to the jump, so we can use this
    // to make a tail recursive call on x86-64.  The MacroAssembler
    // really shouldn't wrap this as a Jump, since it can't be linked. :-/
    AssemblerLabel jmp_r(RegisterID dst)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
        return m_formatter.label();
    }

    // Indirect jump through memory.
    void jmp_m(int offset, RegisterID base)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
    }

    void jmp_m(const void* address)
    {
        m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, address);
    }
1614     AssemblerLabel 
jne() 
1616         m_formatter
.twoByteOp(jccRel32(ConditionNE
)); 
1617         return m_formatter
.immediateRel32(); 
1620     AssemblerLabel 
jnz() 
1627         m_formatter
.twoByteOp(jccRel32(ConditionE
)); 
1628         return m_formatter
.immediateRel32(); 
1638         m_formatter
.twoByteOp(jccRel32(ConditionL
)); 
1639         return m_formatter
.immediateRel32(); 
1644         m_formatter
.twoByteOp(jccRel32(ConditionB
)); 
1645         return m_formatter
.immediateRel32(); 
1648     AssemblerLabel 
jle() 
1650         m_formatter
.twoByteOp(jccRel32(ConditionLE
)); 
1651         return m_formatter
.immediateRel32(); 
1654     AssemblerLabel 
jbe() 
1656         m_formatter
.twoByteOp(jccRel32(ConditionBE
)); 
1657         return m_formatter
.immediateRel32(); 
1660     AssemblerLabel 
jge() 
1662         m_formatter
.twoByteOp(jccRel32(ConditionGE
)); 
1663         return m_formatter
.immediateRel32(); 
1668         m_formatter
.twoByteOp(jccRel32(ConditionG
)); 
1669         return m_formatter
.immediateRel32(); 
1674         m_formatter
.twoByteOp(jccRel32(ConditionA
)); 
1675         return m_formatter
.immediateRel32(); 
1678     AssemblerLabel 
jae() 
1680         m_formatter
.twoByteOp(jccRel32(ConditionAE
)); 
1681         return m_formatter
.immediateRel32(); 
1686         m_formatter
.twoByteOp(jccRel32(ConditionO
)); 
1687         return m_formatter
.immediateRel32(); 
1690     AssemblerLabel 
jnp() 
1692         m_formatter
.twoByteOp(jccRel32(ConditionNP
)); 
1693         return m_formatter
.immediateRel32(); 
1698         m_formatter
.twoByteOp(jccRel32(ConditionP
)); 
1699         return m_formatter
.immediateRel32(); 
1704         m_formatter
.twoByteOp(jccRel32(ConditionS
)); 
1705         return m_formatter
.immediateRel32(); 
1708     AssemblerLabel 
jCC(Condition cond
) 
1710         m_formatter
.twoByteOp(jccRel32(cond
)); 
1711         return m_formatter
.immediateRel32(); 
    // SSE/SSE2 scalar double (and a few single-precision / integer-domain)
    // operations. The legacy prefix selects the domain: F2 = scalar double,
    // F3 = scalar single, 66 = packed-double / 128-bit integer.
    // NOTE(review): line-number gaps suggest some absolute-address and 64-bit
    // ('q') forms in this group sit under CPU() preprocessor guards that the
    // extraction dropped -- confirm upstream.

    void addsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void addsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void addsd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, address);
    }

    // Integer -> double conversions.
    void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
    }

    // 64-bit source variant (REX.W via twoByteOp64).
    void cvtsi2sdq_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
    }

    void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
    }

    void cvtsi2sd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
    }

    // Truncating double -> integer conversion.
    void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }

    // Double <-> single precision conversions.
    void cvtsd2ss_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, (RegisterID)src);
    }

    void cvtss2sd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F3);
        m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, (RegisterID)src);
    }

    // 64-bit destination variant of cvttsd2si.
    void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp64(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
    }

    // GPR <-> XMM bit moves (32-bit movd; 64-bit movq via REX.W).
    void movd_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    void movd_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }

    void movq_rr(XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_EdVd, (RegisterID)src, dst);
    }

    void movq_rr(RegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp64(OP2_MOVD_VdEd, (RegisterID)dst, src);
    }

    // Scalar double moves.
    void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
    }

    void movsd_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
    }

    // movss reuses the MOVSD opcode constant; the F3 prefix selects the
    // single-precision form.
    void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
    {
        m_formatter.prefix(PRE_SSE_F3);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
    }

    void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void movsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
    }

    void movss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F3);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
    }

    void movsd_mr(const void* address, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
    }

    void movsd_rm(XMMRegisterID src, const void* address)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address);
    }

    // Scalar double arithmetic.
    void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void mulsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_MULSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // Extract a 16-bit lane of an XMM register into a GPR.
    void pextrw_irr(int whichWord, XMMRegisterID src, RegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_PEXTRW_GdUdIb, (RegisterID)dst, (RegisterID)src);
        m_formatter.immediate8(whichWord);
    }

    // 64-bit lane shifts by immediate (group 14 opcodes).
    void psllq_i8r(int imm, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp8(OP2_PSLLQ_UdqIb, GROUP14_OP_PSLLQ, (RegisterID)dst);
        m_formatter.immediate8(imm);
    }

    void psrlq_i8r(int imm, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp8(OP2_PSRLQ_UdqIb, GROUP14_OP_PSRLQ, (RegisterID)dst);
        m_formatter.immediate8(imm);
    }

    void por_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_POR_VdqWdq, (RegisterID)dst, (RegisterID)src);
    }

    void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void subsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SUBSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // Unordered scalar double compare (result goes to EFLAGS).
    void ucomisd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
    }

    void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }

    void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
    }

    // Bitwise ops in the packed-double domain.
    void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
    }

    void andnpd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_66);
        m_formatter.twoByteOp(OP2_ANDNPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
    }

    void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
    {
        m_formatter.prefix(PRE_SSE_F2);
        m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
    }
1964     // Misc instructions: 
1968         m_formatter
.oneByteOp(OP_INT3
); 
1973         m_formatter
.oneByteOp(OP_RET
); 
1976     void predictNotTaken() 
1978         m_formatter
.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN
); 
1983         m_formatter
.threeByteOp(OP3_MFENCE
); 
1986     // Assembler admin methods: 
1988     size_t codeSize() const 
1990         return m_formatter
.codeSize(); 
1993     AssemblerLabel 
labelForWatchpoint() 
1995         AssemblerLabel result 
= m_formatter
.label(); 
1996         if (static_cast<int>(result
.m_offset
) != m_indexOfLastWatchpoint
) 
1998         m_indexOfLastWatchpoint 
= result
.m_offset
; 
1999         m_indexOfTailOfLastWatchpoint 
= result
.m_offset 
+ maxJumpReplacementSize(); 
2003     AssemblerLabel 
labelIgnoringWatchpoints() 
2005         return m_formatter
.label(); 
2008     AssemblerLabel 
label() 
2010         AssemblerLabel result 
= m_formatter
.label(); 
2011         while (UNLIKELY(static_cast<int>(result
.m_offset
) < m_indexOfTailOfLastWatchpoint
)) { 
2013             result 
= m_formatter
.label(); 
2018     AssemblerLabel 
align(int alignment
) 
2020         while (!m_formatter
.isAligned(alignment
)) 
2021             m_formatter
.oneByteOp(OP_HLT
); 
2026     // Linking & patching: 
2028     // 'link' and 'patch' methods are for use on unprotected code - such as the code 
2029     // within the AssemblerBuffer, and code being patched by the patch buffer.  Once 
2030     // code has been finalized it is (platform support permitting) within a non- 
2031     // writable region of memory; to modify the code in an execute-only execuable 
2032     // pool the 'repatch' and 'relink' methods should be used. 
2034     void linkJump(AssemblerLabel from
, AssemblerLabel to
) 
2036         ASSERT(from
.isSet()); 
2039         char* code 
= reinterpret_cast<char*>(m_formatter
.data()); 
2040         ASSERT(!reinterpret_cast<int32_t*>(code 
+ from
.m_offset
)[-1]); 
2041         setRel32(code 
+ from
.m_offset
, code 
+ to
.m_offset
); 
2044     static void linkJump(void* code
, AssemblerLabel from
, void* to
) 
2046         ASSERT(from
.isSet()); 
2048         setRel32(reinterpret_cast<char*>(code
) + from
.m_offset
, to
); 
2051     static void linkCall(void* code
, AssemblerLabel from
, void* to
) 
2053         ASSERT(from
.isSet()); 
2055         setRel32(reinterpret_cast<char*>(code
) + from
.m_offset
, to
); 
2058     static void linkPointer(void* code
, AssemblerLabel where
, void* value
) 
2060         ASSERT(where
.isSet()); 
2062         setPointer(reinterpret_cast<char*>(code
) + where
.m_offset
, value
); 
2065     static void relinkJump(void* from
, void* to
) 
2070     static void relinkCall(void* from
, void* to
) 
2075     static void repatchCompact(void* where
, int32_t value
) 
2077         ASSERT(value 
>= std::numeric_limits
<int8_t>::min()); 
2078         ASSERT(value 
<= std::numeric_limits
<int8_t>::max()); 
2079         setInt8(where
, value
); 
2082     static void repatchInt32(void* where
, int32_t value
) 
2084         setInt32(where
, value
); 
2087     static void repatchPointer(void* where
, void* value
) 
2089         setPointer(where
, value
); 
2092     static void* readPointer(void* where
) 
2094         return reinterpret_cast<void**>(where
)[-1]; 
2097     static void replaceWithJump(void* instructionStart
, void* to
) 
2099         uint8_t* ptr 
= reinterpret_cast<uint8_t*>(instructionStart
); 
2100         uint8_t* dstPtr 
= reinterpret_cast<uint8_t*>(to
); 
2101         intptr_t distance 
= (intptr_t)(dstPtr 
- (ptr 
+ 5)); 
2102         ptr
[0] = static_cast<uint8_t>(OP_JMP_rel32
); 
2103         *reinterpret_cast<int32_t*>(ptr 
+ 1) = static_cast<int32_t>(distance
); 
2106     static ptrdiff_t maxJumpReplacementSize() 
2112     static void revertJumpTo_movq_i64r(void* instructionStart
, int64_t imm
, RegisterID dst
) 
2114         const unsigned instructionSize 
= 10; // REX.W MOV IMM64 
2115         const int rexBytes 
= 1; 
2116         const int opcodeBytes 
= 1; 
2117         uint8_t* ptr 
= reinterpret_cast<uint8_t*>(instructionStart
); 
2118         ptr
[0] = PRE_REX 
| (1 << 3) | (dst 
>> 3); 
2119         ptr
[1] = OP_MOV_EAXIv 
| (dst 
& 7); 
2126         for (unsigned i 
= rexBytes 
+ opcodeBytes
; i 
< instructionSize
; ++i
) 
2127             ptr
[i
] = u
.asBytes
[i 
- rexBytes 
- opcodeBytes
]; 
2130     static void revertJumpTo_movl_i32r(void* instructionStart
, int32_t imm
, RegisterID dst
) 
2132         // We only revert jumps on inline caches, and inline caches always use the scratch register (r11). 
2133         // FIXME: If the above is ever false then we need to make this smarter with respect to emitting  
2135         ASSERT(dst 
== X86Registers::r11
); 
2136         const unsigned instructionSize 
= 6; // REX MOV IMM32 
2137         const int rexBytes 
= 1; 
2138         const int opcodeBytes 
= 1; 
2139         uint8_t* ptr 
= reinterpret_cast<uint8_t*>(instructionStart
); 
2140         ptr
[0] = PRE_REX 
| (dst 
>> 3); 
2141         ptr
[1] = OP_MOV_EAXIv 
| (dst 
& 7); 
2148         for (unsigned i 
= rexBytes 
+ opcodeBytes
; i 
< instructionSize
; ++i
) 
2149             ptr
[i
] = u
.asBytes
[i 
- rexBytes 
- opcodeBytes
]; 
2153     static void revertJumpTo_cmpl_ir_force32(void* instructionStart
, int32_t imm
, RegisterID dst
) 
2155         const int opcodeBytes 
= 1; 
2156         const int modRMBytes 
= 1; 
2157         ASSERT(opcodeBytes 
+ modRMBytes 
<= maxJumpReplacementSize()); 
2158         uint8_t* ptr 
= reinterpret_cast<uint8_t*>(instructionStart
); 
2159         ptr
[0] = OP_GROUP1_EvIz
; 
2160         ptr
[1] = (X86InstructionFormatter::ModRmRegister 
<< 6) | (GROUP1_OP_CMP 
<< 3) | dst
; 
2166         for (unsigned i 
= opcodeBytes 
+ modRMBytes
; i 
< static_cast<unsigned>(maxJumpReplacementSize()); ++i
) 
2167             ptr
[i
] = u
.asBytes
[i 
- opcodeBytes 
- modRMBytes
]; 
2170     static void revertJumpTo_cmpl_im_force32(void* instructionStart
, int32_t imm
, int offset
, RegisterID dst
) 
2172         ASSERT_UNUSED(offset
, !offset
); 
2173         const int opcodeBytes 
= 1; 
2174         const int modRMBytes 
= 1; 
2175         ASSERT(opcodeBytes 
+ modRMBytes 
<= maxJumpReplacementSize()); 
2176         uint8_t* ptr 
= reinterpret_cast<uint8_t*>(instructionStart
); 
2177         ptr
[0] = OP_GROUP1_EvIz
; 
2178         ptr
[1] = (X86InstructionFormatter::ModRmMemoryNoDisp 
<< 6) | (GROUP1_OP_CMP 
<< 3) | dst
; 
2184         for (unsigned i 
= opcodeBytes 
+ modRMBytes
; i 
< static_cast<unsigned>(maxJumpReplacementSize()); ++i
) 
2185             ptr
[i
] = u
.asBytes
[i 
- opcodeBytes 
- modRMBytes
]; 
2188     static void replaceWithLoad(void* instructionStart
) 
2190         uint8_t* ptr 
= reinterpret_cast<uint8_t*>(instructionStart
); 
2192         if ((*ptr 
& ~15) == PRE_REX
) 
2202             RELEASE_ASSERT_NOT_REACHED(); 
2206     static void replaceWithAddressComputation(void* instructionStart
) 
2208         uint8_t* ptr 
= reinterpret_cast<uint8_t*>(instructionStart
); 
2210         if ((*ptr 
& ~15) == PRE_REX
) 
2220             RELEASE_ASSERT_NOT_REACHED(); 
    // Offset of the point just past a previously planted call instruction.
    static unsigned getCallReturnOffset(AssemblerLabel call)
    {
        ASSERT(call.isSet());
        return call.m_offset;
    }

    // Translate a buffer-relative label into an absolute address within 'code'.
    static void* getRelocatedAddress(void* code, AssemblerLabel label)
    {
        ASSERT(label.isSet());
        return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
    }

    // Byte distance from label a to label b (positive when b follows a).
    static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
    {
        return b.m_offset - a.m_offset;
    }

    unsigned debugOffset() { return m_formatter.debugOffset(); }
2245         m_formatter
.oneByteOp(OP_NOP
); 
2248     static void fillNops(void* base
, size_t size
) 
2251         static const uint8_t nops
[10][10] = { 
2259             {0x0f, 0x1f, 0x40, 0x08}, 
2260             // nopl 8(%[re]ax,%[re]ax,1) 
2261             {0x0f, 0x1f, 0x44, 0x00, 0x08}, 
2262             // nopw 8(%[re]ax,%[re]ax,1) 
2263             {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x08}, 
2264             // nopl 512(%[re]ax) 
2265             {0x0f, 0x1f, 0x80, 0x00, 0x02, 0x00, 0x00}, 
2266             // nopl 512(%[re]ax,%[re]ax,1) 
2267             {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00}, 
2268             // nopw 512(%[re]ax,%[re]ax,1) 
2269             {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00}, 
2270             // nopw %cs:512(%[re]ax,%[re]ax,1) 
2271             {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00} 
2274         uint8_t* where 
= reinterpret_cast<uint8_t*>(base
); 
2276             unsigned nopSize 
= static_cast<unsigned>(std::min
<size_t>(size
, 15)); 
2277             unsigned numPrefixes 
= nopSize 
<= 10 ? 0 : nopSize 
- 10; 
2278             for (unsigned i 
= 0; i 
!= numPrefixes
; ++i
) 
2281             unsigned nopRest 
= nopSize 
- numPrefixes
; 
2282             for (unsigned i 
= 0; i 
!= nopRest
; ++i
) 
2283                 *where
++ = nops
[nopRest
-1][i
]; 
2288         memset(base
, OP_NOP
, size
); 
// This is a no-op on x86: instruction caches are coherent with data writes,
// so freshly written code needs no explicit flush before execution.
ALWAYS_INLINE static void cacheFlush(void*, size_t) { }
// Repatching helper: overwrites the pointer-sized immediate that ends at
// |where| (i.e. the sizeof(void*) bytes immediately before it) with |value|.
static void setPointer(void* where, void* value)
{
    void** slot = reinterpret_cast<void**>(where);
    slot[-1] = value;
}
// Repatching helper: overwrites the 32-bit immediate that ends at |where|
// (the four bytes immediately before it) with |value|.
static void setInt32(void* where, int32_t value)
{
    int32_t* insertionPoint = reinterpret_cast<int32_t*>(where);
    insertionPoint[-1] = value;
}
// Repatching helper: overwrites the single byte immediately before |where|
// with |value|.
static void setInt8(void* where, int8_t value)
{
    int8_t* insertionPoint = reinterpret_cast<int8_t*>(where);
    insertionPoint[-1] = value;
}
2312     static void setRel32(void* from
, void* to
) 
2314         intptr_t offset 
= reinterpret_cast<intptr_t>(to
) - reinterpret_cast<intptr_t>(from
); 
2315         ASSERT(offset 
== static_cast<int32_t>(offset
)); 
2317         setInt32(from
, offset
); 
// Low-level instruction formatter: writes legacy prefixes, opcode bytes,
// ModRM/SIB bytes and immediates into the underlying AssemblerBuffer.
class X86InstructionFormatter {

    // Conservative upper bound on one instruction's encoded length;
    // ensureSpace(maxInstructionSize) lets the ops below use the
    // *Unchecked putters safely.
    static const int maxInstructionSize = 16;

    // Legacy prefix bytes:

    // These are emitted prior to the instruction.

    // Emits a single legacy-prefix byte (e.g. operand-size 0x66).
    void prefix(OneByteOpcodeID pre)
        m_buffer.putByte(pre);
// Word-sized operands / no operand instruction formatters.
//
// In addition to the opcode, the following operand permutations are supported:
//   * None - instruction takes no operands.
//   * One register - the low three bits of the RegisterID are added into the opcode.
//   * Two registers - encode a register form ModRm (for all ModRm formats, the reg field is passed first, and a GroupOpcodeID may be passed in its place).
//   * Three argument ModRM - a register, and a register and an offset describing a memory operand.
//   * Five argument ModRM - a register, and a base register, an index, scale, and offset describing a memory operand.
//
// For 32-bit x86 targets, the address operand may also be provided as a void*.
// On 64-bit targets REX prefixes will be planted as necessary, where high numbered registers are used.
//
// The twoByteOp methods plant two-byte Intel instruction sequences (first opcode byte 0x0F).

// Opcode only, no operands.
void oneByteOp(OneByteOpcodeID opcode)
    m_buffer.ensureSpace(maxInstructionSize);
    m_buffer.putByteUnchecked(opcode);

// Register encoded into the low three bits of the opcode byte
// (REX.B extends it to r8..r15 where needed).
void oneByteOp(OneByteOpcodeID opcode, RegisterID reg)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexIfNeeded(0, 0, reg);
    m_buffer.putByteUnchecked(opcode + (reg & 7));

// Register-to-register form (ModRM mod == 3).
void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID rm)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexIfNeeded(reg, 0, rm);
    m_buffer.putByteUnchecked(opcode);
    registerModRM(reg, rm);

// Memory operand: base register + displacement; memoryModRM picks the
// shortest displacement encoding.
void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexIfNeeded(reg, 0, base);
    m_buffer.putByteUnchecked(opcode);
    memoryModRM(reg, base, offset);

// As above but forces a 32-bit displacement (used where the offset may be
// repatched later).
void oneByteOp_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexIfNeeded(reg, 0, base);
    m_buffer.putByteUnchecked(opcode);
    memoryModRM_disp32(reg, base, offset);

// As above but forces an 8-bit displacement.
void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexIfNeeded(reg, 0, base);
    m_buffer.putByteUnchecked(opcode);
    memoryModRM_disp8(reg, base, offset);

// Memory operand with SIB: base + index*scale + displacement.
void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexIfNeeded(reg, index, base);
    m_buffer.putByteUnchecked(opcode);
    memoryModRM(reg, base, index, scale, offset);

// Absolute-address memory operand — per the header comment above this is the
// 32-bit x86 form (no REX handling here); presumably guarded by
// `#if !CPU(X86_64)` upstream — confirm.
void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
    m_buffer.ensureSpace(maxInstructionSize);
    m_buffer.putByteUnchecked(opcode);
    memoryModRM(reg, address);
// Two-byte (0x0F-escaped) opcode, no operands.
void twoByteOp(TwoByteOpcodeID opcode)
    m_buffer.ensureSpace(maxInstructionSize);
    m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
    m_buffer.putByteUnchecked(opcode);

// Register-to-register form (ModRM mod == 3).
void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID rm)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexIfNeeded(reg, 0, rm);
    m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
    m_buffer.putByteUnchecked(opcode);
    registerModRM(reg, rm);

// Memory operand: base register + displacement.
void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, int offset)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexIfNeeded(reg, 0, base);
    m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
    m_buffer.putByteUnchecked(opcode);
    memoryModRM(reg, base, offset);

// Memory operand with SIB: base + index*scale + displacement.
void twoByteOp(TwoByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexIfNeeded(reg, index, base);
    m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
    m_buffer.putByteUnchecked(opcode);
    memoryModRM(reg, base, index, scale, offset);

// Absolute-address memory operand (32-bit x86 form — no REX handling here).
void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
    m_buffer.ensureSpace(maxInstructionSize);
    m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
    m_buffer.putByteUnchecked(opcode);
    memoryModRM(reg, address);

// Three-byte (0x0F 0x38/0x3A-escaped) opcode, no operands.
void threeByteOp(ThreeByteOpcodeID opcode)
    m_buffer.ensureSpace(maxInstructionSize);
    m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
    m_buffer.putByteUnchecked(OP2_3BYTE_ESCAPE);
    m_buffer.putByteUnchecked(opcode);
// Quad-word-sized operands:
//
// Used to format 64-bit operations, planting a REX.w prefix.
// When planting d64 or f64 instructions, not requiring a REX.w prefix,
// the normal (non-'64'-postfixed) formatters should be used.

void oneByteOp64(OneByteOpcodeID opcode)
    m_buffer.ensureSpace(maxInstructionSize);
    // NOTE(review): an `emitRexW(0, 0, 0)` call appears to have been lost
    // in extraction here — without it no REX.W prefix would be planted.
    // Confirm against the upstream file.
    m_buffer.putByteUnchecked(opcode);

// Register encoded into the low three bits of the opcode byte.
void oneByteOp64(OneByteOpcodeID opcode, RegisterID reg)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexW(0, 0, reg);
    m_buffer.putByteUnchecked(opcode + (reg & 7));

// Register-to-register form (ModRM mod == 3).
void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID rm)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexW(reg, 0, rm);
    m_buffer.putByteUnchecked(opcode);
    registerModRM(reg, rm);

// Memory operand: base register + displacement.
void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexW(reg, 0, base);
    m_buffer.putByteUnchecked(opcode);
    memoryModRM(reg, base, offset);

// As above but forces a 32-bit displacement (repatchable offsets).
void oneByteOp64_disp32(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexW(reg, 0, base);
    m_buffer.putByteUnchecked(opcode);
    memoryModRM_disp32(reg, base, offset);

// As above but forces an 8-bit displacement.
void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexW(reg, 0, base);
    m_buffer.putByteUnchecked(opcode);
    memoryModRM_disp8(reg, base, offset);

// Memory operand with SIB: base + index*scale + displacement.
void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexW(reg, index, base);
    m_buffer.putByteUnchecked(opcode);
    memoryModRM(reg, base, index, scale, offset);

// Two-byte opcode with REX.W, register-to-register form.
void twoByteOp64(TwoByteOpcodeID opcode, int reg, RegisterID rm)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexW(reg, 0, rm);
    m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
    m_buffer.putByteUnchecked(opcode);
    registerModRM(reg, rm);
// These methods format byte operations.  Byte operations differ from the normal
// formatters in the circumstances under which they will decide to emit REX prefixes.
// These should be used where any register operand signifies a byte register.
//
// The distinction is due to the handling of register numbers in the range 4..7 on
// x86-64.  These register numbers may either represent the second byte of the first
// four registers (ah..bh) or the first byte of the second four registers (spl..dil).
//
// Since ah..bh cannot be used in all permutations of operands (specifically cannot
// be accessed where a REX prefix is present), these are likely best treated as
// deprecated.  In order to ensure the correct registers spl..dil are selected a
// REX prefix will be emitted for any byte register operand in the range 4..15.
//
// These formatters may be used in instructions where a mix of operand sizes, in which
// case an unnecessary REX will be emitted, for example:
//     movzbl %al, %edi
// In this case a REX will be planted since edi is 7 (and were this a byte operand
// a REX would be required to specify dil instead of bh).  Unneeded REX prefixes will
// be silently ignored by the processor.
//
// Address operands should still be checked using regRequiresRex(), while byteRegRequiresRex()
// is provided to check byte register operands.

// Group-opcode form: the ModRM reg field carries the sub-opcode.
void oneByteOp8(OneByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
    m_buffer.putByteUnchecked(opcode);
    registerModRM(groupOp, rm);

// Byte register-to-register form.
void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID rm)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
    m_buffer.putByteUnchecked(opcode);
    registerModRM(reg, rm);

// Byte register with base+offset memory operand.
void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(base), reg, 0, base);
    m_buffer.putByteUnchecked(opcode);
    memoryModRM(reg, base, offset);

// Byte register with SIB memory operand; note base/index are address
// registers and so use regRequiresRex(), not byteRegRequiresRex().
void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index) || regRequiresRex(base), reg, index, base);
    m_buffer.putByteUnchecked(opcode);
    memoryModRM(reg, base, index, scale, offset);
2598         void twoByteOp8(TwoByteOpcodeID opcode
, RegisterID reg
, RegisterID rm
) 
2600             m_buffer
.ensureSpace(maxInstructionSize
); 
2601             emitRexIf(byteRegRequiresRex(reg
)|byteRegRequiresRex(rm
), reg
, 0, rm
); 
2602             m_buffer
.putByteUnchecked(OP_2BYTE_ESCAPE
); 
2603             m_buffer
.putByteUnchecked(opcode
); 
2604             registerModRM(reg
, rm
); 
// Two-byte opcode, group form: the ModRM reg field carries the sub-opcode.
void twoByteOp8(TwoByteOpcodeID opcode, GroupOpcodeID groupOp, RegisterID rm)
    m_buffer.ensureSpace(maxInstructionSize);
    emitRexIf(byteRegRequiresRex(rm), 0, 0, rm);
    m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
    m_buffer.putByteUnchecked(opcode);
    registerModRM(groupOp, rm);
// An immediate should be appended where appropriate after an op has been emitted.
// The writes are unchecked since the opcode formatters above will have ensured space.

// 8-bit immediate.
void immediate8(int imm)
    m_buffer.putByteUnchecked(imm);

// 16-bit immediate.
void immediate16(int imm)
    m_buffer.putShortUnchecked(imm);

// 32-bit immediate.
void immediate32(int imm)
    m_buffer.putIntUnchecked(imm);

// 64-bit immediate.
void immediate64(int64_t imm)
    m_buffer.putInt64Unchecked(imm);

// Plants a 32-bit zero placeholder for a rel32 operand so the branch target
// can be linked/repatched later.
AssemblerLabel immediateRel32()
    m_buffer.putIntUnchecked(0);
    // NOTE(review): a `return label();` statement appears to have been lost
    // in extraction here — the method must return the label of the slot just
    // written.  Confirm against the upstream file.
// Administrative methods: thin forwards to the underlying AssemblerBuffer.

size_t codeSize() const { return m_buffer.codeSize(); }
// Label at the current end of the buffer.
AssemblerLabel label() const { return m_buffer.label(); }
bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
// Pointer to the raw emitted bytes.
void* data() const { return m_buffer.data(); }

// Current write offset; debugging aid only.
unsigned debugOffset() { return m_buffer.debugOffset(); }
// Internals; ModRm and REX formatters.

// In ModRM encoding, a base of ebp (with mod == 0) means "no base, disp32",
// and a base/rm of esp signals that a SIB byte follows (so esp can never be
// an index).  These named constants capture those special register numbers.
static const RegisterID noBase = X86Registers::ebp;
static const RegisterID hasSib = X86Registers::esp;
static const RegisterID noIndex = X86Registers::esp;

// x86-64 counterparts: r13/r12 share ebp/esp's low three encoding bits and
// so inherit the same special cases.
// NOTE(review): these two constants and the two predicates below are
// x86-64-specific; the `#if CPU(X86_64)` guard appears to have been lost in
// extraction.
static const RegisterID noBase2 = X86Registers::r13;
static const RegisterID hasSib2 = X86Registers::r12;

// Registers r8 & above require a REX prefix.
inline bool regRequiresRex(int reg)
    return (reg >= X86Registers::r8);

// Byte operand register spl & above require a REX prefix (to prevent the 'H' registers be accessed).
inline bool byteRegRequiresRex(int reg)
    return (reg >= X86Registers::esp);
// Format a REX prefix byte.
// w selects 64-bit operand size; r, x and b are the ModRM reg field, SIB
// index and base/rm register numbers, each contributing its high bit
// (>> 3) to the REX.R/REX.X/REX.B bits respectively.
inline void emitRex(bool w, int r, int x, int b)
    m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));

// Used to plant a REX byte with REX.w set (for 64-bit operations).
inline void emitRexW(int r, int x, int b)
    emitRex(true, r, x, b);

// Used for operations with byte operands - use byteRegRequiresRex() to check register operands,
// regRequiresRex() to check other registers (i.e. address base & index).
inline void emitRexIf(bool condition, int r, int x, int b)
    if (condition) emitRex(false, r, x, b);

// Used for word sized operations, will plant a REX prefix if necessary (if any register is r8 or above).
inline void emitRexIfNeeded(int r, int x, int b)
    emitRexIf(regRequiresRex(r) || regRequiresRex(x) || regRequiresRex(b), r, x, b);

// No REX prefix bytes on 32-bit x86.
// NOTE(review): these stubs are the 32-bit alternatives to the emitters
// above; the separating `#else`/`#endif` preprocessor lines appear to have
// been lost in extraction.
inline bool regRequiresRex(int) { return false; }
inline bool byteRegRequiresRex(int) { return false; }
inline void emitRexIf(bool, int, int, int) {}
inline void emitRexIfNeeded(int, int, int) {}
// Packs and emits a ModRM byte: mode (2 bits) | reg (3 bits) | rm (3 bits).
void putModRm(ModRmMode mode, int reg, RegisterID rm)
    m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));

// Emits a ModRM byte with rm = esp (signalling a SIB byte follows), then the
// SIB byte itself: scale (2 bits) | index (3 bits) | base (3 bits).
void putModRmSib(ModRmMode mode, int reg, RegisterID base, RegisterID index, int scale)
    // Register-direct mode has no SIB byte.
    ASSERT(mode != ModRmRegister);

    putModRm(mode, reg, hasSib);
    m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));

// Register-direct form (mod == 3).
void registerModRM(int reg, RegisterID rm)
    putModRm(ModRmRegister, reg, rm);
// Emits ModRM (+ SIB + displacement as needed) for a base+offset memory
// operand, choosing the shortest displacement encoding (none / 8-bit /
// 32-bit).
void memoryModRM(int reg, RegisterID base, int offset)
    // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
    // NOTE(review): the next two `if` lines are the x86-64 and 32-bit
    // variants of the same test; the `#if CPU(X86_64)`/`#else`/`#endif`
    // guards appear to have been lost in extraction — confirm upstream.
    if ((base == hasSib) || (base == hasSib2)) {
    if (base == hasSib) {
        if (!offset) // No need to check if the base is noBase, since we know it is hasSib!
            putModRmSib(ModRmMemoryNoDisp, reg, base, noIndex, 0);
        else if (CAN_SIGN_EXTEND_8_32(offset)) {
            putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
            m_buffer.putByteUnchecked(offset);
        // (the `} else {` line for the 32-bit-displacement branch appears to
        // have been lost in extraction)
            putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
            m_buffer.putIntUnchecked(offset);
        // Non-SIB bases; ebp (and r13 on x86-64) cannot use the
        // no-displacement encoding (that pattern means disp32, no base).
        // NOTE(review): again both the x86-64 and 32-bit variants of the
        // test are present; preprocessor guards lost in extraction.
        if (!offset && (base != noBase) && (base != noBase2))
        if (!offset && (base != noBase))
            putModRm(ModRmMemoryNoDisp, reg, base);
        else if (CAN_SIGN_EXTEND_8_32(offset)) {
            putModRm(ModRmMemoryDisp8, reg, base);
            m_buffer.putByteUnchecked(offset);
        // (else branch: 32-bit displacement)
            putModRm(ModRmMemoryDisp32, reg, base);
            m_buffer.putIntUnchecked(offset);
// As memoryModRM(), but always emits an 8-bit displacement (caller
// guarantees the offset fits).
void memoryModRM_disp8(int reg, RegisterID base, int offset)
    // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
    ASSERT(CAN_SIGN_EXTEND_8_32(offset));
    // NOTE(review): x86-64 and 32-bit variants of the same test; the
    // `#if CPU(X86_64)`/`#else`/`#endif` guards appear lost in extraction.
    if ((base == hasSib) || (base == hasSib2)) {
    if (base == hasSib) {
        putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
        m_buffer.putByteUnchecked(offset);
    // (else branch: plain ModRM, no SIB byte needed)
        putModRm(ModRmMemoryDisp8, reg, base);
        m_buffer.putByteUnchecked(offset);
// As memoryModRM(), but always emits a 32-bit displacement (used where the
// displacement may be repatched later).
void memoryModRM_disp32(int reg, RegisterID base, int offset)
    // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
    // NOTE(review): x86-64 and 32-bit variants of the same test; the
    // `#if CPU(X86_64)`/`#else`/`#endif` guards appear lost in extraction.
    if ((base == hasSib) || (base == hasSib2)) {
    if (base == hasSib) {
        putModRmSib(ModRmMemoryDisp32, reg, base, noIndex, 0);
        m_buffer.putIntUnchecked(offset);
    // (else branch: plain ModRM, no SIB byte needed)
        putModRm(ModRmMemoryDisp32, reg, base);
        m_buffer.putIntUnchecked(offset);
// Emits ModRM + SIB (+ displacement) for a base + index*scale + offset
// memory operand, choosing the shortest displacement encoding.
void memoryModRM(int reg, RegisterID base, RegisterID index, int scale, int offset)
    // esp cannot be encoded as an index (that pattern means "no index").
    ASSERT(index != noIndex);

    // ebp (and r13 on x86-64) bases cannot use the no-displacement encoding.
    // NOTE(review): the next two `if` lines are the x86-64 and 32-bit
    // variants of the same test; the `#if CPU(X86_64)`/`#else`/`#endif`
    // guards appear to have been lost in extraction.
    if (!offset && (base != noBase) && (base != noBase2))
    if (!offset && (base != noBase))
        putModRmSib(ModRmMemoryNoDisp, reg, base, index, scale);
    else if (CAN_SIGN_EXTEND_8_32(offset)) {
        putModRmSib(ModRmMemoryDisp8, reg, base, index, scale);
        m_buffer.putByteUnchecked(offset);
    // (else branch: 32-bit displacement)
        putModRmSib(ModRmMemoryDisp32, reg, base, index, scale);
        m_buffer.putIntUnchecked(offset);
// Absolute-address form.  The reinterpret_cast to int32_t only makes sense
// where pointers are 32 bits, so this is presumably the `#if !CPU(X86_64)`
// variant (guard lost in extraction) — confirm upstream.
void memoryModRM(int reg, const void* address)
    // noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
    putModRm(ModRmMemoryNoDisp, reg, noBase);
    m_buffer.putIntUnchecked(reinterpret_cast<int32_t>(address));

// Backing storage every formatter method above writes into.
AssemblerBuffer m_buffer;

// NOTE(review): the closing brace of X86InstructionFormatter appears to have
// been lost in extraction; the two members below belong to the enclosing
// X86Assembler, presumably tracking the most recent watchpoint label and the
// end of its instruction — confirm against the upstream file.
int m_indexOfLastWatchpoint;
int m_indexOfTailOfLastWatchpoint;
2836 #endif // ENABLE(ASSEMBLER) && CPU(X86) 
2838 #endif // X86Assembler_h