/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#ifndef X86Assembler_h
#define X86Assembler_h
-#include <wtf/Platform.h>
-
-#if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
+#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
#include "AssemblerBuffer.h"
+#include "JITCompilationEffort.h"
+#include <algorithm>
+#include <limits.h>
+#include <limits>
#include <stdint.h>
+#include <string.h>
#include <wtf/Assertions.h>
#include <wtf/Vector.h>
+#if USE(MASM_PROBE)
+#include <xmmintrin.h>
+#endif
+
namespace JSC {
inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
-#if PLATFORM(X86_64)
-inline bool CAN_SIGN_EXTEND_32_64(intptr_t value) { return value == (intptr_t)(int32_t)value; }
-inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value) { return value == (intptr_t)(uint32_t)value; }
-#endif
-namespace X86 {
+namespace X86Registers {
typedef enum {
eax,
ecx,
esi,
edi,
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
r8,
r9,
r10,
xmm5,
xmm6,
xmm7,
+
+#if CPU(X86_64)
+ xmm8,
+ xmm9,
+ xmm10,
+ xmm11,
+ xmm12,
+ xmm13,
+ xmm14,
+ xmm15,
+#endif
} XMMRegisterID;
+
+#if USE(MASM_PROBE)
+ #define FOR_EACH_CPU_REGISTER(V) \
+ FOR_EACH_CPU_GPREGISTER(V) \
+ FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+ FOR_EACH_CPU_FPREGISTER(V)
+
+ #define FOR_EACH_CPU_GPREGISTER(V) \
+ V(void*, eax) \
+ V(void*, ebx) \
+ V(void*, ecx) \
+ V(void*, edx) \
+ V(void*, esi) \
+ V(void*, edi) \
+ V(void*, ebp) \
+ V(void*, esp) \
+ FOR_EACH_X86_64_CPU_GPREGISTER(V)
+
+ #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+ V(void*, eip) \
+ V(void*, eflags)
+
+ #define FOR_EACH_CPU_FPREGISTER(V) \
+ V(__m128, xmm0) \
+ V(__m128, xmm1) \
+ V(__m128, xmm2) \
+ V(__m128, xmm3) \
+ V(__m128, xmm4) \
+ V(__m128, xmm5) \
+ V(__m128, xmm6) \
+ V(__m128, xmm7)
+
+#if CPU(X86)
+ #define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add.
+#elif CPU(X86_64)
+ #define FOR_EACH_X86_64_CPU_GPREGISTER(V) \
+ V(void*, r8) \
+ V(void*, r9) \
+ V(void*, r10) \
+ V(void*, r11) \
+ V(void*, r12) \
+ V(void*, r13) \
+ V(void*, r14) \
+ V(void*, r15)
+#endif // CPU(X86_64)
+#endif // USE(MASM_PROBE)
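+
+ // Illustrative sketch (not part of this patch): a client such as a probe
+ // mechanism can expand these X-macros to declare one field per register;
+ // the struct and macro names below are hypothetical.
+ //
+ //     struct CPUState {
+ //         #define DECLARE_REGISTER(type, name) type name;
+ //         FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
+ //         #undef DECLARE_REGISTER
+ //     };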
}
class X86Assembler {
public:
- typedef X86::RegisterID RegisterID;
- typedef X86::XMMRegisterID XMMRegisterID;
+ typedef X86Registers::RegisterID RegisterID;
+
+ static RegisterID firstRegister() { return X86Registers::eax; }
+ static RegisterID lastRegister()
+ {
+#if CPU(X86_64)
+ return X86Registers::r15;
+#else
+ return X86Registers::edi;
+#endif
+ }
+
+ typedef X86Registers::XMMRegisterID XMMRegisterID;
typedef XMMRegisterID FPRegisterID;
+
+ static FPRegisterID firstFPRegister() { return X86Registers::xmm0; }
+ static FPRegisterID lastFPRegister()
+ {
+#if CPU(X86_64)
+ return X86Registers::xmm15;
+#else
+ return X86Registers::xmm7;
+#endif
+ }
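+
+ // Minimal usage sketch (illustrative): these bounds allow iterating every
+ // register, e.g.
+ //     for (RegisterID reg = firstRegister(); reg <= lastRegister(); reg = static_cast<RegisterID>(reg + 1)) { ... }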
typedef enum {
ConditionO,
typedef enum {
OP_ADD_EvGv = 0x01,
OP_ADD_GvEv = 0x03,
+ OP_ADD_EAXIv = 0x05,
OP_OR_EvGv = 0x09,
OP_OR_GvEv = 0x0B,
+ OP_OR_EAXIv = 0x0D,
OP_2BYTE_ESCAPE = 0x0F,
OP_AND_EvGv = 0x21,
OP_AND_GvEv = 0x23,
OP_SUB_EvGv = 0x29,
OP_SUB_GvEv = 0x2B,
+ OP_SUB_EAXIv = 0x2D,
PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
OP_XOR_EvGv = 0x31,
OP_XOR_GvEv = 0x33,
+ OP_XOR_EAXIv = 0x35,
OP_CMP_EvGv = 0x39,
OP_CMP_GvEv = 0x3B,
-#if PLATFORM(X86_64)
+ OP_CMP_EAXIv = 0x3D,
+#if CPU(X86_64)
PRE_REX = 0x40,
#endif
OP_PUSH_EAX = 0x50,
OP_POP_EAX = 0x58,
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
OP_MOVSXD_GvEv = 0x63,
#endif
PRE_OPERAND_SIZE = 0x66,
PRE_SSE_66 = 0x66,
OP_PUSH_Iz = 0x68,
OP_IMUL_GvEvIz = 0x69,
+ OP_GROUP1_EbIb = 0x80,
OP_GROUP1_EvIz = 0x81,
OP_GROUP1_EvIb = 0x83,
+ OP_TEST_EbGb = 0x84,
OP_TEST_EvGv = 0x85,
OP_XCHG_EvGv = 0x87,
+ OP_MOV_EbGb = 0x88,
OP_MOV_EvGv = 0x89,
OP_MOV_GvEv = 0x8B,
OP_LEA = 0x8D,
OP_GROUP1A_Ev = 0x8F,
+ OP_NOP = 0x90,
+ OP_XCHG_EAX = 0x90,
OP_CDQ = 0x99,
OP_MOV_EAXOv = 0xA1,
OP_MOV_OvEAX = 0xA3,
+ OP_TEST_ALIb = 0xA8,
+ OP_TEST_EAXIv = 0xA9,
OP_MOV_EAXIv = 0xB8,
OP_GROUP2_EvIb = 0xC1,
OP_RET = 0xC3,
+ OP_GROUP11_EvIb = 0xC6,
OP_GROUP11_EvIz = 0xC7,
OP_INT3 = 0xCC,
OP_GROUP2_Ev1 = 0xD1,
OP_GROUP2_EvCL = 0xD3,
+ OP_ESCAPE_DD = 0xDD,
OP_CALL_rel32 = 0xE8,
OP_JMP_rel32 = 0xE9,
PRE_SSE_F2 = 0xF2,
+ PRE_SSE_F3 = 0xF3,
OP_HLT = 0xF4,
OP_GROUP3_EbIb = 0xF6,
OP_GROUP3_Ev = 0xF7,
typedef enum {
OP2_MOVSD_VsdWsd = 0x10,
OP2_MOVSD_WsdVsd = 0x11,
+ OP2_MOVSS_VsdWsd = 0x10,
+ OP2_MOVSS_WsdVsd = 0x11,
OP2_CVTSI2SD_VsdEd = 0x2A,
OP2_CVTTSD2SI_GdWsd = 0x2C,
OP2_UCOMISD_VsdWsd = 0x2E,
OP2_ADDSD_VsdWsd = 0x58,
OP2_MULSD_VsdWsd = 0x59,
+ OP2_CVTSD2SS_VsdWsd = 0x5A,
+ OP2_CVTSS2SD_VsdWsd = 0x5A,
OP2_SUBSD_VsdWsd = 0x5C,
OP2_DIVSD_VsdWsd = 0x5E,
+ OP2_SQRTSD_VsdWsd = 0x51,
+ OP2_ANDNPD_VpdWpd = 0x55,
OP2_XORPD_VpdWpd = 0x57,
OP2_MOVD_VdEd = 0x6E,
OP2_MOVD_EdVd = 0x7E,
OP2_JCC_rel32 = 0x80,
OP_SETCC = 0x90,
+ OP2_3BYTE_ESCAPE = 0xAE,
OP2_IMUL_GvEv = 0xAF,
OP2_MOVZX_GvEb = 0xB6,
+ OP2_MOVSX_GvEb = 0xBE,
OP2_MOVZX_GvEw = 0xB7,
+ OP2_MOVSX_GvEw = 0xBF,
OP2_PEXTRW_GdUdIb = 0xC5,
+ OP2_PSLLQ_UdqIb = 0x73,
+ OP2_PSRLQ_UdqIb = 0x73,
+ OP2_POR_VdqWdq = 0xEB,
} TwoByteOpcodeID;
+
+ typedef enum {
+ OP3_MFENCE = 0xF0,
+ } ThreeByteOpcodeID;
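+
+ // In this assembler the only three-byte sequence is 0F AE F0 (mfence);
+ // threeByteOp() plants the 0F and AE escape bytes, then this opcode.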
TwoByteOpcodeID jccRel32(Condition cond)
{
GROUP1_OP_CMP = 7,
GROUP1A_OP_POP = 0,
-
+
+ GROUP2_OP_ROL = 0,
+ GROUP2_OP_ROR = 1,
+ GROUP2_OP_RCL = 2,
+ GROUP2_OP_RCR = 3,
+
GROUP2_OP_SHL = 4,
+ GROUP2_OP_SHR = 5,
GROUP2_OP_SAR = 7,
GROUP3_OP_TEST = 0,
GROUP5_OP_PUSH = 6,
GROUP11_MOV = 0,
+
+ GROUP14_OP_PSLLQ = 6,
+ GROUP14_OP_PSRLQ = 2,
+
+ ESCAPE_DD_FSTP_doubleReal = 3,
} GroupOpcodeID;
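+ // The "/digit" values above land in the ModRM reg field and combine with a
+ // shared opcode byte; e.g. 81 /7 id encodes cmpl $imm32, %r/m32.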
class X86InstructionFormatter;
public:
- class JmpSrc {
- friend class X86Assembler;
- friend class X86InstructionFormatter;
- public:
- JmpSrc()
- : m_offset(-1)
- {
- }
-
- private:
- JmpSrc(int offset)
- : m_offset(offset)
- {
- }
-
- int m_offset;
- };
-
- class JmpDst {
- friend class X86Assembler;
- friend class X86InstructionFormatter;
- public:
- JmpDst()
- : m_offset(-1)
- , m_used(false)
- {
- }
-
- bool isUsed() const { return m_used; }
- void used() { m_used = true; }
- private:
- JmpDst(int offset)
- : m_offset(offset)
- , m_used(false)
- {
- ASSERT(m_offset == offset);
- }
-
- int m_offset : 31;
- bool m_used : 1;
- };
-
X86Assembler()
+ : m_indexOfLastWatchpoint(INT_MIN)
+ , m_indexOfTailOfLastWatchpoint(INT_MIN)
{
}
-
- size_t size() const { return m_formatter.size(); }
+
+ AssemblerBuffer& buffer() { return m_formatter.m_buffer; }
// Stack operations:
// Arithmetic operations:
-#if !PLATFORM(X86_64)
- void adcl_im(int imm, void* addr)
+#if !CPU(X86_64)
+ void adcl_im(int imm, const void* addr)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
{
m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
}
+
+#if !CPU(X86_64)
+ void addl_mr(const void* addr, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_ADD_GvEv, dst, addr);
+ }
+#endif
void addl_rm(RegisterID src, int offset, RegisterID base)
{
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
m_formatter.immediate8(imm);
} else {
- m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
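+ // The accumulator short form (OP_ADD_EAXIv and friends) omits the ModRM
+ // byte when the destination is eax; or/sub/xor/cmp/test get the same
+ // treatment below.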
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp(OP_ADD_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
m_formatter.immediate32(imm);
}
}
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void addq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
}
+ void addq_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_ADD_GvEv, dst, base, offset);
+ }
+
void addq_ir(int imm, RegisterID dst)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, dst);
m_formatter.immediate8(imm);
} else {
- m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp64(OP_ADD_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, dst);
m_formatter.immediate32(imm);
}
}
}
}
#else
- void addl_im(int imm, void* addr)
+ void addl_im(int imm, const void* addr)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void andq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
}
}
#else
- void andl_im(int imm, void* addr)
+ void andl_im(int imm, const void* addr)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
}
#endif
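+ // INC and DEC are GROUP5 /0 and /1; those encodings coincide numerically
+ // with GROUP1_OP_ADD (0) and GROUP1_OP_OR (1), which are reused here in
+ // place of dedicated GROUP5 names.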
+ void dec_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP1_OP_OR, dst);
+ }
+
+#if CPU(X86_64)
+ void decq_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_OR, dst);
+ }
+#endif // CPU(X86_64)
+
+ void inc_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP1_OP_ADD, dst);
+ }
+
+#if CPU(X86_64)
+ void incq_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, dst);
+ }
+#endif // CPU(X86_64)
+
void negl_r(RegisterID dst)
{
m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
}
+#if CPU(X86_64)
+ void negq_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
+ }
+#endif
+
void negl_m(int offset, RegisterID base)
{
m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
m_formatter.immediate8(imm);
} else {
- m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp(OP_OR_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
m_formatter.immediate32(imm);
}
}
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void orq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_OR, dst);
m_formatter.immediate8(imm);
} else {
- m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp64(OP_OR_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_OR, dst);
m_formatter.immediate32(imm);
}
}
#else
- void orl_im(int imm, void* addr)
+ void orl_im(int imm, const void* addr)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
m_formatter.immediate32(imm);
}
}
+
+ void orl_rm(RegisterID src, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_OR_EvGv, src, addr);
+ }
#endif
void subl_rr(RegisterID src, RegisterID dst)
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
m_formatter.immediate8(imm);
} else {
- m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp(OP_SUB_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
m_formatter.immediate32(imm);
}
}
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void subq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_SUB, dst);
m_formatter.immediate8(imm);
} else {
- m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp64(OP_SUB_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_SUB, dst);
m_formatter.immediate32(imm);
}
}
#else
- void subl_im(int imm, void* addr)
+ void subl_im(int imm, const void* addr)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
m_formatter.immediate8(imm);
} else {
- m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp(OP_XOR_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
m_formatter.immediate32(imm);
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void xorq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_XOR, dst);
m_formatter.immediate8(imm);
} else {
- m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp64(OP_XOR_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_XOR, dst);
m_formatter.immediate32(imm);
}
}
+
+ void xorq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_XOR_EvGv, src, base, offset);
+ }
+
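+ // A shift or rotate by 1 uses the dedicated D1 /r form (OP_GROUP2_Ev1),
+ // saving the immediate byte.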
+ void rorq_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_ROR, dst);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_ROR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
#endif
void sarl_i8r(int imm, RegisterID dst)
{
m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
}
+
+ void shrl_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
+ else {
+ m_formatter.oneByteOp(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
+ void shrl_CLr(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHR, dst);
+ }
void shll_i8r(int imm, RegisterID dst)
{
m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void sarq_CLr(RegisterID dst)
{
m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
m_formatter.immediate8(imm);
}
}
-#endif
+
+ void shlq_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SHL, dst);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SHL, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+#endif // CPU(X86_64)
void imull_rr(RegisterID src, RegisterID dst)
{
m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
}
+#if CPU(X86_64)
+ void imulq_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.twoByteOp64(OP2_IMUL_GvEv, dst, src);
+ }
+#endif // CPU(X86_64)
+
void imull_mr(int offset, RegisterID base, RegisterID dst)
{
m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
m_formatter.immediate8(imm);
} else {
- m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp(OP_CMP_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
m_formatter.immediate32(imm);
}
}
m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
m_formatter.immediate32(imm);
}
-
+
void cmpl_im(int imm, int offset, RegisterID base)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
m_formatter.immediate32(imm);
}
}
+
+ void cmpb_im(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void cmpb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ }
+
+#if CPU(X86)
+ void cmpb_im(int imm, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_GROUP1_EbIb, GROUP1_OP_CMP, addr);
+ m_formatter.immediate8(imm);
+ }
+#endif
void cmpl_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
{
m_formatter.immediate32(imm);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void cmpq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
}
+ void cmpq_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, index, scale, offset);
+ }
+
void cmpq_mr(int offset, RegisterID base, RegisterID src)
{
m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
m_formatter.immediate8(imm);
} else {
- m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp64(OP_CMP_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
m_formatter.immediate32(imm);
}
}
}
}
#else
- void cmpl_rm(RegisterID reg, void* addr)
+ void cmpl_rm(RegisterID reg, const void* addr)
{
m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
}
- void cmpl_im(int imm, void* addr)
+ void cmpl_im(int imm, const void* addr)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
}
#endif
+ void cmpw_ir(int imm, RegisterID dst)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, dst);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, dst);
+ m_formatter.immediate16(imm);
+ }
+ }
+
void cmpw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
{
m_formatter.prefix(PRE_OPERAND_SIZE);
void testl_i32r(int imm, RegisterID dst)
{
- m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp(OP_TEST_EAXIv);
+ else
+ m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
m_formatter.immediate32(imm);
}
m_formatter.immediate32(imm);
}
+ void testb_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.oneByteOp8(OP_TEST_EbGb, src, dst);
+ }
+
+ void testb_im(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void testb_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ }
+
+#if CPU(X86)
+ void testb_im(int imm, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, addr);
+ m_formatter.immediate8(imm);
+ }
+#endif
+
void testl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
{
m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, index, scale, offset);
m_formatter.immediate32(imm);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void testq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
}
+ void testq_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_TEST_EvGv, src, base, offset);
+ }
+
void testq_i32r(int imm, RegisterID dst)
{
- m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp64(OP_TEST_EAXIv);
+ else
+ m_formatter.oneByteOp64(OP_GROUP3_EvIz, GROUP3_OP_TEST, dst);
m_formatter.immediate32(imm);
}
void testb_i8r(int imm, RegisterID dst)
{
- m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
+ if (dst == X86Registers::eax)
+ m_formatter.oneByteOp(OP_TEST_ALIb);
+ else
+ m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
m_formatter.immediate8(imm);
}
m_formatter.oneByteOp(OP_CDQ);
}
+ void fstpl(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_ESCAPE_DD, ESCAPE_DD_FSTP_doubleReal, base, offset);
+ }
+
void xchgl_rr(RegisterID src, RegisterID dst)
{
- m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
+ if (src == X86Registers::eax)
+ m_formatter.oneByteOp(OP_XCHG_EAX, dst);
+ else if (dst == X86Registers::eax)
+ m_formatter.oneByteOp(OP_XCHG_EAX, src);
+ else
+ m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void xchgq_rr(RegisterID src, RegisterID dst)
{
- m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
+ if (src == X86Registers::eax)
+ m_formatter.oneByteOp64(OP_XCHG_EAX, dst);
+ else if (dst == X86Registers::eax)
+ m_formatter.oneByteOp64(OP_XCHG_EAX, src);
+ else
+ m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
}
#endif
m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
}
- void movl_mEAX(void* addr)
+ void movl_mEAX(const void* addr)
{
m_formatter.oneByteOp(OP_MOV_EAXOv);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
m_formatter.immediate32(reinterpret_cast<int>(addr));
{
m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
}
+
+ void movl_mr_disp8(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp_disp8(OP_MOV_GvEv, dst, base, offset);
+ }
void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
{
m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
m_formatter.immediate32(imm);
}
+
+ void movl_i32m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, base, index, scale, offset);
+ m_formatter.immediate32(imm);
+ }
+
+#if !CPU(X86_64)
+ void movb_i8m(int imm, const void* addr)
+ {
+ ASSERT(-128 <= imm && imm < 128);
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, addr);
+ m_formatter.immediate8(imm);
+ }
+#endif
+
+ void movb_i8m(int imm, int offset, RegisterID base)
+ {
+ ASSERT(-128 <= imm && imm < 128);
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, offset);
+ m_formatter.immediate8(imm);
+ }
+
+ void movb_i8m(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ ASSERT(-128 <= imm && imm < 128);
+ m_formatter.oneByteOp(OP_GROUP11_EvIb, GROUP11_MOV, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ }
- void movl_EAXm(void* addr)
+#if !CPU(X86_64)
+ void movb_rm(RegisterID src, const void* addr)
+ {
+ m_formatter.oneByteOp(OP_MOV_EbGb, src, addr);
+ }
+#endif
+
+ void movb_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, offset);
+ }
+
+ void movb_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.oneByteOp8(OP_MOV_EbGb, src, base, index, scale, offset);
+ }
+
+ void movw_rm(RegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp8(OP_MOV_EvGv, src, base, index, scale, offset);
+ }
+
+ void movl_EAXm(const void* addr)
{
m_formatter.oneByteOp(OP_MOV_OvEAX);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void movq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
}
- void movq_mEAX(void* addr)
+ void movq_mEAX(const void* addr)
{
m_formatter.oneByteOp64(OP_MOV_EAXOv);
m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
}
- void movq_EAXm(void* addr)
+ void movq_EAXm(const void* addr)
{
m_formatter.oneByteOp64(OP_MOV_OvEAX);
m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
}
+ void movq_mr_disp8(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64_disp8(OP_MOV_GvEv, dst, base, offset);
+ }
+
void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
{
m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
#else
- void movl_rm(RegisterID src, void* addr)
+ void movl_rm(RegisterID src, const void* addr)
{
- if (src == X86::eax)
+ if (src == X86Registers::eax)
movl_EAXm(addr);
else
m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
}
- void movl_mr(void* addr, RegisterID dst)
+ void movl_mr(const void* addr, RegisterID dst)
{
- if (dst == X86::eax)
+ if (dst == X86Registers::eax)
movl_mEAX(addr);
else
m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
}
- void movl_i32m(int imm, void* addr)
+ void movl_i32m(int imm, const void* addr)
{
m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
m_formatter.immediate32(imm);
m_formatter.twoByteOp(OP2_MOVZX_GvEw, dst, base, index, scale, offset);
}
+ void movswl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, offset);
+ }
+
+ void movswl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVSX_GvEw, dst, base, index, scale, offset);
+ }
+
+ void movzbl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, offset);
+ }
+
+ void movzbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, base, index, scale, offset);
+ }
+
+#if !CPU(X86_64)
+ void movzbl_mr(const void* address, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVZX_GvEb, dst, address);
+ }
+#endif
+
+ void movsbl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, offset);
+ }
+
+ void movsbl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_MOVSX_GvEb, dst, base, index, scale, offset);
+ }
+
void movzbl_rr(RegisterID src, RegisterID dst)
{
// In 64-bit, this may cause an unnecessary REX to be planted (if the dst register
{
m_formatter.oneByteOp(OP_LEA, dst, base, offset);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void leaq_mr(int offset, RegisterID base, RegisterID dst)
{
m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
// Flow control:
- JmpSrc call()
+ AssemblerLabel call()
{
m_formatter.oneByteOp(OP_CALL_rel32);
return m_formatter.immediateRel32();
}
- JmpSrc call(RegisterID dst)
+ AssemblerLabel call(RegisterID dst)
{
m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
- return JmpSrc(m_formatter.size());
+ return m_formatter.label();
}
void call_m(int offset, RegisterID base)
m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
}
- JmpSrc jmp()
+ AssemblerLabel jmp()
{
m_formatter.oneByteOp(OP_JMP_rel32);
return m_formatter.immediateRel32();
}
- // Return a JmpSrc so we have a label to the jump, so we can use this
+ // Return an AssemblerLabel so we have a label to the jump, so we can use this
// to make a tail recursive call on x86-64. The MacroAssembler
// really shouldn't wrap this as a Jump, since it can't be linked. :-/
- JmpSrc jmp_r(RegisterID dst)
+ AssemblerLabel jmp_r(RegisterID dst)
{
m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
- return JmpSrc(m_formatter.size());
+ return m_formatter.label();
}
void jmp_m(int offset, RegisterID base)
{
m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
}
+
+#if !CPU(X86_64)
+ void jmp_m(const void* address)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, address);
+ }
+#endif
- JmpSrc jne()
+ AssemblerLabel jne()
{
m_formatter.twoByteOp(jccRel32(ConditionNE));
return m_formatter.immediateRel32();
}
- JmpSrc jnz()
+ AssemblerLabel jnz()
{
return jne();
}
- JmpSrc je()
+ AssemblerLabel je()
{
m_formatter.twoByteOp(jccRel32(ConditionE));
return m_formatter.immediateRel32();
}
- JmpSrc jz()
+ AssemblerLabel jz()
{
return je();
}
- JmpSrc jl()
+ AssemblerLabel jl()
{
m_formatter.twoByteOp(jccRel32(ConditionL));
return m_formatter.immediateRel32();
}
- JmpSrc jb()
+ AssemblerLabel jb()
{
m_formatter.twoByteOp(jccRel32(ConditionB));
return m_formatter.immediateRel32();
}
- JmpSrc jle()
+ AssemblerLabel jle()
{
m_formatter.twoByteOp(jccRel32(ConditionLE));
return m_formatter.immediateRel32();
}
- JmpSrc jbe()
+ AssemblerLabel jbe()
{
m_formatter.twoByteOp(jccRel32(ConditionBE));
return m_formatter.immediateRel32();
}
- JmpSrc jge()
+ AssemblerLabel jge()
{
m_formatter.twoByteOp(jccRel32(ConditionGE));
return m_formatter.immediateRel32();
}
- JmpSrc jg()
+ AssemblerLabel jg()
{
m_formatter.twoByteOp(jccRel32(ConditionG));
return m_formatter.immediateRel32();
}
- JmpSrc ja()
+ AssemblerLabel ja()
{
m_formatter.twoByteOp(jccRel32(ConditionA));
return m_formatter.immediateRel32();
}
- JmpSrc jae()
+ AssemblerLabel jae()
{
m_formatter.twoByteOp(jccRel32(ConditionAE));
return m_formatter.immediateRel32();
}
- JmpSrc jo()
+ AssemblerLabel jo()
{
m_formatter.twoByteOp(jccRel32(ConditionO));
return m_formatter.immediateRel32();
}
- JmpSrc jp()
+ AssemblerLabel jnp()
+ {
+ m_formatter.twoByteOp(jccRel32(ConditionNP));
+ return m_formatter.immediateRel32();
+ }
+
+ AssemblerLabel jp()
{
m_formatter.twoByteOp(jccRel32(ConditionP));
return m_formatter.immediateRel32();
}
- JmpSrc js()
+ AssemblerLabel js()
{
m_formatter.twoByteOp(jccRel32(ConditionS));
return m_formatter.immediateRel32();
}
- JmpSrc jCC(Condition cond)
+ AssemblerLabel jCC(Condition cond)
{
m_formatter.twoByteOp(jccRel32(cond));
return m_formatter.immediateRel32();
m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, base, offset);
}
+#if !CPU(X86_64)
+ void addsd_mr(const void* address, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_ADDSD_VsdWsd, (RegisterID)dst, address);
+ }
+#endif
+
void cvtsi2sd_rr(RegisterID src, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
}
+#if CPU(X86_64)
+ void cvtsi2sdq_rr(RegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp64(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
+ }
+#endif
+
void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
}
-#if !PLATFORM(X86_64)
- void cvtsi2sd_mr(void* address, XMMRegisterID dst)
+#if !CPU(X86_64)
+ void cvtsi2sd_mr(const void* address, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
m_formatter.twoByteOp(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
}
+ void cvtsd2ss_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSD2SS_VsdWsd, dst, (RegisterID)src);
+ }
+
+ void cvtss2sd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F3);
+ m_formatter.twoByteOp(OP2_CVTSS2SD_VsdWsd, dst, (RegisterID)src);
+ }
+
+#if CPU(X86_64)
+ void cvttsd2siq_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp64(OP2_CVTTSD2SI_GdWsd, dst, (RegisterID)src);
+ }
+#endif
+
void movd_rr(XMMRegisterID src, RegisterID dst)
{
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
}
-#if PLATFORM(X86_64)
+ void movd_rr(RegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_MOVD_VdEd, (RegisterID)dst, src);
+ }
+
+#if CPU(X86_64)
void movq_rr(XMMRegisterID src, RegisterID dst)
{
m_formatter.prefix(PRE_SSE_66);
}
#endif
+ void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
{
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, offset);
}
-
+
+ void movsd_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, base, index, scale, offset);
+ }
+
+ void movss_rm(XMMRegisterID src, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ m_formatter.prefix(PRE_SSE_F3);
+ m_formatter.twoByteOp(OP2_MOVSS_WsdVsd, (RegisterID)src, base, index, scale, offset);
+ }
+
void movsd_mr(int offset, RegisterID base, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
}
-#if !PLATFORM(X86_64)
- void movsd_mr(void* address, XMMRegisterID dst)
+ void movsd_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, dst, base, index, scale, offset);
+ }
+
+ void movss_mr(int offset, RegisterID base, RegisterID index, int scale, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F3);
+ m_formatter.twoByteOp(OP2_MOVSS_VsdWsd, dst, base, index, scale, offset);
+ }
+
+#if !CPU(X86_64)
+ void movsd_mr(const void* address, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
}
+ void movsd_rm(XMMRegisterID src, const void* address)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_WsdVsd, (RegisterID)src, address);
+ }
#endif
void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
m_formatter.immediate8(whichWord);
}
+ void psllq_i8r(int imm, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp8(OP2_PSLLQ_UdqIb, GROUP14_OP_PSLLQ, (RegisterID)dst);
+ m_formatter.immediate8(imm);
+ }
+
+ void psrlq_i8r(int imm, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp8(OP2_PSRLQ_UdqIb, GROUP14_OP_PSRLQ, (RegisterID)dst);
+ m_formatter.immediate8(imm);
+ }
+
+ void por_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_POR_VdqWdq, (RegisterID)dst, (RegisterID)src);
+ }
+
void subsd_rr(XMMRegisterID src, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
}
+ void andnpd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_ANDNPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void sqrtsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_SQRTSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
// Misc instructions:
void int3()
{
m_formatter.prefix(PRE_PREDICT_BRANCH_NOT_TAKEN);
}
+
+ void mfence()
+ {
+ m_formatter.threeByteOp(OP3_MFENCE);
+ }
// Assembler admin methods:
- JmpDst label()
+ size_t codeSize() const
{
- return JmpDst(m_formatter.size());
+ return m_formatter.codeSize();
}
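+ // A watchpoint reserves maxJumpReplacementSize() bytes after its label;
+ // label() pads with nops past that tail so that patching the watchpoint
+ // into a jump can never overwrite a separately labeled instruction.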
- static JmpDst labelFor(JmpSrc jump, intptr_t offset = 0)
+ AssemblerLabel labelForWatchpoint()
{
- return JmpDst(jump.m_offset + offset);
+ AssemblerLabel result = m_formatter.label();
+ if (static_cast<int>(result.m_offset) != m_indexOfLastWatchpoint)
+ result = label();
+ m_indexOfLastWatchpoint = result.m_offset;
+ m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize();
+ return result;
}
- JmpDst align(int alignment)
+ AssemblerLabel labelIgnoringWatchpoints()
+ {
+ return m_formatter.label();
+ }
+
+ AssemblerLabel label()
+ {
+ AssemblerLabel result = m_formatter.label();
+ while (UNLIKELY(static_cast<int>(result.m_offset) < m_indexOfTailOfLastWatchpoint)) {
+ nop();
+ result = m_formatter.label();
+ }
+ return result;
+ }
+
+ AssemblerLabel align(int alignment)
{
while (!m_formatter.isAligned(alignment))
m_formatter.oneByteOp(OP_HLT);
// writable region of memory; to modify the code in an execute-only executable
// pool the 'repatch' and 'relink' methods should be used.
- void linkJump(JmpSrc from, JmpDst to)
+ void linkJump(AssemblerLabel from, AssemblerLabel to)
{
- ASSERT(from.m_offset != -1);
- ASSERT(to.m_offset != -1);
+ ASSERT(from.isSet());
+ ASSERT(to.isSet());
char* code = reinterpret_cast<char*>(m_formatter.data());
+ ASSERT(!reinterpret_cast<int32_t*>(code + from.m_offset)[-1]);
setRel32(code + from.m_offset, code + to.m_offset);
}
- static void linkJump(void* code, JmpSrc from, void* to)
+ static void linkJump(void* code, AssemblerLabel from, void* to)
{
- ASSERT(from.m_offset != -1);
+ ASSERT(from.isSet());
setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
}
- static void linkCall(void* code, JmpSrc from, void* to)
+ static void linkCall(void* code, AssemblerLabel from, void* to)
{
- ASSERT(from.m_offset != -1);
+ ASSERT(from.isSet());
setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
}
- static void linkPointer(void* code, JmpDst where, void* value)
+ static void linkPointer(void* code, AssemblerLabel where, void* value)
{
- ASSERT(where.m_offset != -1);
+ ASSERT(where.isSet());
setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
}
{
setRel32(from, to);
}
+
+ static void repatchCompact(void* where, int32_t value)
+ {
+ ASSERT(value >= std::numeric_limits<int8_t>::min());
+ ASSERT(value <= std::numeric_limits<int8_t>::max());
+ setInt8(where, value);
+ }
static void repatchInt32(void* where, int32_t value)
{
{
setPointer(where, value);
}
-
- static void repatchLoadPtrToLEA(void* where)
- {
-#if PLATFORM(X86_64)
- // On x86-64 pointer memory accesses require a 64-bit operand, and as such a REX prefix.
- // Skip over the prefix byte.
- where = reinterpret_cast<char*>(where) + 1;
-#endif
- *reinterpret_cast<unsigned char*>(where) = static_cast<unsigned char>(OP_LEA);
- }
- static unsigned getCallReturnOffset(JmpSrc call)
+ static void* readPointer(void* where)
{
- ASSERT(call.m_offset >= 0);
- return call.m_offset;
+ return reinterpret_cast<void**>(where)[-1];
}
- static void* getRelocatedAddress(void* code, JmpSrc jump)
+ static void replaceWithJump(void* instructionStart, void* to)
{
- ASSERT(jump.m_offset != -1);
-
- return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ uint8_t* dstPtr = reinterpret_cast<uint8_t*>(to);
+ intptr_t distance = (intptr_t)(dstPtr - (ptr + 5));
+ ptr[0] = static_cast<uint8_t>(OP_JMP_rel32);
+ *reinterpret_cast<int32_t*>(ptr + 1) = static_cast<int32_t>(distance);
}
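+ // JMP rel32 (E9) plus its 32-bit displacement is 5 bytes;
+ // maxJumpReplacementSize() reflects that.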
- static void* getRelocatedAddress(void* code, JmpDst destination)
+ static ptrdiff_t maxJumpReplacementSize()
{
- ASSERT(destination.m_offset != -1);
+ return 5;
+ }
+
+#if CPU(X86_64)
+ static void revertJumpTo_movq_i64r(void* instructionStart, int64_t imm, RegisterID dst)
+ {
+ const unsigned instructionSize = 10; // REX.W MOV IMM64
+ const int rexBytes = 1;
+ const int opcodeBytes = 1;
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ ptr[0] = PRE_REX | (1 << 3) | (dst >> 3);
+ ptr[1] = OP_MOV_EAXIv | (dst & 7);
+
+ union {
+ uint64_t asWord;
+ uint8_t asBytes[8];
+ } u;
+ u.asWord = imm;
+ for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i)
+ ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
+ }
+
+ static void revertJumpTo_movl_i32r(void* instructionStart, int32_t imm, RegisterID dst)
+ {
+ // We only revert jumps on inline caches, and inline caches always use the scratch register (r11).
+ // FIXME: If the above is ever false then we need to make this smarter with respect to emitting
+ // the REX byte.
+ ASSERT(dst == X86Registers::r11);
+ const unsigned instructionSize = 6; // REX MOV IMM32
+ const int rexBytes = 1;
+ const int opcodeBytes = 1;
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ ptr[0] = PRE_REX | (dst >> 3);
+ ptr[1] = OP_MOV_EAXIv | (dst & 7);
+
+ union {
+ uint32_t asWord;
+ uint8_t asBytes[4];
+ } u;
+ u.asWord = imm;
+ for (unsigned i = rexBytes + opcodeBytes; i < instructionSize; ++i)
+ ptr[i] = u.asBytes[i - rexBytes - opcodeBytes];
+ }
+#endif
- return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
+ static void revertJumpTo_cmpl_ir_force32(void* instructionStart, int32_t imm, RegisterID dst)
+ {
+ const int opcodeBytes = 1;
+ const int modRMBytes = 1;
+ ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ ptr[0] = OP_GROUP1_EvIz;
+ ptr[1] = (X86InstructionFormatter::ModRmRegister << 6) | (GROUP1_OP_CMP << 3) | dst;
+ union {
+ uint32_t asWord;
+ uint8_t asBytes[4];
+ } u;
+ u.asWord = imm;
+ for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
+ ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
+ }
+
+ static void revertJumpTo_cmpl_im_force32(void* instructionStart, int32_t imm, int offset, RegisterID dst)
+ {
+ ASSERT_UNUSED(offset, !offset);
+ const int opcodeBytes = 1;
+ const int modRMBytes = 1;
+ ASSERT(opcodeBytes + modRMBytes <= maxJumpReplacementSize());
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+ ptr[0] = OP_GROUP1_EvIz;
+ ptr[1] = (X86InstructionFormatter::ModRmMemoryNoDisp << 6) | (GROUP1_OP_CMP << 3) | dst;
+ union {
+ uint32_t asWord;
+ uint8_t asBytes[4];
+ } u;
+ u.asWord = imm;
+ for (unsigned i = opcodeBytes + modRMBytes; i < static_cast<unsigned>(maxJumpReplacementSize()); ++i)
+ ptr[i] = u.asBytes[i - opcodeBytes - modRMBytes];
}
- static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
+ static void replaceWithLoad(void* instructionStart)
{
- return dst.m_offset - src.m_offset;
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+#if CPU(X86_64)
+ if ((*ptr & ~15) == PRE_REX)
+ ptr++;
+#endif
+ switch (*ptr) {
+ case OP_MOV_GvEv:
+ break;
+ case OP_LEA:
+ *ptr = OP_MOV_GvEv;
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
}
- static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
+ static void replaceWithAddressComputation(void* instructionStart)
{
- return dst.m_offset - src.m_offset;
+ uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart);
+#if CPU(X86_64)
+ if ((*ptr & ~15) == PRE_REX)
+ ptr++;
+#endif
+ switch (*ptr) {
+ case OP_MOV_GvEv:
+ *ptr = OP_LEA;
+ break;
+ case OP_LEA:
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
}
- static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
+ static unsigned getCallReturnOffset(AssemblerLabel call)
{
- return dst.m_offset - src.m_offset;
+ ASSERT(call.isSet());
+ return call.m_offset;
+ }
+
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
+ {
+ ASSERT(label.isSet());
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
}
- void* executableCopy(ExecutablePool* allocator)
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
{
- void* copy = m_formatter.executableCopy(allocator);
- ASSERT(copy);
- return copy;
+ return b.m_offset - a.m_offset;
}
+
+ unsigned debugOffset() { return m_formatter.debugOffset(); }
+
+ void nop()
+ {
+ m_formatter.oneByteOp(OP_NOP);
+ }
+
+ static void fillNops(void* base, size_t size)
+ {
+#if CPU(X86_64)
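+ // Multi-byte NOP sequences as recommended in the Intel SDM, indexed by
+ // length - 1; runs longer than 10 bytes are covered by prepending 0x66
+ // prefixes, up to the 15-byte instruction limit.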
+ static const uint8_t nops[10][10] = {
+ // nop
+ {0x90},
+ // xchg %ax,%ax
+ {0x66, 0x90},
+ // nopl (%[re]ax)
+ {0x0f, 0x1f, 0x00},
+ // nopl 8(%[re]ax)
+ {0x0f, 0x1f, 0x40, 0x08},
+ // nopl 8(%[re]ax,%[re]ax,1)
+ {0x0f, 0x1f, 0x44, 0x00, 0x08},
+ // nopw 8(%[re]ax,%[re]ax,1)
+ {0x66, 0x0f, 0x1f, 0x44, 0x00, 0x08},
+ // nopl 512(%[re]ax)
+ {0x0f, 0x1f, 0x80, 0x00, 0x02, 0x00, 0x00},
+ // nopl 512(%[re]ax,%[re]ax,1)
+ {0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00},
+ // nopw 512(%[re]ax,%[re]ax,1)
+ {0x66, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00},
+ // nopw %cs:512(%[re]ax,%[re]ax,1)
+ {0x66, 0x2e, 0x0f, 0x1f, 0x84, 0x00, 0x00, 0x02, 0x00, 0x00}
+ };
+
+ uint8_t* where = reinterpret_cast<uint8_t*>(base);
+ while (size) {
+ unsigned nopSize = static_cast<unsigned>(std::min<size_t>(size, 15));
+ unsigned numPrefixes = nopSize <= 10 ? 0 : nopSize - 10;
+ for (unsigned i = 0; i != numPrefixes; ++i)
+ *where++ = 0x66;
+
+ unsigned nopRest = nopSize - numPrefixes;
+ for (unsigned i = 0; i != nopRest; ++i)
+ *where++ = nops[nopRest-1][i];
+
+ size -= nopSize;
+ }
+#else
+ memset(base, OP_NOP, size);
+#endif
+ }
+
+ // This is a no-op on x86
+ ALWAYS_INLINE static void cacheFlush(void*, size_t) { }
private:
{
reinterpret_cast<int32_t*>(where)[-1] = value;
}
+
+ static void setInt8(void* where, int8_t value)
+ {
+ reinterpret_cast<int8_t*>(where)[-1] = value;
+ }
static void setRel32(void* from, void* to)
{
public:
+ enum ModRmMode {
+ ModRmMemoryNoDisp,
+ ModRmMemoryDisp8,
+ ModRmMemoryDisp32,
+ ModRmRegister,
+ };
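+
+ // ModRmMode is public so that the revertJumpTo_cmpl_*_force32() helpers
+ // above can hand-assemble ModRM bytes.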
+
// Legacy prefix bytes:
//
// These are emitted prior to the instruction.
m_buffer.putByteUnchecked(opcode);
memoryModRM_disp32(reg, base, offset);
}
+
+ void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp8(reg, base, offset);
+ }
void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
{
memoryModRM(reg, base, index, scale, offset);
}
-#if !PLATFORM(X86_64)
- void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
+#if !CPU(X86_64)
+ void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
{
m_buffer.ensureSpace(maxInstructionSize);
m_buffer.putByteUnchecked(opcode);
memoryModRM(reg, base, index, scale, offset);
}
-#if !PLATFORM(X86_64)
- void twoByteOp(TwoByteOpcodeID opcode, int reg, void* address)
+#if !CPU(X86_64)
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, const void* address)
{
m_buffer.ensureSpace(maxInstructionSize);
m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
}
#endif
-#if PLATFORM(X86_64)
+ void threeByteOp(ThreeByteOpcodeID opcode)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(OP2_3BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ }
+
+#if CPU(X86_64)
// Quad-word-sized operands:
//
// Used to format 64-bit operations, planting a REX.w prefix.
m_buffer.putByteUnchecked(opcode);
memoryModRM_disp32(reg, base, offset);
}
+
+ void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp8(reg, base, offset);
+ }
void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
{
registerModRM(groupOp, rm);
}
+ void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID rm)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(rm), reg, 0, rm);
+ m_buffer.putByteUnchecked(opcode);
+ registerModRM(reg, rm);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg) || byteRegRequiresRex(base), reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, offset);
+ }
+
+ void oneByteOp8(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIf(byteRegRequiresRex(reg) || regRequiresRex(index) || regRequiresRex(base), reg, index, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, base, index, scale, offset);
+ }
+
void twoByteOp8(TwoByteOpcodeID opcode, RegisterID reg, RegisterID rm)
{
m_buffer.ensureSpace(maxInstructionSize);
m_buffer.putInt64Unchecked(imm);
}
- JmpSrc immediateRel32()
+ AssemblerLabel immediateRel32()
{
m_buffer.putIntUnchecked(0);
- return JmpSrc(m_buffer.size());
+ return label();
}
// Administrative methods:
- size_t size() const { return m_buffer.size(); }
+ size_t codeSize() const { return m_buffer.codeSize(); }
+ AssemblerLabel label() const { return m_buffer.label(); }
bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
void* data() const { return m_buffer.data(); }
- void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
+
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
private:
// Internals; ModRm and REX formatters.
- static const RegisterID noBase = X86::ebp;
- static const RegisterID hasSib = X86::esp;
- static const RegisterID noIndex = X86::esp;
-#if PLATFORM(X86_64)
- static const RegisterID noBase2 = X86::r13;
- static const RegisterID hasSib2 = X86::r12;
+ static const RegisterID noBase = X86Registers::ebp;
+ static const RegisterID hasSib = X86Registers::esp;
+ static const RegisterID noIndex = X86Registers::esp;
+#if CPU(X86_64)
+ static const RegisterID noBase2 = X86Registers::r13;
+ static const RegisterID hasSib2 = X86Registers::r12;
// Registers r8 & above require a REX prefix.
inline bool regRequiresRex(int reg)
{
- return (reg >= X86::r8);
+ return (reg >= X86Registers::r8);
}
// Byte operand registers spl & above require a REX prefix (to prevent the 'H' registers from being accessed).
inline bool byteRegRequiresRex(int reg)
{
- return (reg >= X86::esp);
+ return (reg >= X86Registers::esp);
}
// Format a REX prefix byte.
inline void emitRex(bool w, int r, int x, int b)
{
+ ASSERT(r >= 0);
+ ASSERT(x >= 0);
+ ASSERT(b >= 0);
m_buffer.putByteUnchecked(PRE_REX | ((int)w << 3) | ((r>>3)<<2) | ((x>>3)<<1) | (b>>3));
}
inline void emitRexIfNeeded(int, int, int) {}
#endif
- enum ModRmMode {
- ModRmMemoryNoDisp,
- ModRmMemoryDisp8,
- ModRmMemoryDisp32,
- ModRmRegister,
- };
-
void putModRm(ModRmMode mode, int reg, RegisterID rm)
{
m_buffer.putByteUnchecked((mode << 6) | ((reg & 7) << 3) | (rm & 7));
void memoryModRM(int reg, RegisterID base, int offset)
{
// A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
if ((base == hasSib) || (base == hasSib2)) {
#else
if (base == hasSib) {
m_buffer.putIntUnchecked(offset);
}
} else {
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
if (!offset && (base != noBase) && (base != noBase2))
#else
if (!offset && (base != noBase))
}
}
}
-
+
+ void memoryModRM_disp8(int reg, RegisterID base, int offset)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+ ASSERT(CAN_SIGN_EXTEND_8_32(offset));
+#if CPU(X86_64)
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp8, reg, base);
+ m_buffer.putByteUnchecked(offset);
+ }
+ }
+
void memoryModRM_disp32(int reg, RegisterID base, int offset)
{
// A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
if ((base == hasSib) || (base == hasSib2)) {
#else
if (base == hasSib) {
{
ASSERT(index != noIndex);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
if (!offset && (base != noBase) && (base != noBase2))
#else
if (!offset && (base != noBase))
}
}
-#if !PLATFORM(X86_64)
- void memoryModRM(int reg, void* address)
+#if !CPU(X86_64)
+ void memoryModRM(int reg, const void* address)
{
// noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
putModRm(ModRmMemoryNoDisp, reg, noBase);
}
#endif
+ public:
AssemblerBuffer m_buffer;
} m_formatter;
+ int m_indexOfLastWatchpoint;
+ int m_indexOfTailOfLastWatchpoint;
};
} // namespace JSC
-#endif // ENABLE(ASSEMBLER) && PLATFORM(X86)
+#endif // ENABLE(ASSEMBLER) && CPU(X86)
#endif // X86Assembler_h