#include <wtf/Platform.h>
-#if ENABLE(ASSEMBLER) && (PLATFORM(X86) || PLATFORM(X86_64))
+#if ENABLE(ASSEMBLER) && (CPU(X86) || CPU(X86_64))
#include "AssemblerBuffer.h"
#include <stdint.h>
namespace JSC {
inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value == (int32_t)(signed char)value; }
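+// A quick example of the predicate: 127 and -128 round-trip through
+// (signed char) unchanged, so they may use the short sign-extended imm8
+// encodings below; 128 does not, and must take the imm32 form.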
-#if PLATFORM(X86_64)
-inline bool CAN_SIGN_EXTEND_32_64(intptr_t value) { return value == (intptr_t)(int32_t)value; }
-inline bool CAN_SIGN_EXTEND_U32_64(intptr_t value) { return value == (intptr_t)(uint32_t)value; }
-#endif
-namespace X86 {
+namespace X86Registers {
typedef enum {
eax,
ecx,
esi,
edi,
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
r8,
r9,
r10,
class X86Assembler {
public:
- typedef X86::RegisterID RegisterID;
- typedef X86::XMMRegisterID XMMRegisterID;
+ typedef X86Registers::RegisterID RegisterID;
+ typedef X86Registers::XMMRegisterID XMMRegisterID;
+ typedef XMMRegisterID FPRegisterID;
+
+ typedef enum {
+ ConditionO,
+ ConditionNO,
+ ConditionB,
+ ConditionAE,
+ ConditionE,
+ ConditionNE,
+ ConditionBE,
+ ConditionA,
+ ConditionS,
+ ConditionNS,
+ ConditionP,
+ ConditionNP,
+ ConditionL,
+ ConditionGE,
+ ConditionLE,
+ ConditionG,
+
+ ConditionC = ConditionB,
+ ConditionNC = ConditionAE,
+ } Condition;
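+
+ // The Condition values above match the x86 condition-code ('tttn')
+ // encoding: Jcc rel32 is 0F 80+cc and SETcc is 0F 90+cc, which is what
+ // lets jccRel32() and setccOpcode() below derive opcodes by addition.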
+private:
typedef enum {
OP_ADD_EvGv = 0x01,
OP_ADD_GvEv = 0x03,
OP_OR_GvEv = 0x0B,
OP_2BYTE_ESCAPE = 0x0F,
OP_AND_EvGv = 0x21,
+ OP_AND_GvEv = 0x23,
OP_SUB_EvGv = 0x29,
OP_SUB_GvEv = 0x2B,
PRE_PREDICT_BRANCH_NOT_TAKEN = 0x2E,
OP_XOR_EvGv = 0x31,
+ OP_XOR_GvEv = 0x33,
OP_CMP_EvGv = 0x39,
OP_CMP_GvEv = 0x3B,
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
PRE_REX = 0x40,
#endif
OP_PUSH_EAX = 0x50,
OP_POP_EAX = 0x58,
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
OP_MOVSXD_GvEv = 0x63,
#endif
PRE_OPERAND_SIZE = 0x66,
OP2_ADDSD_VsdWsd = 0x58,
OP2_MULSD_VsdWsd = 0x59,
OP2_SUBSD_VsdWsd = 0x5C,
+ OP2_DIVSD_VsdWsd = 0x5E,
+ OP2_XORPD_VpdWpd = 0x57,
OP2_MOVD_VdEd = 0x6E,
OP2_MOVD_EdVd = 0x7E,
- OP2_JO_rel32 = 0x80,
- OP2_JB_rel32 = 0x82,
- OP2_JAE_rel32 = 0x83,
- OP2_JE_rel32 = 0x84,
- OP2_JNE_rel32 = 0x85,
- OP2_JBE_rel32 = 0x86,
- OP2_JA_rel32 = 0x87,
- OP2_JS_rel32 = 0x88,
- OP2_JP_rel32 = 0x8A,
- OP2_JL_rel32 = 0x8C,
- OP2_JGE_rel32 = 0x8D,
- OP2_JLE_rel32 = 0x8E,
- OP2_JG_rel32 = 0x8F,
- OP_SETE = 0x94,
- OP_SETNE = 0x95,
+ OP2_JCC_rel32 = 0x80,
+ OP_SETCC = 0x90,
OP2_IMUL_GvEv = 0xAF,
OP2_MOVZX_GvEb = 0xB6,
OP2_MOVZX_GvEw = 0xB7,
OP2_PEXTRW_GdUdIb = 0xC5,
} TwoByteOpcodeID;
+ TwoByteOpcodeID jccRel32(Condition cond)
+ {
+ return (TwoByteOpcodeID)(OP2_JCC_rel32 + cond);
+ }
+
+ TwoByteOpcodeID setccOpcode(Condition cond)
+ {
+ return (TwoByteOpcodeID)(OP_SETCC + cond);
+ }
+
typedef enum {
GROUP1_OP_ADD = 0,
GROUP1_OP_OR = 1,
+ GROUP1_OP_ADC = 2,
GROUP1_OP_AND = 4,
GROUP1_OP_SUB = 5,
GROUP1_OP_XOR = 6,
GROUP3_OP_TEST = 0,
GROUP3_OP_NOT = 2,
+ GROUP3_OP_NEG = 3,
GROUP3_OP_IDIV = 7,
GROUP5_OP_CALLN = 2,
GROUP11_MOV = 0,
} GroupOpcodeID;
- // Opaque label types
-
-private:
class X86InstructionFormatter;
public:
public:
JmpDst()
: m_offset(-1)
+ , m_used(false)
{
}
+ bool isUsed() const { return m_used; }
+ void used() { m_used = true; }
private:
JmpDst(int offset)
: m_offset(offset)
+ , m_used(false)
{
+ ASSERT(m_offset == offset);
}
- int m_offset;
+ int m_offset : 31;
+ bool m_used : 1;
};
X86Assembler()
// Arithmetic operations:
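+
+ // Most immediate forms below choose between two encodings: when the
+ // immediate passes CAN_SIGN_EXTEND_8_32 the short OP_GROUP1_EvIb (0x83)
+ // form is emitted and the CPU sign-extends the byte; otherwise the full
+ // OP_GROUP1_EvIz (0x81) imm32 form is used.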
+#if !CPU(X86_64)
+ void adcl_im(int imm, void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_ADC, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
+#endif
+
void addl_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp(OP_ADD_EvGv, src, dst);
m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
}
+ void addl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_ADD_EvGv, src, base, offset);
+ }
+
void addl_ir(int imm, RegisterID dst)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void addq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
m_formatter.immediate32(imm);
}
}
+
+ void addq_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIb, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp64(OP_GROUP1_EvIz, GROUP1_OP_ADD, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
#else
void addl_im(int imm, void* addr)
{
m_formatter.oneByteOp(OP_AND_EvGv, src, dst);
}
+ void andl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_AND_GvEv, dst, base, offset);
+ }
+
+ void andl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_AND_EvGv, src, base, offset);
+ }
+
void andl_ir(int imm, RegisterID dst)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
}
}
-#if PLATFORM(X86_64)
+ void andl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
void andq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_AND_EvGv, src, dst);
m_formatter.immediate32(imm);
}
}
+#else
+ void andl_im(int imm, void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_AND, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
#endif
+ void negl_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
+ }
+
+ void negl_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
+ }
+
void notl_r(RegisterID dst)
{
m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, dst);
}
+ void notl_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NOT, base, offset);
+ }
+
void orl_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp(OP_OR_EvGv, src, dst);
m_formatter.oneByteOp(OP_OR_GvEv, dst, base, offset);
}
+ void orl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_OR_EvGv, src, base, offset);
+ }
+
void orl_ir(int imm, RegisterID dst)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
}
}
-#if PLATFORM(X86_64)
+ void orl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
+#if CPU(X86_64)
void orq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_OR_EvGv, src, dst);
m_formatter.immediate32(imm);
}
}
+#else
+ void orl_im(int imm, void* addr)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_OR, addr);
+ m_formatter.immediate32(imm);
+ }
+ }
#endif
void subl_rr(RegisterID src, RegisterID dst)
m_formatter.oneByteOp(OP_SUB_GvEv, dst, base, offset);
}
+ void subl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_SUB_EvGv, src, base, offset);
+ }
+
void subl_ir(int imm, RegisterID dst)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void subq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_SUB_EvGv, src, dst);
m_formatter.oneByteOp(OP_XOR_EvGv, src, dst);
}
+ void xorl_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_XOR_GvEv, dst, base, offset);
+ }
+
+ void xorl_rm(RegisterID src, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_XOR_EvGv, src, base, offset);
+ }
+
+ void xorl_im(int imm, int offset, RegisterID base)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_XOR, base, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_XOR, base, offset);
+ m_formatter.immediate32(imm);
+ }
+ }
+
void xorl_ir(int imm, RegisterID dst)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
}
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void xorq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_XOR_EvGv, src, dst);
m_formatter.oneByteOp(OP_GROUP2_EvCL, GROUP2_OP_SHL, dst);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void sarq_CLr(RegisterID dst)
{
m_formatter.oneByteOp64(OP_GROUP2_EvCL, GROUP2_OP_SAR, dst);
{
m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, src);
}
-
+
+ void imull_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_IMUL_GvEv, dst, base, offset);
+ }
+
void imull_i32r(RegisterID src, int32_t value, RegisterID dst)
{
m_formatter.oneByteOp(OP_IMUL_GvEvIz, dst, src);
m_formatter.immediate32(imm);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void cmpq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_CMP_EvGv, src, dst);
m_formatter.oneByteOp64(OP_CMP_EvGv, src, base, offset);
}
+ void cmpq_mr(int offset, RegisterID base, RegisterID src)
+ {
+ m_formatter.oneByteOp64(OP_CMP_GvEv, src, base, offset);
+ }
+
void cmpq_ir(int imm, RegisterID dst)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
m_formatter.oneByteOp(OP_CMP_EvGv, src, base, index, scale, offset);
}
+ void cmpw_im(int imm, int offset, RegisterID base, RegisterID index, int scale)
+ {
+ if (CAN_SIGN_EXTEND_8_32(imm)) {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate8(imm);
+ } else {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_GROUP1_EvIz, GROUP1_OP_CMP, base, index, scale, offset);
+ m_formatter.immediate16(imm);
+ }
+ }
+
void testl_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
m_formatter.immediate32(imm);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void testq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_TEST_EvGv, src, dst);
}
#endif
+ void testw_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_OPERAND_SIZE);
+ m_formatter.oneByteOp(OP_TEST_EvGv, src, dst);
+ }
+
void testb_i8r(int imm, RegisterID dst)
{
m_formatter.oneByteOp8(OP_GROUP3_EbIb, GROUP3_OP_TEST, dst);
m_formatter.immediate8(imm);
}
+ void setCC_r(Condition cond, RegisterID dst)
+ {
+ m_formatter.twoByteOp8(setccOpcode(cond), (GroupOpcodeID)0, dst);
+ }
+
void sete_r(RegisterID dst)
{
- m_formatter.twoByteOp8(OP_SETE, (GroupOpcodeID)0, dst);
+ m_formatter.twoByteOp8(setccOpcode(ConditionE), (GroupOpcodeID)0, dst);
}
void setz_r(RegisterID dst)
void setne_r(RegisterID dst)
{
- m_formatter.twoByteOp8(OP_SETNE, (GroupOpcodeID)0, dst);
+ m_formatter.twoByteOp8(setccOpcode(ConditionNE), (GroupOpcodeID)0, dst);
}
void setnz_r(RegisterID dst)
m_formatter.oneByteOp(OP_XCHG_EvGv, src, dst);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void xchgq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_XCHG_EvGv, src, dst);
void movl_mEAX(void* addr)
{
m_formatter.oneByteOp(OP_MOV_EAXOv);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
m_formatter.immediate32(reinterpret_cast<int>(addr));
void movl_EAXm(void* addr)
{
m_formatter.oneByteOp(OP_MOV_OvEAX);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
#else
m_formatter.immediate32(reinterpret_cast<int>(addr));
#endif
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void movq_rr(RegisterID src, RegisterID dst)
{
m_formatter.oneByteOp64(OP_MOV_EvGv, src, dst);
m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
}
+ void movq_EAXm(void* addr)
+ {
+ m_formatter.oneByteOp64(OP_MOV_OvEAX);
+ m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
+ }
+
void movq_mr(int offset, RegisterID base, RegisterID dst)
{
m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, offset);
m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
}
+ void movq_i32m(int imm, int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_GROUP11_EvIz, GROUP11_MOV, base, offset);
+ m_formatter.immediate32(imm);
+ }
+
void movq_i64r(int64_t imm, RegisterID dst)
{
m_formatter.oneByteOp64(OP_MOV_EAXIv, dst);
#else
+ void movl_rm(RegisterID src, void* addr)
+ {
+ if (src == X86Registers::eax)
+ movl_EAXm(addr);
+ else
+ m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
+ }
+
void movl_mr(void* addr, RegisterID dst)
{
- if (dst == X86::eax)
+ if (dst == X86Registers::eax)
movl_mEAX(addr);
else
m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
{
m_formatter.oneByteOp(OP_LEA, dst, base, offset);
}
+#if CPU(X86_64)
+ void leaq_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_LEA, dst, base, offset);
+ }
+#endif
// Flow control:
m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
return JmpSrc(m_formatter.size());
}
+
+ void call_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
+ }
JmpSrc jmp()
{
return m_formatter.immediateRel32();
}
- void jmp_r(RegisterID dst)
+ // Return a JmpSrc so we have a label to the jump, so we can use this
+ // to make a tail recursive call on x86-64. The MacroAssembler
+ // really shouldn't wrap this as a Jump, since it can't be linked. :-/
+ JmpSrc jmp_r(RegisterID dst)
{
m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
+ return JmpSrc(m_formatter.size());
}
void jmp_m(int offset, RegisterID base)
JmpSrc jne()
{
- m_formatter.twoByteOp(OP2_JNE_rel32);
+ m_formatter.twoByteOp(jccRel32(ConditionNE));
return m_formatter.immediateRel32();
}
JmpSrc je()
{
- m_formatter.twoByteOp(OP2_JE_rel32);
+ m_formatter.twoByteOp(jccRel32(ConditionE));
return m_formatter.immediateRel32();
}
+ JmpSrc jz()
+ {
+ return je();
+ }
+
JmpSrc jl()
{
- m_formatter.twoByteOp(OP2_JL_rel32);
+ m_formatter.twoByteOp(jccRel32(ConditionL));
return m_formatter.immediateRel32();
}
JmpSrc jb()
{
- m_formatter.twoByteOp(OP2_JB_rel32);
+ m_formatter.twoByteOp(jccRel32(ConditionB));
return m_formatter.immediateRel32();
}
JmpSrc jle()
{
- m_formatter.twoByteOp(OP2_JLE_rel32);
+ m_formatter.twoByteOp(jccRel32(ConditionLE));
return m_formatter.immediateRel32();
}
JmpSrc jbe()
{
- m_formatter.twoByteOp(OP2_JBE_rel32);
+ m_formatter.twoByteOp(jccRel32(ConditionBE));
return m_formatter.immediateRel32();
}
JmpSrc jge()
{
- m_formatter.twoByteOp(OP2_JGE_rel32);
+ m_formatter.twoByteOp(jccRel32(ConditionGE));
return m_formatter.immediateRel32();
}
JmpSrc jg()
{
- m_formatter.twoByteOp(OP2_JG_rel32);
+ m_formatter.twoByteOp(jccRel32(ConditionG));
return m_formatter.immediateRel32();
}
JmpSrc ja()
{
- m_formatter.twoByteOp(OP2_JA_rel32);
+ m_formatter.twoByteOp(jccRel32(ConditionA));
return m_formatter.immediateRel32();
}
JmpSrc jae()
{
- m_formatter.twoByteOp(OP2_JAE_rel32);
+ m_formatter.twoByteOp(jccRel32(ConditionAE));
return m_formatter.immediateRel32();
}
JmpSrc jo()
{
- m_formatter.twoByteOp(OP2_JO_rel32);
+ m_formatter.twoByteOp(jccRel32(ConditionO));
return m_formatter.immediateRel32();
}
JmpSrc jp()
{
- m_formatter.twoByteOp(OP2_JP_rel32);
+ m_formatter.twoByteOp(jccRel32(ConditionP));
return m_formatter.immediateRel32();
}
JmpSrc js()
{
- m_formatter.twoByteOp(OP2_JS_rel32);
+ m_formatter.twoByteOp(jccRel32(ConditionS));
+ return m_formatter.immediateRel32();
+ }
+
+ JmpSrc jCC(Condition cond)
+ {
+ m_formatter.twoByteOp(jccRel32(cond));
return m_formatter.immediateRel32();
}
m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, src);
}
+ void cvtsi2sd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, base, offset);
+ }
+
+#if !CPU(X86_64)
+ void cvtsi2sd_mr(void* address, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
+ }
+#endif
+
void cvttsd2si_rr(XMMRegisterID src, RegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_MOVD_EdVd, (RegisterID)src, dst);
}
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
void movq_rr(XMMRegisterID src, RegisterID dst)
{
m_formatter.prefix(PRE_SSE_66);
m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, base, offset);
}
+#if !CPU(X86_64)
+ void movsd_mr(void* address, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, address);
+ }
+#endif
+
void mulsd_rr(XMMRegisterID src, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, (RegisterID)src);
}
+ void ucomisd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_UCOMISD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
+ void divsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
+ void divsd_mr(int offset, RegisterID base, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_DIVSD_VsdWsd, (RegisterID)dst, base, offset);
+ }
+
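+ // xorpd with identical source and destination operands is the standard
+ // idiom for zeroing an XMM register; with a sign-bit mask operand it can
+ // also be used to negate a double.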
+ void xorpd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp(OP2_XORPD_VpdWpd, (RegisterID)dst, (RegisterID)src);
+ }
+
// Misc instructions:
void int3()
return JmpDst(m_formatter.size());
}
+ static JmpDst labelFor(JmpSrc jump, intptr_t offset = 0)
+ {
+ return JmpDst(jump.m_offset + offset);
+ }
+
JmpDst align(int alignment)
{
while (!m_formatter.isAligned(alignment))
}
// Linking & patching:
+ //
+ // 'link' and 'patch' methods are for use on unprotected code - such as the code
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
+ // code has been finalized it is (platform support permitting) within a non-
+ // writable region of memory; to modify the code in an execute-only executable
+ // pool the 'repatch' and 'relink' methods should be used.
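+ //
+ // A typical buffer-time sequence, for illustration only (assuming the
+ // usual JmpDst-returning label() accessor whose body appears above):
+ //
+ //   X86Assembler::JmpSrc jump = masm.je();
+ //   ... // emit the fall-through code
+ //   masm.linkJump(jump, masm.label()); // buffer still writable here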
- void link(JmpSrc from, JmpDst to)
+ void linkJump(JmpSrc from, JmpDst to)
{
- ASSERT(to.m_offset != -1);
ASSERT(from.m_offset != -1);
-
- reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(m_formatter.data()) + from.m_offset)[-1] = to.m_offset - from.m_offset;
+ ASSERT(to.m_offset != -1);
+
+ char* code = reinterpret_cast<char*>(m_formatter.data());
+ setRel32(code + from.m_offset, code + to.m_offset);
}
- static void patchAddress(void* code, JmpDst position, void* value)
+ static void linkJump(void* code, JmpSrc from, void* to)
{
- ASSERT(position.m_offset != -1);
-
- reinterpret_cast<void**>(reinterpret_cast<ptrdiff_t>(code) + position.m_offset)[-1] = value;
+ ASSERT(from.m_offset != -1);
+
+ setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
}
-
- static void link(void* code, JmpSrc from, void* to)
+
+ static void linkCall(void* code, JmpSrc from, void* to)
{
ASSERT(from.m_offset != -1);
-
- reinterpret_cast<int*>(reinterpret_cast<ptrdiff_t>(code) + from.m_offset)[-1] = reinterpret_cast<ptrdiff_t>(to) - (reinterpret_cast<ptrdiff_t>(code) + from.m_offset);
+
+ setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
+ }
+
+ static void linkPointer(void* code, JmpDst where, void* value)
+ {
+ ASSERT(where.m_offset != -1);
+
+ setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
+ }
+
+ static void relinkJump(void* from, void* to)
+ {
+ setRel32(from, to);
}
+ static void relinkCall(void* from, void* to)
+ {
+ setRel32(from, to);
+ }
+
+ static void repatchInt32(void* where, int32_t value)
+ {
+ setInt32(where, value);
+ }
+
+ static void repatchPointer(void* where, void* value)
+ {
+ setPointer(where, value);
+ }
+
+ static void repatchLoadPtrToLEA(void* where)
+ {
+#if CPU(X86_64)
+ // On x86-64 pointer memory accesses require a 64-bit operand, and as such a REX prefix.
+ // Skip over the prefix byte.
+ where = reinterpret_cast<char*>(where) + 1;
+#endif
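+ // For example (x86-64, hypothetical operands): 'movq 8(%rbp), %rax' is
+ // encoded 48 8B 45 08; rewriting the opcode byte to OP_LEA gives
+ // 48 8D 45 08, i.e. 'leaq 8(%rbp), %rax' - the ModRM byte and
+ // displacement are reused unchanged.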
+ *reinterpret_cast<unsigned char*>(where) = static_cast<unsigned char>(OP_LEA);
+ }
+
+ static unsigned getCallReturnOffset(JmpSrc call)
+ {
+ ASSERT(call.m_offset >= 0);
+ return call.m_offset;
+ }
+
static void* getRelocatedAddress(void* code, JmpSrc jump)
{
+ ASSERT(jump.m_offset != -1);
+
return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
}
return dst.m_offset - src.m_offset;
}
- static void patchImmediate(intptr_t where, int32_t value)
- {
- reinterpret_cast<int32_t*>(where)[-1] = value;
- }
-
- static void patchPointer(intptr_t where, intptr_t value)
- {
- reinterpret_cast<intptr_t*>(where)[-1] = value;
- }
-
- static void patchBranchOffset(intptr_t where, void* destination)
- {
- intptr_t offset = reinterpret_cast<intptr_t>(destination) - where;
- ASSERT(offset == static_cast<int32_t>(offset));
- reinterpret_cast<int32_t*>(where)[-1] = static_cast<int32_t>(offset);
- }
-
void* executableCopy(ExecutablePool* allocator)
{
void* copy = m_formatter.executableCopy(allocator);
private:
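+ // These helpers write immediately *before* the given pointer: JmpSrc/
+ // JmpDst offsets record the position just past an instruction's operand,
+ // hence the [-1] indexing. For setRel32, 'from' is the end of the
+ // instruction, which is exactly what an x86 rel32 operand is relative to.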
+ static void setPointer(void* where, void* value)
+ {
+ reinterpret_cast<void**>(where)[-1] = value;
+ }
+
+ static void setInt32(void* where, int32_t value)
+ {
+ reinterpret_cast<int32_t*>(where)[-1] = value;
+ }
+
+ static void setRel32(void* from, void* to)
+ {
+ intptr_t offset = reinterpret_cast<intptr_t>(to) - reinterpret_cast<intptr_t>(from);
+ ASSERT(offset == static_cast<int32_t>(offset));
+
+ setInt32(from, offset);
+ }
+
class X86InstructionFormatter {
static const int maxInstructionSize = 16;
memoryModRM(reg, base, index, scale, offset);
}
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
{
m_buffer.ensureSpace(maxInstructionSize);
memoryModRM(reg, base, index, scale, offset);
}
-#if PLATFORM(X86_64)
+#if !CPU(X86_64)
+ void twoByteOp(TwoByteOpcodeID opcode, int reg, void* address)
+ {
+ m_buffer.ensureSpace(maxInstructionSize);
+ m_buffer.putByteUnchecked(OP_2BYTE_ESCAPE);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM(reg, address);
+ }
+#endif
+
+#if CPU(X86_64)
// Quad-word-sized operands:
//
// Used to format 64-bit operations, planting a REX.w prefix.
m_buffer.putByteUnchecked(imm);
}
+ void immediate16(int imm)
+ {
+ m_buffer.putShortUnchecked(imm);
+ }
+
void immediate32(int imm)
{
m_buffer.putIntUnchecked(imm);
// Internals; ModRm and REX formatters.
- static const RegisterID noBase = X86::ebp;
- static const RegisterID hasSib = X86::esp;
- static const RegisterID noIndex = X86::esp;
-#if PLATFORM(X86_64)
- static const RegisterID noBase2 = X86::r13;
- static const RegisterID hasSib2 = X86::r12;
+ static const RegisterID noBase = X86Registers::ebp;
+ static const RegisterID hasSib = X86Registers::esp;
+ static const RegisterID noIndex = X86Registers::esp;
+#if CPU(X86_64)
+ static const RegisterID noBase2 = X86Registers::r13;
+ static const RegisterID hasSib2 = X86Registers::r12;
// Registers r8 & above require a REX prefix.
inline bool regRequiresRex(int reg)
{
- return (reg >= X86::r8);
+ return (reg >= X86Registers::r8);
}
// Byte-operand registers spl & above require a REX prefix (to prevent the 'H' registers being accessed).
inline bool byteRegRequiresRex(int reg)
{
- return (reg >= X86::esp);
+ return (reg >= X86Registers::esp);
}
// Format a REX prefix byte.
{
ASSERT(mode != ModRmRegister);
- // Encode sacle of (1,2,4,8) -> (0,1,2,3)
- int shift = 0;
- while (scale >>= 1)
- shift++;
-
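+ // 'scale' now arrives pre-encoded (0..3, i.e. log2 of the 1/2/4/8
+ // multiplier), so it can be planted straight into the SIB byte without
+ // the shift loop the old code needed.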
putModRm(mode, reg, hasSib);
- m_buffer.putByteUnchecked((shift << 6) | ((index & 7) << 3) | (base & 7));
+ m_buffer.putByteUnchecked((scale << 6) | ((index & 7) << 3) | (base & 7));
}
void registerModRM(int reg, RegisterID rm)
void memoryModRM(int reg, RegisterID base, int offset)
{
// A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
if ((base == hasSib) || (base == hasSib2)) {
#else
if (base == hasSib) {
m_buffer.putIntUnchecked(offset);
}
} else {
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
if (!offset && (base != noBase) && (base != noBase2))
#else
if (!offset && (base != noBase))
void memoryModRM_disp32(int reg, RegisterID base, int offset)
{
// A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
if ((base == hasSib) || (base == hasSib2)) {
#else
if (base == hasSib) {
{
ASSERT(index != noIndex);
-#if PLATFORM(X86_64)
+#if CPU(X86_64)
if (!offset && (base != noBase) && (base != noBase2))
#else
if (!offset && (base != noBase))
}
}
-#if !PLATFORM(X86_64)
+#if !CPU(X86_64)
void memoryModRM(int reg, void* address)
{
// noBase + ModRmMemoryNoDisp means noBase + ModRmMemoryDisp32!
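+ // (In 32-bit mode, mod=00 with an rm field of ebp selects absolute
+ // disp32 addressing rather than [ebp], so the raw address can be
+ // planted directly after the ModRM byte.)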
} // namespace JSC
-#endif // ENABLE(ASSEMBLER) && PLATFORM(X86)
+#endif // ENABLE(ASSEMBLER) && CPU(X86)
#endif // X86Assembler_h