OP_GROUP1_EbIb = 0x80,
OP_GROUP1_EvIz = 0x81,
OP_GROUP1_EvIb = 0x83,
+ OP_TEST_EbGb = 0x84,
OP_TEST_EvGv = 0x85,
OP_XCHG_EvGv = 0x87,
OP_MOV_EvGv = 0x89,
OP_MOV_GvEv = 0x8B,
OP_LEA = 0x8D,
OP_GROUP1A_Ev = 0x8F,
+ OP_NOP = 0x90,
OP_CDQ = 0x99,
OP_MOV_EAXOv = 0xA1,
OP_MOV_OvEAX = 0xA3,
class X86InstructionFormatter;
public:
- class JmpSrc {
- friend class X86Assembler;
- friend class X86InstructionFormatter;
- public:
- JmpSrc()
- : m_offset(-1)
- {
- }
-
- private:
- JmpSrc(int offset)
- : m_offset(offset)
- {
- }
-
- int m_offset;
- };
-
- class JmpDst {
- friend class X86Assembler;
- friend class X86InstructionFormatter;
- public:
- JmpDst()
- : m_offset(-1)
- , m_used(false)
- {
- }
-
- bool isUsed() const { return m_used; }
- void used() { m_used = true; }
- private:
- JmpDst(int offset)
- : m_offset(offset)
- , m_used(false)
- {
- ASSERT(m_offset == offset);
- }
-
- int m_offset : 31;
- bool m_used : 1;
- };
-
X86Assembler()
{
}
- size_t size() const { return m_formatter.size(); }
-
// Stack operations:
void push_r(RegisterID reg)
// Arithmetic operations:
#if !CPU(X86_64)
- void adcl_im(int imm, void* addr)
+ void adcl_im(int imm, const void* addr)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADC, addr);
}
}
#else
- void addl_im(int imm, void* addr)
+ void addl_im(int imm, const void* addr)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_ADD, addr);
}
}
#else
- void andl_im(int imm, void* addr)
+ void andl_im(int imm, const void* addr)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_AND, addr);
}
}
#else
- void orl_im(int imm, void* addr)
+ void orl_im(int imm, const void* addr)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_OR, addr);
}
}
#else
- void subl_im(int imm, void* addr)
+ void subl_im(int imm, const void* addr)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_SUB, addr);
}
}
#else
- void cmpl_rm(RegisterID reg, void* addr)
+ void cmpl_rm(RegisterID reg, const void* addr)
{
m_formatter.oneByteOp(OP_CMP_EvGv, reg, addr);
}
- void cmpl_im(int imm, void* addr)
+ void cmpl_im(int imm, const void* addr)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
m_formatter.oneByteOp(OP_GROUP1_EvIb, GROUP1_OP_CMP, addr);
m_formatter.oneByteOp(OP_GROUP3_EvIz, GROUP3_OP_TEST, base, offset);
m_formatter.immediate32(imm);
}
-
+
+ void testb_rr(RegisterID src, RegisterID dst)
+ {
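+ // TEST r/m8, r8 (opcode 0x84): ANDs the low bytes of dst and src to set
+ // the flags without writing a result. Without a REX prefix, byte operands
+ // on registers 4-7 select ah/ch/dh/bh, so callers presumably restrict
+ // themselves to registers whose low byte encodes directly.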
+ m_formatter.oneByteOp(OP_TEST_EbGb, src, dst);
+ }
+
void testb_im(int imm, int offset, RegisterID base)
{
m_formatter.oneByteOp(OP_GROUP3_EbIb, GROUP3_OP_TEST, base, offset);
m_formatter.oneByteOp(OP_MOV_EvGv, src, base, index, scale, offset);
}
- void movl_mEAX(void* addr)
+ void movl_mEAX(const void* addr)
{
m_formatter.oneByteOp(OP_MOV_EAXOv);
#if CPU(X86_64)
{
m_formatter.oneByteOp_disp32(OP_MOV_GvEv, dst, base, offset);
}
+
+ void movl_mr_disp8(int offset, RegisterID base, RegisterID dst)
+ {
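+ // Unconditionally emits the one-byte displacement form, leaving a disp8
+ // that repatchCompact (below) can later rewrite in place.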
+ m_formatter.oneByteOp_disp8(OP_MOV_GvEv, dst, base, offset);
+ }
void movl_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
{
m_formatter.immediate32(imm);
}
- void movl_EAXm(void* addr)
+ void movl_EAXm(const void* addr)
{
m_formatter.oneByteOp(OP_MOV_OvEAX);
#if CPU(X86_64)
m_formatter.oneByteOp64(OP_MOV_EvGv, src, base, index, scale, offset);
}
- void movq_mEAX(void* addr)
+ void movq_mEAX(const void* addr)
{
m_formatter.oneByteOp64(OP_MOV_EAXOv);
m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
}
- void movq_EAXm(void* addr)
+ void movq_EAXm(const void* addr)
{
m_formatter.oneByteOp64(OP_MOV_OvEAX);
m_formatter.immediate64(reinterpret_cast<int64_t>(addr));
m_formatter.oneByteOp64_disp32(OP_MOV_GvEv, dst, base, offset);
}
+ void movq_mr_disp8(int offset, RegisterID base, RegisterID dst)
+ {
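+ // 64-bit (REX.W) counterpart of movl_mr_disp8; also forces the one-byte
+ // displacement encoding.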
+ m_formatter.oneByteOp64_disp8(OP_MOV_GvEv, dst, base, offset);
+ }
+
void movq_mr(int offset, RegisterID base, RegisterID index, int scale, RegisterID dst)
{
m_formatter.oneByteOp64(OP_MOV_GvEv, dst, base, index, scale, offset);
#else
- void movl_rm(RegisterID src, void* addr)
+ void movl_rm(RegisterID src, const void* addr)
{
if (src == X86Registers::eax)
movl_EAXm(addr);
m_formatter.oneByteOp(OP_MOV_EvGv, src, addr);
}
- void movl_mr(void* addr, RegisterID dst)
+ void movl_mr(const void* addr, RegisterID dst)
{
if (dst == X86Registers::eax)
movl_mEAX(addr);
m_formatter.oneByteOp(OP_MOV_GvEv, dst, addr);
}
- void movl_i32m(int imm, void* addr)
+ void movl_i32m(int imm, const void* addr)
{
m_formatter.oneByteOp(OP_GROUP11_EvIz, GROUP11_MOV, addr);
m_formatter.immediate32(imm);
// Flow control:
- JmpSrc call()
+ AssemblerLabel call()
{
m_formatter.oneByteOp(OP_CALL_rel32);
return m_formatter.immediateRel32();
}
- JmpSrc call(RegisterID dst)
+ AssemblerLabel call(RegisterID dst)
{
m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, dst);
- return JmpSrc(m_formatter.size());
+ return m_formatter.label();
}
void call_m(int offset, RegisterID base)
m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_CALLN, base, offset);
}
- JmpSrc jmp()
+ AssemblerLabel jmp()
{
m_formatter.oneByteOp(OP_JMP_rel32);
return m_formatter.immediateRel32();
}
- // Return a JmpSrc so we have a label to the jump, so we can use this
+ // Return an AssemblerLabel so we have a label to the jump, so we can use this
// to make a tail-recursive call on x86-64. The MacroAssembler
// really shouldn't wrap this as a Jump, since it can't be linked. :-/
- JmpSrc jmp_r(RegisterID dst)
+ AssemblerLabel jmp_r(RegisterID dst)
{
m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, dst);
- return JmpSrc(m_formatter.size());
+ return m_formatter.label();
}
void jmp_m(int offset, RegisterID base)
m_formatter.oneByteOp(OP_GROUP5_Ev, GROUP5_OP_JMPN, base, offset);
}
- JmpSrc jne()
+ AssemblerLabel jne()
{
m_formatter.twoByteOp(jccRel32(ConditionNE));
return m_formatter.immediateRel32();
}
- JmpSrc jnz()
+ AssemblerLabel jnz()
{
return jne();
}
- JmpSrc je()
+ AssemblerLabel je()
{
m_formatter.twoByteOp(jccRel32(ConditionE));
return m_formatter.immediateRel32();
}
- JmpSrc jz()
+ AssemblerLabel jz()
{
return je();
}
- JmpSrc jl()
+ AssemblerLabel jl()
{
m_formatter.twoByteOp(jccRel32(ConditionL));
return m_formatter.immediateRel32();
}
- JmpSrc jb()
+ AssemblerLabel jb()
{
m_formatter.twoByteOp(jccRel32(ConditionB));
return m_formatter.immediateRel32();
}
- JmpSrc jle()
+ AssemblerLabel jle()
{
m_formatter.twoByteOp(jccRel32(ConditionLE));
return m_formatter.immediateRel32();
}
- JmpSrc jbe()
+ AssemblerLabel jbe()
{
m_formatter.twoByteOp(jccRel32(ConditionBE));
return m_formatter.immediateRel32();
}
- JmpSrc jge()
+ AssemblerLabel jge()
{
m_formatter.twoByteOp(jccRel32(ConditionGE));
return m_formatter.immediateRel32();
}
- JmpSrc jg()
+ AssemblerLabel jg()
{
m_formatter.twoByteOp(jccRel32(ConditionG));
return m_formatter.immediateRel32();
}
- JmpSrc ja()
+ AssemblerLabel ja()
{
m_formatter.twoByteOp(jccRel32(ConditionA));
return m_formatter.immediateRel32();
}
- JmpSrc jae()
+ AssemblerLabel jae()
{
m_formatter.twoByteOp(jccRel32(ConditionAE));
return m_formatter.immediateRel32();
}
- JmpSrc jo()
+ AssemblerLabel jo()
{
m_formatter.twoByteOp(jccRel32(ConditionO));
return m_formatter.immediateRel32();
}
- JmpSrc jp()
+ AssemblerLabel jp()
{
m_formatter.twoByteOp(jccRel32(ConditionP));
return m_formatter.immediateRel32();
}
- JmpSrc js()
+ AssemblerLabel js()
{
m_formatter.twoByteOp(jccRel32(ConditionS));
return m_formatter.immediateRel32();
}
- JmpSrc jCC(Condition cond)
+ AssemblerLabel jCC(Condition cond)
{
m_formatter.twoByteOp(jccRel32(cond));
return m_formatter.immediateRel32();
}
#if !CPU(X86_64)
- void cvtsi2sd_mr(void* address, XMMRegisterID dst)
+ void cvtsi2sd_mr(const void* address, XMMRegisterID dst)
{
m_formatter.prefix(PRE_SSE_F2);
m_formatter.twoByteOp(OP2_CVTSI2SD_VsdEd, (RegisterID)dst, address);
}
#endif
+ void movsd_rr(XMMRegisterID src, XMMRegisterID dst)
+ {
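+ // MOVSD xmm, xmm (F2 0F 10): copies the low double-precision value from
+ // src to dst.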
+ m_formatter.prefix(PRE_SSE_F2);
+ m_formatter.twoByteOp(OP2_MOVSD_VsdWsd, (RegisterID)dst, (RegisterID)src);
+ }
+
void movsd_rm(XMMRegisterID src, int offset, RegisterID base)
{
m_formatter.prefix(PRE_SSE_F2);
// Assembler admin methods:
- JmpDst label()
+ size_t codeSize() const
{
- return JmpDst(m_formatter.size());
+ return m_formatter.codeSize();
}
-
- static JmpDst labelFor(JmpSrc jump, intptr_t offset = 0)
+
+ AssemblerLabel label()
{
- return JmpDst(jump.m_offset + offset);
+ return m_formatter.label();
}
-
- JmpDst align(int alignment)
+
+ AssemblerLabel align(int alignment)
{
while (!m_formatter.isAligned(alignment))
m_formatter.oneByteOp(OP_HLT);
// writable region of memory; to modify the code in an execute-only executable
// pool the 'repatch' and 'relink' methods should be used.
- void linkJump(JmpSrc from, JmpDst to)
+ void linkJump(AssemblerLabel from, AssemblerLabel to)
{
- ASSERT(from.m_offset != -1);
- ASSERT(to.m_offset != -1);
+ ASSERT(from.isSet());
+ ASSERT(to.isSet());
char* code = reinterpret_cast<char*>(m_formatter.data());
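+ // The rel32 field must still hold the zero placeholder written by
+ // immediateRel32; i.e. this jump must not already have been linked.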
+ ASSERT(!reinterpret_cast<int32_t*>(code + from.m_offset)[-1]);
setRel32(code + from.m_offset, code + to.m_offset);
}
- static void linkJump(void* code, JmpSrc from, void* to)
+ static void linkJump(void* code, AssemblerLabel from, void* to)
{
- ASSERT(from.m_offset != -1);
+ ASSERT(from.isSet());
setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
}
- static void linkCall(void* code, JmpSrc from, void* to)
+ static void linkCall(void* code, AssemblerLabel from, void* to)
{
- ASSERT(from.m_offset != -1);
+ ASSERT(from.isSet());
setRel32(reinterpret_cast<char*>(code) + from.m_offset, to);
}
- static void linkPointer(void* code, JmpDst where, void* value)
+ static void linkPointer(void* code, AssemblerLabel where, void* value)
{
- ASSERT(where.m_offset != -1);
+ ASSERT(where.isSet());
setPointer(reinterpret_cast<char*>(code) + where.m_offset, value);
}
{
setRel32(from, to);
}
+
+ static void repatchCompact(void* where, int32_t value)
+ {
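+ // Rewrites the single displacement byte immediately preceding 'where';
+ // the patched value must fit in a non-negative int8.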
+ ASSERT(value >= 0);
+ ASSERT(value <= std::numeric_limits<int8_t>::max());
+ setInt8(where, value);
+ }
static void repatchInt32(void* where, int32_t value)
{
{
setPointer(where, value);
}
-
- static void repatchLoadPtrToLEA(void* where)
- {
-#if CPU(X86_64)
- // On x86-64 pointer memory accesses require a 64-bit operand, and as such a REX prefix.
- // Skip over the prefix byte.
- where = reinterpret_cast<char*>(where) + 1;
-#endif
- *reinterpret_cast<unsigned char*>(where) = static_cast<unsigned char>(OP_LEA);
- }
- static unsigned getCallReturnOffset(JmpSrc call)
+ static void* readPointer(void* where)
{
- ASSERT(call.m_offset >= 0);
- return call.m_offset;
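+ // Reads back the pointer that setPointer stored in the word immediately
+ // preceding 'where'.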
+ return reinterpret_cast<void**>(where)[-1];
}
- static void* getRelocatedAddress(void* code, JmpSrc jump)
+ static unsigned getCallReturnOffset(AssemblerLabel call)
{
- ASSERT(jump.m_offset != -1);
-
- return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + jump.m_offset);
+ ASSERT(call.isSet());
+ return call.m_offset;
}
-
- static void* getRelocatedAddress(void* code, JmpDst destination)
- {
- ASSERT(destination.m_offset != -1);
- return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + destination.m_offset);
- }
-
- static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst)
+ static void* getRelocatedAddress(void* code, AssemblerLabel label)
{
- return dst.m_offset - src.m_offset;
+ ASSERT(label.isSet());
+ return reinterpret_cast<void*>(reinterpret_cast<ptrdiff_t>(code) + label.m_offset);
}
- static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst)
+ static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b)
{
- return dst.m_offset - src.m_offset;
+ return b.m_offset - a.m_offset;
}
- static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst)
+ void* executableCopy(JSGlobalData& globalData, ExecutablePool* allocator)
{
- return dst.m_offset - src.m_offset;
+ return m_formatter.executableCopy(globalData, allocator);
}
-
- void* executableCopy(ExecutablePool* allocator)
+
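+ // Truncates the instruction stream back to a previously captured label,
+ // discarding anything emitted after it.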
+ void rewindToLabel(AssemblerLabel rewindTo) { m_formatter.rewindToLabel(rewindTo); }
+
+#ifndef NDEBUG
+ unsigned debugOffset() { return m_formatter.debugOffset(); }
+#endif
+
+ void nop()
{
- void* copy = m_formatter.executableCopy(allocator);
- ASSERT(copy);
- return copy;
+ m_formatter.oneByteOp(OP_NOP);
}
private:
{
reinterpret_cast<int32_t*>(where)[-1] = value;
}
+
+ static void setInt8(void* where, int8_t value)
+ {
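+ // Mirrors setInt32 above, but overwrites only the byte before 'where'.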
+ reinterpret_cast<int8_t*>(where)[-1] = value;
+ }
static void setRel32(void* from, void* to)
{
m_buffer.putByteUnchecked(opcode);
memoryModRM_disp32(reg, base, offset);
}
+
+ void oneByteOp_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
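+ // As oneByteOp, but unconditionally uses the 8-bit displacement ModRM
+ // form; the offset must be representable as a signed byte.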
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexIfNeeded(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp8(reg, base, offset);
+ }
void oneByteOp(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
{
}
#if !CPU(X86_64)
- void oneByteOp(OneByteOpcodeID opcode, int reg, void* address)
+ void oneByteOp(OneByteOpcodeID opcode, int reg, const void* address)
{
m_buffer.ensureSpace(maxInstructionSize);
m_buffer.putByteUnchecked(opcode);
m_buffer.putByteUnchecked(opcode);
memoryModRM_disp32(reg, base, offset);
}
+
+ void oneByteOp64_disp8(OneByteOpcodeID opcode, int reg, RegisterID base, int offset)
+ {
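+ // REX.W-prefixed variant of oneByteOp_disp8, for 64-bit operand sizes.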
+ m_buffer.ensureSpace(maxInstructionSize);
+ emitRexW(reg, 0, base);
+ m_buffer.putByteUnchecked(opcode);
+ memoryModRM_disp8(reg, base, offset);
+ }
void oneByteOp64(OneByteOpcodeID opcode, int reg, RegisterID base, RegisterID index, int scale, int offset)
{
m_buffer.putInt64Unchecked(imm);
}
- JmpSrc immediateRel32()
+ AssemblerLabel immediateRel32()
{
m_buffer.putIntUnchecked(0);
- return JmpSrc(m_buffer.size());
+ return label();
}
// Administrative methods:
- size_t size() const { return m_buffer.size(); }
+ size_t codeSize() const { return m_buffer.codeSize(); }
+ AssemblerLabel label() const { return m_buffer.label(); }
bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
void* data() const { return m_buffer.data(); }
- void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
+
+ void* executableCopy(JSGlobalData& globalData, ExecutablePool* allocator)
+ {
+ return m_buffer.executableCopy(globalData, allocator);
+ }
+
+ void rewindToLabel(AssemblerLabel rewindTo) { m_buffer.rewindToLabel(rewindTo); }
+
+#ifndef NDEBUG
+ unsigned debugOffset() { return m_buffer.debugOffset(); }
+#endif
private:
}
}
}
-
+
+ void memoryModRM_disp8(int reg, RegisterID base, int offset)
+ {
+ // A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.
+ ASSERT(CAN_SIGN_EXTEND_8_32(offset));
+#if CPU(X86_64)
+ if ((base == hasSib) || (base == hasSib2)) {
+#else
+ if (base == hasSib) {
+#endif
+ putModRmSib(ModRmMemoryDisp8, reg, base, noIndex, 0);
+ m_buffer.putByteUnchecked(offset);
+ } else {
+ putModRm(ModRmMemoryDisp8, reg, base);
+ m_buffer.putByteUnchecked(offset);
+ }
+ }
+
void memoryModRM_disp32(int reg, RegisterID base, int offset)
{
// A base of esp or r12 would be interpreted as a sib, so force a sib with no index & put the base in there.