/*
- * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#include <wtf/Assertions.h>
#include <wtf/Vector.h>
-#if USE(MASM_PROBE)
-#include <xmmintrin.h>
-#endif
-
namespace JSC {
// True when |value| fits in a signed 8-bit immediate, i.e. it survives a
// round-trip through truncation to one byte and sign extension back to 32 bits.
inline bool CAN_SIGN_EXTEND_8_32(int32_t value) { return value >= -128 && value <= 127; }
namespace X86Registers {
- typedef enum {
- eax,
- ecx,
- edx,
- ebx,
- esp,
- ebp,
- esi,
- edi,
-#if CPU(X86_64)
- r8,
- r9,
- r10,
- r11,
- r12,
- r13,
- r14,
- r15,
-#endif
- } RegisterID;
+#define FOR_EACH_CPU_REGISTER(V) \
+ FOR_EACH_CPU_GPREGISTER(V) \
+ FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+ FOR_EACH_CPU_FPREGISTER(V)
+
+// Each register below is described as a (type, name) pair:
+// 1. the type of the storage needed to save the register value by the JIT probe.
+// 2. the name of the register.
+#define FOR_EACH_CPU_GPREGISTER(V) \
+ V(void*, eax) \
+ V(void*, ecx) \
+ V(void*, edx) \
+ V(void*, ebx) \
+ V(void*, esp) \
+ V(void*, ebp) \
+ V(void*, esi) \
+ V(void*, edi) \
+ FOR_EACH_X86_64_CPU_GPREGISTER(V)
+
+#define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
+ V(void*, eip) \
+ V(void*, eflags) \
+
+// Note: the JITs only store double values in the FP registers.
+#define FOR_EACH_CPU_FPREGISTER(V) \
+ V(double, xmm0) \
+ V(double, xmm1) \
+ V(double, xmm2) \
+ V(double, xmm3) \
+ V(double, xmm4) \
+ V(double, xmm5) \
+ V(double, xmm6) \
+ V(double, xmm7) \
+ FOR_EACH_X86_64_CPU_FPREGISTER(V)
- typedef enum {
- xmm0,
- xmm1,
- xmm2,
- xmm3,
- xmm4,
- xmm5,
- xmm6,
- xmm7,
+#if CPU(X86)
-#if CPU(X86_64)
- xmm8,
- xmm9,
- xmm10,
- xmm11,
- xmm12,
- xmm13,
- xmm14,
- xmm15,
-#endif
- } XMMRegisterID;
-
-#if USE(MASM_PROBE)
- #define FOR_EACH_CPU_REGISTER(V) \
- FOR_EACH_CPU_GPREGISTER(V) \
- FOR_EACH_CPU_SPECIAL_REGISTER(V) \
- FOR_EACH_CPU_FPREGISTER(V)
-
- #define FOR_EACH_CPU_GPREGISTER(V) \
- V(void*, eax) \
- V(void*, ebx) \
- V(void*, ecx) \
- V(void*, edx) \
- V(void*, esi) \
- V(void*, edi) \
- V(void*, ebp) \
- V(void*, esp) \
- FOR_EACH_X86_64_CPU_GPREGISTER(V)
-
- #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \
- V(void*, eip) \
- V(void*, eflags) \
-
- #define FOR_EACH_CPU_FPREGISTER(V) \
- V(__m128, xmm0) \
- V(__m128, xmm1) \
- V(__m128, xmm2) \
- V(__m128, xmm3) \
- V(__m128, xmm4) \
- V(__m128, xmm5) \
- V(__m128, xmm6) \
- V(__m128, xmm7)
+#define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add.
+#define FOR_EACH_X86_64_CPU_FPREGISTER(V) // Nothing to add.
-#if CPU(X86)
- #define FOR_EACH_X86_64_CPU_GPREGISTER(V) // Nothing to add.
#elif CPU(X86_64)
- #define FOR_EACH_X86_64_CPU_GPREGISTER(V) \
- V(void*, r8) \
- V(void*, r9) \
- V(void*, r10) \
- V(void*, r11) \
- V(void*, r12) \
- V(void*, r13) \
- V(void*, r14) \
- V(void*, r15)
+
+#define FOR_EACH_X86_64_CPU_GPREGISTER(V) \
+ V(void*, r8) \
+ V(void*, r9) \
+ V(void*, r10) \
+ V(void*, r11) \
+ V(void*, r12) \
+ V(void*, r13) \
+ V(void*, r14) \
+ V(void*, r15)
+
+#define FOR_EACH_X86_64_CPU_FPREGISTER(V) \
+ V(double, xmm8) \
+ V(double, xmm9) \
+ V(double, xmm10) \
+ V(double, xmm11) \
+ V(double, xmm12) \
+ V(double, xmm13) \
+ V(double, xmm14) \
+ V(double, xmm15)
+
#endif // CPU(X86_64)
-#endif // USE(MASM_PROBE)
-}
+
+// General-purpose register IDs, generated from FOR_EACH_CPU_GPREGISTER so the
+// enum ordering always matches the JIT probe's register list above.
+typedef enum {
+ #define DECLARE_REGISTER(_type, _regName) _regName,
+ FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+} RegisterID;
+
+// Floating-point (XMM) register IDs, generated from FOR_EACH_CPU_FPREGISTER so
+// the enum ordering always matches the JIT probe's register list above.
+typedef enum {
+ #define DECLARE_REGISTER(_type, _regName) _regName,
+ FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+} XMMRegisterID;
+
+} // namespace X86Registers
class X86Assembler {
public:
OP2_CVTSS2SD_VsdWsd = 0x5A,
OP2_SUBSD_VsdWsd = 0x5C,
OP2_DIVSD_VsdWsd = 0x5E,
+ OP2_MOVMSKPD_VdEd = 0x50,
OP2_SQRTSD_VsdWsd = 0x51,
OP2_ANDNPD_VpdWpd = 0x55,
OP2_XORPD_VpdWpd = 0x57,
OP2_3BYTE_ESCAPE = 0xAE,
OP2_IMUL_GvEv = 0xAF,
OP2_MOVZX_GvEb = 0xB6,
+ OP2_BSR = 0xBD,
OP2_MOVSX_GvEb = 0xBE,
OP2_MOVZX_GvEw = 0xB7,
OP2_MOVSX_GvEw = 0xBF,
{
m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, dst);
}
+
+// Emits a 64-bit increment of the memory operand at [base + offset].
+// NOTE(review): GROUP1_OP_ADD is reused here as the opcode extension of the
+// OP_GROUP5_Ev group (i.e. INC), matching the sibling register form above —
+// presumably both extension values are 0; confirm against the opcode enums.
+ void incq_m(int offset, RegisterID base)
+ {
+ m_formatter.oneByteOp64(OP_GROUP5_Ev, GROUP1_OP_ADD, base, offset);
+ }
#endif // CPU(X86_64)
void negl_r(RegisterID dst)
#endif
+// Emits BSR (bit scan reverse, 0F BD): dst receives the bit index of the
+// highest set bit of src. Per the x86 ISA the destination is undefined when
+// src is zero, so callers are expected to guard that case.
+ void bsr_rr(RegisterID src, RegisterID dst)
+ {
+ m_formatter.twoByteOp(OP2_BSR, dst, src);
+ }
+
void sarl_i8r(int imm, RegisterID dst)
{
if (imm == 1)
}
}
+// Emits a 64-bit logical right shift of dst by the immediate count. A count
+// of 1 uses the shorter shift-by-one encoding (no immediate byte), mirroring
+// the existing shlq_i8r / sarl_i8r helpers.
+ void shrq_i8r(int imm, RegisterID dst)
+ {
+ if (imm == 1)
+ m_formatter.oneByteOp64(OP_GROUP2_Ev1, GROUP2_OP_SHR, dst);
+ else {
+ m_formatter.oneByteOp64(OP_GROUP2_EvIb, GROUP2_OP_SHR, dst);
+ m_formatter.immediate8(imm);
+ }
+ }
+
void shlq_i8r(int imm, RegisterID dst)
{
if (imm == 1)
}
#if CPU(X86_64)
+// Emits MOVMSKPD (66 0F 50): copies the sign bits of the two double-precision
+// lanes of src into the low two bits of the general-purpose register dst.
+// NOTE(review): uses the twoByteOp64 (REX.W) form — presumably to match the
+// 64-bit GPR destination; confirm this is intended vs. the 32-bit encoding.
+ void movmskpd_rr(XMMRegisterID src, RegisterID dst)
+ {
+ m_formatter.prefix(PRE_SSE_66);
+ m_formatter.twoByteOp64(OP2_MOVMSKPD_VdEd, dst, (RegisterID)src);
+ }
+
void movq_rr(XMMRegisterID src, RegisterID dst)
{
m_formatter.prefix(PRE_SSE_66);