X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/b80e619319b1def83d1e8b4f84042b661be1be7f..refs/heads/master:/assembler/ARMv7Assembler.h diff --git a/assembler/ARMv7Assembler.h b/assembler/ARMv7Assembler.h index 13ad3e0..1d731f9 100644 --- a/assembler/ARMv7Assembler.h +++ b/assembler/ARMv7Assembler.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009, 2010 Apple Inc. All rights reserved. + * Copyright (C) 2009, 2010, 2012, 2013, 2014 Apple Inc. All rights reserved. * Copyright (C) 2010 University of Szeged * * Redistribution and use in source and binary forms, with or without @@ -30,6 +30,7 @@ #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) #include "AssemblerBuffer.h" +#include #include #include #include @@ -37,23 +38,83 @@ namespace JSC { namespace ARMRegisters { + + #define FOR_EACH_CPU_REGISTER(V) \ + FOR_EACH_CPU_GPREGISTER(V) \ + FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + FOR_EACH_CPU_FPREGISTER(V) + + // The following are defined as pairs of the following value: + // 1. type of the storage needed to save the register value by the JIT probe. + // 2. name of the register. + #define FOR_EACH_CPU_GPREGISTER(V) \ + V(void*, r0) \ + V(void*, r1) \ + V(void*, r2) \ + V(void*, r3) \ + V(void*, r4) \ + V(void*, r5) \ + V(void*, r6) \ + V(void*, r7) \ + V(void*, r8) \ + V(void*, r9) \ + V(void*, r10) \ + V(void*, r11) \ + V(void*, ip) \ + V(void*, sp) \ + V(void*, lr) \ + V(void*, pc) + + #define FOR_EACH_CPU_SPECIAL_REGISTER(V) \ + V(void*, apsr) \ + V(void*, fpscr) \ + + #define FOR_EACH_CPU_FPREGISTER(V) \ + V(double, d0) \ + V(double, d1) \ + V(double, d2) \ + V(double, d3) \ + V(double, d4) \ + V(double, d5) \ + V(double, d6) \ + V(double, d7) \ + V(double, d8) \ + V(double, d9) \ + V(double, d10) \ + V(double, d11) \ + V(double, d12) \ + V(double, d13) \ + V(double, d14) \ + V(double, d15) \ + V(double, d16) \ + V(double, d17) \ + V(double, d18) \ + V(double, d19) \ + V(double, d20) \ + V(double, d21) \ + V(double, d22) \ + V(double, d23) \ + V(double, d24) \ + V(double, d25) \ + V(double, d26) \ + V(double, d27) \ + V(double, d28) \ + V(double, d29) \ + V(double, d30) \ + V(double, d31) + typedef enum { - r0, - r1, - r2, - r3, - r4, - r5, - r6, - r7, wr = r7, // thumb work register - r8, - r9, sb = r9, // static base - r10, sl = r10, // stack limit - r11, fp = r11, // frame pointer - r12, ip = r12, - r13, sp = r13, - r14, lr = r14, - r15, pc = r15, + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_GPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER + + fp = r7, // frame pointer + sb = r9, // static base + sl = r10, // stack limit + r12 = ip, + r13 = sp, + r14 = lr, + r15 = pc } RegisterID; typedef enum { @@ -92,38 +153,9 @@ namespace ARMRegisters { } FPSingleRegisterID; typedef enum { - d0, - d1, - d2, - d3, - d4, - d5, - d6, - d7, - d8, - d9, - d10, - d11, - d12, - d13, - d14, - d15, - d16, - d17, - d18, - d19, - d20, - d21, - d22, - d23, - d24, - d25, - d26, - d27, - d28, - d29, - d30, - d31, + #define DECLARE_REGISTER(_type, _regName) _regName, + FOR_EACH_CPU_FPREGISTER(DECLARE_REGISTER) + #undef DECLARE_REGISTER } FPDoubleRegisterID; typedef enum { @@ -172,7 +204,8 @@ namespace ARMRegisters { ASSERT(!(reg & 1)); return (FPDoubleRegisterID)(reg >> 1); } -} + +} // namespace ARMRegisters class ARMv7Assembler; class ARMThumbImmediate { @@ -340,6 +373,8 @@ public: return m_type != TypeInvalid; } + uint16_t asUInt16() const { return m_value.asInt; } + // These methods rely on the format of encoded byte values. 
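    // (Illustrative note, inferred from the pattern visible just below: each
    // isUIntN() simply checks that no bits at or above bit N are set in
    // m_value.asInt - e.g. isUInt3() masks with 0xfff8 - and the matching
    // getUIntN() asserts that check and returns the raw value. Widening the
    // return type of getUInt9()/getUInt10() to uint16_t matters because those
    // immediates no longer fit in a uint8_t.)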
bool isUInt3() { return !(m_value.asInt & 0xfff8); } bool isUInt4() { return !(m_value.asInt & 0xfff0); } @@ -357,8 +392,8 @@ public: uint8_t getUInt6() { ASSERT(isUInt6()); return m_value.asInt; } uint8_t getUInt7() { ASSERT(isUInt7()); return m_value.asInt; } uint8_t getUInt8() { ASSERT(isUInt8()); return m_value.asInt; } - uint8_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; } - uint8_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; } + uint16_t getUInt9() { ASSERT(isUInt9()); return m_value.asInt; } + uint16_t getUInt10() { ASSERT(isUInt10()); return m_value.asInt; } uint16_t getUInt12() { ASSERT(isUInt12()); return m_value.asInt; } uint16_t getUInt16() { ASSERT(isUInt16()); return m_value.asInt; } @@ -369,40 +404,6 @@ private: ThumbImmediateValue m_value; }; -class VFPImmediate { -public: - VFPImmediate(double d) - : m_value(-1) - { - union { - uint64_t i; - double d; - } u; - - u.d = d; - - int sign = static_cast(u.i >> 63); - int exponent = static_cast(u.i >> 52) & 0x7ff; - uint64_t mantissa = u.i & 0x000fffffffffffffull; - - if ((exponent >= 0x3fc) && (exponent <= 0x403) && !(mantissa & 0x0000ffffffffffffull)) - m_value = (sign << 7) | ((exponent & 7) << 4) | (int)(mantissa >> 48); - } - - bool isValid() - { - return m_value != -1; - } - - uint8_t value() - { - return (uint8_t)m_value; - } - -private: - int m_value; -}; - typedef enum { SRType_LSL, SRType_LSR, @@ -412,7 +413,6 @@ typedef enum { SRType_RRX = SRType_ROR } ARMShiftType; -class ARMv7Assembler; class ShiftTypeAndAmount { friend class ARMv7Assembler; @@ -447,131 +447,109 @@ private: class ARMv7Assembler { public: - ~ARMv7Assembler() - { - ASSERT(m_jumpsToLink.isEmpty()); - } - typedef ARMRegisters::RegisterID RegisterID; typedef ARMRegisters::FPSingleRegisterID FPSingleRegisterID; typedef ARMRegisters::FPDoubleRegisterID FPDoubleRegisterID; typedef ARMRegisters::FPQuadRegisterID FPQuadRegisterID; + typedef FPDoubleRegisterID FPRegisterID; + + static RegisterID firstRegister() { return ARMRegisters::r0; } + static RegisterID lastRegister() { return ARMRegisters::r13; } + + static FPRegisterID firstFPRegister() { return ARMRegisters::d0; } + static FPRegisterID lastFPRegister() { return ARMRegisters::d31; } // (HS, LO, HI, LS) -> (AE, B, A, BE) // (VS, VC) -> (O, NO) typedef enum { - ConditionEQ, - ConditionNE, - ConditionHS, - ConditionLO, - ConditionMI, - ConditionPL, - ConditionVS, - ConditionVC, - ConditionHI, - ConditionLS, - ConditionGE, - ConditionLT, - ConditionGT, - ConditionLE, - ConditionAL, - - ConditionCS = ConditionHS, - ConditionCC = ConditionLO, + ConditionEQ, // Zero / Equal. + ConditionNE, // Non-zero / Not equal. + ConditionHS, ConditionCS = ConditionHS, // Unsigned higher or same. + ConditionLO, ConditionCC = ConditionLO, // Unsigned lower. + ConditionMI, // Negative. + ConditionPL, // Positive or zero. + ConditionVS, // Overflowed. + ConditionVC, // Not overflowed. + ConditionHI, // Unsigned higher. + ConditionLS, // Unsigned lower or same. + ConditionGE, // Signed greater than or equal. + ConditionLT, // Signed less than. + ConditionGT, // Signed greater than. + ConditionLE, // Signed less than or equal. + ConditionAL, // Unconditional / Always execute. 
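        // (For example: ConditionHS/ConditionCS share encoding 2 and
        // ConditionLO/ConditionCC share encoding 3, matching the
        // (HS, LO, HI, LS) -> (AE, B, A, BE) note above, so an unsigned
        // greater-or-equal branch assembles to the same condition field
        // whichever alias is used.)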
+ ConditionInvalid } Condition; - enum JumpType { JumpFixed, JumpNoCondition, JumpCondition, JumpNoConditionFixedSize, JumpConditionFixedSize, JumpTypeCount }; - enum JumpLinkType { LinkInvalid, LinkJumpT1, LinkJumpT2, LinkJumpT3, - LinkJumpT4, LinkConditionalJumpT4, LinkBX, LinkConditionalBX, JumpLinkTypeCount }; - static const int JumpSizes[JumpLinkTypeCount]; - static const int JumpPaddingSizes[JumpTypeCount]; +#define JUMP_ENUM_WITH_SIZE(index, value) (((value) << 3) | (index)) +#define JUMP_ENUM_SIZE(jump) ((jump) >> 3) + enum JumpType { JumpFixed = JUMP_ENUM_WITH_SIZE(0, 0), + JumpNoCondition = JUMP_ENUM_WITH_SIZE(1, 5 * sizeof(uint16_t)), + JumpCondition = JUMP_ENUM_WITH_SIZE(2, 6 * sizeof(uint16_t)), + JumpNoConditionFixedSize = JUMP_ENUM_WITH_SIZE(3, 5 * sizeof(uint16_t)), + JumpConditionFixedSize = JUMP_ENUM_WITH_SIZE(4, 6 * sizeof(uint16_t)) + }; + enum JumpLinkType { + LinkInvalid = JUMP_ENUM_WITH_SIZE(0, 0), + LinkJumpT1 = JUMP_ENUM_WITH_SIZE(1, sizeof(uint16_t)), + LinkJumpT2 = JUMP_ENUM_WITH_SIZE(2, sizeof(uint16_t)), + LinkJumpT3 = JUMP_ENUM_WITH_SIZE(3, 2 * sizeof(uint16_t)), + LinkJumpT4 = JUMP_ENUM_WITH_SIZE(4, 2 * sizeof(uint16_t)), + LinkConditionalJumpT4 = JUMP_ENUM_WITH_SIZE(5, 3 * sizeof(uint16_t)), + LinkBX = JUMP_ENUM_WITH_SIZE(6, 5 * sizeof(uint16_t)), + LinkConditionalBX = JUMP_ENUM_WITH_SIZE(7, 6 * sizeof(uint16_t)) + }; + class LinkRecord { public: LinkRecord(intptr_t from, intptr_t to, JumpType type, Condition condition) - : m_from(from) - , m_to(to) - , m_type(type) - , m_linkType(LinkInvalid) - , m_condition(condition) { + data.realTypes.m_from = from; + data.realTypes.m_to = to; + data.realTypes.m_type = type; + data.realTypes.m_linkType = LinkInvalid; + data.realTypes.m_condition = condition; } - intptr_t from() const { return m_from; } - void setFrom(intptr_t from) { m_from = from; } - intptr_t to() const { return m_to; } - JumpType type() const { return m_type; } - JumpLinkType linkType() const { return m_linkType; } - void setLinkType(JumpLinkType linkType) { ASSERT(m_linkType == LinkInvalid); m_linkType = linkType; } - Condition condition() const { return m_condition; } - private: - intptr_t m_from : 31; - intptr_t m_to : 31; - JumpType m_type : 3; - JumpLinkType m_linkType : 4; - Condition m_condition : 16; - }; - - class JmpSrc { - friend class ARMv7Assembler; - friend class ARMInstructionFormatter; - friend class LinkBuffer; - public: - JmpSrc() - : m_offset(-1) + void operator=(const LinkRecord& other) { + data.copyTypes.content[0] = other.data.copyTypes.content[0]; + data.copyTypes.content[1] = other.data.copyTypes.content[1]; + data.copyTypes.content[2] = other.data.copyTypes.content[2]; } - + intptr_t from() const { return data.realTypes.m_from; } + void setFrom(intptr_t from) { data.realTypes.m_from = from; } + intptr_t to() const { return data.realTypes.m_to; } + JumpType type() const { return data.realTypes.m_type; } + JumpLinkType linkType() const { return data.realTypes.m_linkType; } + void setLinkType(JumpLinkType linkType) { ASSERT(data.realTypes.m_linkType == LinkInvalid); data.realTypes.m_linkType = linkType; } + Condition condition() const { return data.realTypes.m_condition; } private: - JmpSrc(int offset, JumpType type) - : m_offset(offset) - , m_condition(0xffff) - , m_type(type) - { - ASSERT(m_type == JumpFixed || m_type == JumpNoCondition || m_type == JumpNoConditionFixedSize); - } - - JmpSrc(int offset, JumpType type, Condition condition) - : m_offset(offset) - , m_condition(condition) - , m_type(type) - { - ASSERT(m_type == 
JumpFixed || m_type == JumpCondition || m_type == JumpConditionFixedSize); - } - - int m_offset; - Condition m_condition : 16; - JumpType m_type : 16; - + union { + struct RealTypes { + intptr_t m_from : 31; + intptr_t m_to : 31; + JumpType m_type : 8; + JumpLinkType m_linkType : 8; + Condition m_condition : 16; + } realTypes; + struct CopyTypes { + uint32_t content[3]; + } copyTypes; + COMPILE_ASSERT(sizeof(RealTypes) == sizeof(CopyTypes), LinkRecordCopyStructSizeEqualsRealStruct); + } data; }; - - class JmpDst { - friend class ARMv7Assembler; - friend class ARMInstructionFormatter; - friend class LinkBuffer; - public: - JmpDst() - : m_offset(-1) - , m_used(false) - { - } - bool isUsed() const { return m_used; } - void used() { m_used = true; } - private: - JmpDst(int offset) - : m_offset(offset) - , m_used(false) - { - ASSERT(m_offset == offset); - } + ARMv7Assembler() + : m_indexOfLastWatchpoint(INT_MIN) + , m_indexOfTailOfLastWatchpoint(INT_MIN) + { + } - int m_offset : 31; - int m_used : 1; - }; + AssemblerBuffer& buffer() { return m_formatter.m_buffer; } private: // ARMv7, Appx-A.6.3 - bool BadReg(RegisterID reg) + static bool BadReg(RegisterID reg) { return (reg == ARMRegisters::sp) || (reg == ARMRegisters::pc); } @@ -613,18 +591,26 @@ private: OP_BLX = 0x4700, OP_BX = 0x4700, OP_STR_reg_T1 = 0x5000, + OP_STRH_reg_T1 = 0x5200, + OP_STRB_reg_T1 = 0x5400, + OP_LDRSB_reg_T1 = 0x5600, OP_LDR_reg_T1 = 0x5800, OP_LDRH_reg_T1 = 0x5A00, OP_LDRB_reg_T1 = 0x5C00, + OP_LDRSH_reg_T1 = 0x5E00, OP_STR_imm_T1 = 0x6000, OP_LDR_imm_T1 = 0x6800, + OP_STRB_imm_T1 = 0x7000, OP_LDRB_imm_T1 = 0x7800, + OP_STRH_imm_T1 = 0x8000, OP_LDRH_imm_T1 = 0x8800, OP_STR_imm_T2 = 0x9000, OP_LDR_imm_T2 = 0x9800, OP_ADD_SP_imm_T1 = 0xA800, OP_ADD_SP_imm_T2 = 0xB000, OP_SUB_SP_imm_T1 = 0xB080, + OP_PUSH_T1 = 0xB400, + OP_POP_T1 = 0xBC00, OP_BKPT = 0xBE00, OP_IT = 0xBF00, OP_NOP_T1 = 0xBF00, @@ -633,6 +619,8 @@ private: typedef enum { OP_B_T1 = 0xD000, OP_B_T2 = 0xE000, + OP_POP_T2 = 0xE8BD, + OP_PUSH_T2 = 0xE92D, OP_AND_reg_T2 = 0xEA00, OP_TST_reg_T2 = 0xEA10, OP_ORR_reg_T2 = 0xEA40, @@ -648,18 +636,28 @@ private: OP_SUB_reg_T2 = 0xEBA0, OP_SUB_S_reg_T2 = 0xEBB0, OP_CMP_reg_T2 = 0xEBB0, + OP_VMOV_CtoD = 0xEC00, + OP_VMOV_DtoC = 0xEC10, + OP_FSTS = 0xED00, OP_VSTR = 0xED00, + OP_FLDS = 0xED10, OP_VLDR = 0xED10, - OP_VMOV_StoC = 0xEE00, - OP_VMOV_CtoS = 0xEE10, + OP_VMOV_CtoS = 0xEE00, + OP_VMOV_StoC = 0xEE10, OP_VMUL_T2 = 0xEE20, OP_VADD_T2 = 0xEE30, OP_VSUB_T2 = 0xEE30, OP_VDIV = 0xEE80, + OP_VABS_T2 = 0xEEB0, OP_VCMP = 0xEEB0, OP_VCVT_FPIVFP = 0xEEB0, + OP_VMOV_T2 = 0xEEB0, OP_VMOV_IMM_T2 = 0xEEB0, OP_VMRS = 0xEEB0, + OP_VNEG_T2 = 0xEEB0, + OP_VSQRT_T1 = 0xEEB0, + OP_VCVTSD_T1 = 0xEEB0, + OP_VCVTDS_T1 = 0xEEB0, OP_B_T3a = 0xF000, OP_B_T4a = 0xF000, OP_AND_imm_T1 = 0xF000, @@ -671,48 +669,76 @@ private: OP_ADD_imm_T3 = 0xF100, OP_ADD_S_imm_T3 = 0xF110, OP_CMN_imm = 0xF110, + OP_ADC_imm = 0xF140, OP_SUB_imm_T3 = 0xF1A0, OP_SUB_S_imm_T3 = 0xF1B0, OP_CMP_imm_T2 = 0xF1B0, OP_RSB_imm_T2 = 0xF1C0, + OP_RSB_S_imm_T2 = 0xF1D0, OP_ADD_imm_T4 = 0xF200, OP_MOV_imm_T3 = 0xF240, OP_SUB_imm_T4 = 0xF2A0, OP_MOVT = 0xF2C0, + OP_UBFX_T1 = 0xF3C0, OP_NOP_T2a = 0xF3AF, + OP_DMB_SY_T2a = 0xF3BF, + OP_STRB_imm_T3 = 0xF800, + OP_STRB_reg_T2 = 0xF800, OP_LDRB_imm_T3 = 0xF810, OP_LDRB_reg_T2 = 0xF810, + OP_STRH_imm_T3 = 0xF820, + OP_STRH_reg_T2 = 0xF820, OP_LDRH_reg_T2 = 0xF830, OP_LDRH_imm_T3 = 0xF830, OP_STR_imm_T4 = 0xF840, OP_STR_reg_T2 = 0xF840, OP_LDR_imm_T4 = 0xF850, OP_LDR_reg_T2 = 0xF850, + OP_STRB_imm_T2 = 0xF880, OP_LDRB_imm_T2 = 
0xF890, + OP_STRH_imm_T2 = 0xF8A0, OP_LDRH_imm_T2 = 0xF8B0, OP_STR_imm_T3 = 0xF8C0, OP_LDR_imm_T3 = 0xF8D0, + OP_LDRSB_reg_T2 = 0xF910, + OP_LDRSH_reg_T2 = 0xF930, OP_LSL_reg_T2 = 0xFA00, OP_LSR_reg_T2 = 0xFA20, OP_ASR_reg_T2 = 0xFA40, OP_ROR_reg_T2 = 0xFA60, + OP_CLZ = 0xFAB0, OP_SMULL_T1 = 0xFB80, +#if HAVE(ARM_IDIV_INSTRUCTIONS) + OP_SDIV_T1 = 0xFB90, + OP_UDIV_T1 = 0xFBB0, +#endif } OpcodeID1; typedef enum { OP_VADD_T2b = 0x0A00, OP_VDIVb = 0x0A00, + OP_FLDSb = 0x0A00, OP_VLDRb = 0x0A00, OP_VMOV_IMM_T2b = 0x0A00, + OP_VMOV_T2b = 0x0A40, OP_VMUL_T2b = 0x0A00, + OP_FSTSb = 0x0A00, OP_VSTRb = 0x0A00, - OP_VMOV_CtoSb = 0x0A10, OP_VMOV_StoCb = 0x0A10, + OP_VMOV_CtoSb = 0x0A10, + OP_VMOV_DtoCb = 0x0A10, + OP_VMOV_CtoDb = 0x0A10, OP_VMRSb = 0x0A10, + OP_VABS_T2b = 0x0A40, OP_VCMPb = 0x0A40, OP_VCVT_FPIVFPb = 0x0A40, + OP_VNEG_T2b = 0x0A40, OP_VSUB_T2b = 0x0A40, + OP_VSQRT_T1b = 0x0A40, + OP_VCVTSD_T1b = 0x0A40, + OP_VCVTDS_T1b = 0x0A40, OP_NOP_T2b = 0x8000, + OP_DMB_SY_T2b = 0x8F5F, OP_B_T3b = 0x8000, OP_B_T4b = 0x9000, } OpcodeID2; @@ -740,11 +766,11 @@ private: class ARMInstructionFormatter; // false means else! - bool ifThenElseConditionBit(Condition condition, bool isIf) + static bool ifThenElseConditionBit(Condition condition, bool isIf) { return isIf ? (condition & 1) : !(condition & 1); } - uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if) + static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if, bool inst4if) { int mask = (ifThenElseConditionBit(condition, inst2if) << 3) | (ifThenElseConditionBit(condition, inst3if) << 2) @@ -753,7 +779,7 @@ private: ASSERT((condition != ConditionAL) || !(mask & (mask - 1))); return (condition << 4) | mask; } - uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if) + static uint8_t ifThenElse(Condition condition, bool inst2if, bool inst3if) { int mask = (ifThenElseConditionBit(condition, inst2if) << 3) | (ifThenElseConditionBit(condition, inst3if) << 2) @@ -761,7 +787,7 @@ private: ASSERT((condition != ConditionAL) || !(mask & (mask - 1))); return (condition << 4) | mask; } - uint8_t ifThenElse(Condition condition, bool inst2if) + static uint8_t ifThenElse(Condition condition, bool inst2if) { int mask = (ifThenElseConditionBit(condition, inst2if) << 3) | 4; @@ -769,7 +795,7 @@ private: return (condition << 4) | mask; } - uint8_t ifThenElse(Condition condition) + static uint8_t ifThenElse(Condition condition) { int mask = 8; return (condition << 4) | mask; @@ -777,6 +803,17 @@ private: public: + void adc(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) + { + // Rd can only be SP if Rn is also SP. + ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); + ASSERT(rd != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); + ASSERT(imm.isEncodedImm()); + + m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ADC_imm, rn, rd, imm); + } + void add(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) { // Rd can only be SP if Rn is also SP. 
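    // (Orientation note for the encoding selection below, summarising the code as
    // written: the narrow 16-bit forms are preferred - ADD (SP plus immediate)
    // T1/T2 when rn is SP and the word-aligned immediate fits in 10 or 9 bits,
    // ADD (immediate) T1/T2 for low registers with 3- or 8-bit immediates - and
    // the 32-bit T3/T4 forms are the fallback for everything else.)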
@@ -785,12 +822,13 @@ public: ASSERT(rn != ARMRegisters::pc); ASSERT(imm.isValid()); - if (rn == ARMRegisters::sp) { + if (rn == ARMRegisters::sp && imm.isUInt16()) { + ASSERT(!(imm.getUInt16() & 3)); if (!(rd & 8) && imm.isUInt10()) { - m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, imm.getUInt10() >> 2); + m_formatter.oneWordOp5Reg3Imm8(OP_ADD_SP_imm_T1, rd, static_cast(imm.getUInt10() >> 2)); return; } else if ((rd == ARMRegisters::sp) && imm.isUInt9()) { - m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, imm.getUInt9() >> 2); + m_formatter.oneWordOp9Imm7(OP_ADD_SP_imm_T2, static_cast(imm.getUInt9() >> 2)); return; } } else if (!((rd | rn) & 8)) { @@ -811,7 +849,7 @@ public: } } - void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) + ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) { ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); ASSERT(rd != ARMRegisters::pc); @@ -821,8 +859,13 @@ public: } // NOTE: In an IT block, add doesn't modify the flags register. - void add(RegisterID rd, RegisterID rn, RegisterID rm) + ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm) { + if (rd == ARMRegisters::sp) { + mov(rd, rn); + rn = rd; + } + if (rd == rn) m_formatter.oneWordOp8RegReg143(OP_ADD_reg_T2, rm, rd); else if (rd == rm) @@ -834,7 +877,7 @@ public: } // Not allowed in an IT (if then) block. - void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) + ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) { // Rd can only be SP if Rn is also SP. ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); @@ -856,7 +899,7 @@ public: } // Not allowed in an IT (if then) block? - void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) + ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) { ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); ASSERT(rd != ARMRegisters::pc); @@ -866,7 +909,7 @@ public: } // Not allowed in an IT (if then) block. 
- void add_S(RegisterID rd, RegisterID rn, RegisterID rm) + ALWAYS_INLINE void add_S(RegisterID rd, RegisterID rn, RegisterID rm) { if (!((rd | rn | rm) & 8)) m_formatter.oneWordOp7Reg3Reg3Reg3(OP_ADD_reg_T1, rm, rn, rd); @@ -874,7 +917,7 @@ public: add_S(rd, rn, rm, ShiftTypeAndAmount()); } - void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) + ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) { ASSERT(!BadReg(rd)); ASSERT(!BadReg(rn)); @@ -882,7 +925,7 @@ public: m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_AND_imm_T1, rn, rd, imm); } - void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) + ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) { ASSERT(!BadReg(rd)); ASSERT(!BadReg(rn)); @@ -890,7 +933,7 @@ public: m_formatter.twoWordOp12Reg4FourFours(OP_AND_reg_T2, rn, FourFours(shift.hi4(), rd, shift.lo4(), rm)); } - void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm) + ALWAYS_INLINE void ARM_and(RegisterID rd, RegisterID rn, RegisterID rm) { if ((rd == rn) && !((rd | rm) & 8)) m_formatter.oneWordOp10Reg3Reg3(OP_AND_reg_T1, rm, rd); @@ -900,7 +943,7 @@ public: ARM_and(rd, rn, rm, ShiftTypeAndAmount()); } - void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount) + ALWAYS_INLINE void asr(RegisterID rd, RegisterID rm, int32_t shiftAmount) { ASSERT(!BadReg(rd)); ASSERT(!BadReg(rm)); @@ -908,7 +951,7 @@ public: m_formatter.twoWordOp16FourFours(OP_ASR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm)); } - void asr(RegisterID rd, RegisterID rn, RegisterID rm) + ALWAYS_INLINE void asr(RegisterID rd, RegisterID rn, RegisterID rm) { ASSERT(!BadReg(rd)); ASSERT(!BadReg(rn)); @@ -917,39 +960,40 @@ public: } // Only allowed in IT (if then) block if last instruction. - JmpSrc b(JumpType type) + ALWAYS_INLINE AssemblerLabel b() { m_formatter.twoWordOp16Op16(OP_B_T4a, OP_B_T4b); - return JmpSrc(m_formatter.size(), type); + return m_formatter.label(); } // Only allowed in IT (if then) block if last instruction. - JmpSrc blx(RegisterID rm, JumpType type) + ALWAYS_INLINE AssemblerLabel blx(RegisterID rm) { ASSERT(rm != ARMRegisters::pc); m_formatter.oneWordOp8RegReg143(OP_BLX, rm, (RegisterID)8); - return JmpSrc(m_formatter.size(), type); + return m_formatter.label(); } // Only allowed in IT (if then) block if last instruction. 
- JmpSrc bx(RegisterID rm, JumpType type, Condition condition) + ALWAYS_INLINE AssemblerLabel bx(RegisterID rm) { m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0); - return JmpSrc(m_formatter.size(), type, condition); + return m_formatter.label(); } - JmpSrc bx(RegisterID rm, JumpType type) + void bkpt(uint8_t imm = 0) { - m_formatter.oneWordOp8RegReg143(OP_BX, rm, (RegisterID)0); - return JmpSrc(m_formatter.size(), type); + m_formatter.oneWordOp8Imm8(OP_BKPT, imm); } - void bkpt(uint8_t imm=0) + ALWAYS_INLINE void clz(RegisterID rd, RegisterID rm) { - m_formatter.oneWordOp8Imm8(OP_BKPT, imm); + ASSERT(!BadReg(rd)); + ASSERT(!BadReg(rm)); + m_formatter.twoWordOp12Reg4FourFours(OP_CLZ, rm, FourFours(0xf, rd, 8, rm)); } - void cmn(RegisterID rn, ARMThumbImmediate imm) + ALWAYS_INLINE void cmn(RegisterID rn, ARMThumbImmediate imm) { ASSERT(rn != ARMRegisters::pc); ASSERT(imm.isEncodedImm()); @@ -957,7 +1001,7 @@ public: m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMN_imm, rn, (RegisterID)0xf, imm); } - void cmp(RegisterID rn, ARMThumbImmediate imm) + ALWAYS_INLINE void cmp(RegisterID rn, ARMThumbImmediate imm) { ASSERT(rn != ARMRegisters::pc); ASSERT(imm.isEncodedImm()); @@ -968,14 +1012,14 @@ public: m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_CMP_imm_T2, rn, (RegisterID)0xf, imm); } - void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) + ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) { ASSERT(rn != ARMRegisters::pc); ASSERT(!BadReg(rm)); m_formatter.twoWordOp12Reg4FourFours(OP_CMP_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm)); } - void cmp(RegisterID rn, RegisterID rm) + ALWAYS_INLINE void cmp(RegisterID rn, RegisterID rm) { if ((rn | rm) & 8) cmp(rn, rm, ShiftTypeAndAmount()); @@ -984,7 +1028,7 @@ public: } // xor is not spelled with an 'e'. :-( - void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) + ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) { ASSERT(!BadReg(rd)); ASSERT(!BadReg(rn)); @@ -993,7 +1037,7 @@ public: } // xor is not spelled with an 'e'. :-( - void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) + ALWAYS_INLINE void eor(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) { ASSERT(!BadReg(rd)); ASSERT(!BadReg(rn)); @@ -1012,28 +1056,28 @@ public: eor(rd, rn, rm, ShiftTypeAndAmount()); } - void it(Condition cond) + ALWAYS_INLINE void it(Condition cond) { m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond)); } - void it(Condition cond, bool inst2if) + ALWAYS_INLINE void it(Condition cond, bool inst2if) { m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if)); } - void it(Condition cond, bool inst2if, bool inst3if) + ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if) { m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if)); } - void it(Condition cond, bool inst2if, bool inst3if, bool inst4if) + ALWAYS_INLINE void it(Condition cond, bool inst2if, bool inst3if, bool inst4if) { m_formatter.oneWordOp8Imm8(OP_IT, ifThenElse(cond, inst2if, inst3if, inst4if)); } // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. 
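    // (Orientation note for the loads below: ldr() with an immediate uses the
    // 16-bit LDR T1 form for low registers and a word-aligned 7-bit offset, the
    // SP-relative T2 form for a 10-bit offset, and otherwise the 32-bit T3 form
    // with a 12-bit offset; the new ldrCompact() asserts that the narrow T1 form
    // applies, and ldrWide8BitImmediate() always emits the T3 form.)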
- void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) + ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) { ASSERT(rn != ARMRegisters::pc); // LDR (literal) ASSERT(imm.isUInt12()); @@ -1041,10 +1085,24 @@ public: if (!((rt | rn) & 8) && imm.isUInt7()) m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt); else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10()) - m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, imm.getUInt10() >> 2); + m_formatter.oneWordOp5Reg3Imm8(OP_LDR_imm_T2, rt, static_cast(imm.getUInt10() >> 2)); else m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12()); } + + ALWAYS_INLINE void ldrWide8BitImmediate(RegisterID rt, RegisterID rn, uint8_t immediate) + { + ASSERT(rn != ARMRegisters::pc); + m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, immediate); + } + + ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) + { + ASSERT(rn != ARMRegisters::pc); // LDR (literal) + ASSERT(imm.isUInt7()); + ASSERT(!((rt | rn) & 8)); + m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDR_imm_T1, imm.getUInt7() >> 2, rn, rt); + } // If index is set, this is a regular offset or a pre-indexed load; // if index is not set then is is a post-index load. @@ -1057,7 +1115,7 @@ public: // _tmp = _reg + offset // MEM[index ? _tmp : _reg] = REG[rt] // if (wback) REG[rn] = _tmp - void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) + ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) { ASSERT(rt != ARMRegisters::pc); ASSERT(rn != ARMRegisters::pc); @@ -1080,7 +1138,7 @@ public: } // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. - void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0) + ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0) { ASSERT(rn != ARMRegisters::pc); // LDR (literal) ASSERT(!BadReg(rm)); @@ -1093,13 +1151,14 @@ public: } // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. - void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) + ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) { ASSERT(rn != ARMRegisters::pc); // LDR (literal) ASSERT(imm.isUInt12()); + ASSERT(!(imm.getUInt12() & 1)); if (!((rt | rn) & 8) && imm.isUInt6()) - m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 2, rn, rt); + m_formatter.oneWordOp5Imm5Reg3Reg3(OP_LDRH_imm_T1, imm.getUInt6() >> 1, rn, rt); else m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T2, rn, rt, imm.getUInt12()); } @@ -1115,7 +1174,7 @@ public: // _tmp = _reg + offset // MEM[index ? 
_tmp : _reg] = REG[rt] // if (wback) REG[rn] = _tmp - void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) + ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) { ASSERT(rt != ARMRegisters::pc); ASSERT(rn != ARMRegisters::pc); @@ -1137,7 +1196,7 @@ public: m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRH_imm_T3, rn, rt, offset); } - void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0) + ALWAYS_INLINE void ldrh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0) { ASSERT(!BadReg(rt)); // Memory hint ASSERT(rn != ARMRegisters::pc); // LDRH (literal) @@ -1184,7 +1243,7 @@ public: m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDRB_imm_T3, rn, rt, offset); } - void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0) + ALWAYS_INLINE void ldrb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0) { ASSERT(rn != ARMRegisters::pc); // LDR (literal) ASSERT(!BadReg(rm)); @@ -1195,6 +1254,30 @@ public: else m_formatter.twoWordOp12Reg4FourFours(OP_LDRB_reg_T2, rn, FourFours(rt, 0, shift, rm)); } + + void ldrsb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0) + { + ASSERT(rn != ARMRegisters::pc); + ASSERT(!BadReg(rm)); + ASSERT(shift <= 3); + + if (!shift && !((rt | rn | rm) & 8)) + m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSB_reg_T1, rm, rn, rt); + else + m_formatter.twoWordOp12Reg4FourFours(OP_LDRSB_reg_T2, rn, FourFours(rt, 0, shift, rm)); + } + + void ldrsh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0) + { + ASSERT(rn != ARMRegisters::pc); + ASSERT(!BadReg(rm)); + ASSERT(shift <= 3); + + if (!shift && !((rt | rn | rm) & 8)) + m_formatter.oneWordOp7Reg3Reg3Reg3(OP_LDRSH_reg_T1, rm, rn, rt); + else + m_formatter.twoWordOp12Reg4FourFours(OP_LDRSH_reg_T2, rn, FourFours(rt, 0, shift, rm)); + } void lsl(RegisterID rd, RegisterID rm, int32_t shiftAmount) { @@ -1204,7 +1287,7 @@ public: m_formatter.twoWordOp16FourFours(OP_LSL_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm)); } - void lsl(RegisterID rd, RegisterID rn, RegisterID rm) + ALWAYS_INLINE void lsl(RegisterID rd, RegisterID rn, RegisterID rm) { ASSERT(!BadReg(rd)); ASSERT(!BadReg(rn)); @@ -1212,7 +1295,7 @@ public: m_formatter.twoWordOp12Reg4FourFours(OP_LSL_reg_T2, rn, FourFours(0xf, rd, 0, rm)); } - void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount) + ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rm, int32_t shiftAmount) { ASSERT(!BadReg(rd)); ASSERT(!BadReg(rm)); @@ -1220,7 +1303,7 @@ public: m_formatter.twoWordOp16FourFours(OP_LSR_imm_T1, FourFours(shift.hi4(), rd, shift.lo4(), rm)); } - void lsr(RegisterID rd, RegisterID rn, RegisterID rm) + ALWAYS_INLINE void lsr(RegisterID rd, RegisterID rn, RegisterID rm) { ASSERT(!BadReg(rd)); ASSERT(!BadReg(rn)); @@ -1228,7 +1311,7 @@ public: m_formatter.twoWordOp12Reg4FourFours(OP_LSR_reg_T2, rn, FourFours(0xf, rd, 0, rm)); } - void movT3(RegisterID rd, ARMThumbImmediate imm) + ALWAYS_INLINE void movT3(RegisterID rd, ARMThumbImmediate imm) { ASSERT(imm.isValid()); ASSERT(!imm.isEncodedImm()); @@ -1236,8 +1319,35 @@ public: m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOV_imm_T3, imm.m_value.imm4, rd, imm); } + +#if OS(LINUX) + static void revertJumpTo_movT3movtcmpT2(void* instructionStart, RegisterID left, RegisterID right, uintptr_t imm) + { + uint16_t* address = static_cast(instructionStart); + ARMThumbImmediate lo16 = ARMThumbImmediate::makeUInt16(static_cast(imm)); + ARMThumbImmediate hi16 = ARMThumbImmediate::makeUInt16(static_cast(imm >> 
16)); + address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, lo16); + address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, lo16); + address[2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16); + address[3] = twoWordOp5i6Imm4Reg4EncodedImmSecond(right, hi16); + address[4] = OP_CMP_reg_T2 | left; + cacheFlush(address, sizeof(uint16_t) * 5); + } +#else + static void revertJumpTo_movT3(void* instructionStart, RegisterID rd, ARMThumbImmediate imm) + { + ASSERT(imm.isValid()); + ASSERT(!imm.isEncodedImm()); + ASSERT(!BadReg(rd)); + + uint16_t* address = static_cast(instructionStart); + address[0] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOV_imm_T3, imm); + address[1] = twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, imm); + cacheFlush(address, sizeof(uint16_t) * 2); + } +#endif - void mov(RegisterID rd, ARMThumbImmediate imm) + ALWAYS_INLINE void mov(RegisterID rd, ARMThumbImmediate imm) { ASSERT(imm.isValid()); ASSERT(!BadReg(rd)); @@ -1250,19 +1360,19 @@ public: movT3(rd, imm); } - void mov(RegisterID rd, RegisterID rm) + ALWAYS_INLINE void mov(RegisterID rd, RegisterID rm) { m_formatter.oneWordOp8RegReg143(OP_MOV_reg_T1, rm, rd); } - void movt(RegisterID rd, ARMThumbImmediate imm) + ALWAYS_INLINE void movt(RegisterID rd, ARMThumbImmediate imm) { ASSERT(imm.isUInt16()); ASSERT(!BadReg(rd)); m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MOVT, imm.m_value.imm4, rd, imm); } - void mvn(RegisterID rd, ARMThumbImmediate imm) + ALWAYS_INLINE void mvn(RegisterID rd, ARMThumbImmediate imm) { ASSERT(imm.isEncodedImm()); ASSERT(!BadReg(rd)); @@ -1270,14 +1380,14 @@ public: m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_MVN_imm, 0xf, rd, imm); } - void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift) + ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm, ShiftTypeAndAmount shift) { ASSERT(!BadReg(rd)); ASSERT(!BadReg(rm)); m_formatter.twoWordOp16FourFours(OP_MVN_reg_T2, FourFours(shift.hi4(), rd, shift.lo4(), rm)); } - void mvn(RegisterID rd, RegisterID rm) + ALWAYS_INLINE void mvn(RegisterID rd, RegisterID rm) { if (!((rd | rm) & 8)) m_formatter.oneWordOp10Reg3Reg3(OP_MVN_reg_T1, rm, rd); @@ -1285,13 +1395,13 @@ public: mvn(rd, rm, ShiftTypeAndAmount()); } - void neg(RegisterID rd, RegisterID rm) + ALWAYS_INLINE void neg(RegisterID rd, RegisterID rm) { ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0); sub(rd, zero, rm); } - void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) + ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) { ASSERT(!BadReg(rd)); ASSERT(!BadReg(rn)); @@ -1299,7 +1409,7 @@ public: m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_ORR_imm_T1, rn, rd, imm); } - void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) + ALWAYS_INLINE void orr(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) { ASSERT(!BadReg(rd)); ASSERT(!BadReg(rn)); @@ -1317,7 +1427,7 @@ public: orr(rd, rn, rm, ShiftTypeAndAmount()); } - void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) + ALWAYS_INLINE void orr_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) { ASSERT(!BadReg(rd)); ASSERT(!BadReg(rn)); @@ -1335,7 +1445,7 @@ public: orr_S(rd, rn, rm, ShiftTypeAndAmount()); } - void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount) + ALWAYS_INLINE void ror(RegisterID rd, RegisterID rm, int32_t shiftAmount) { ASSERT(!BadReg(rd)); ASSERT(!BadReg(rm)); @@ -1343,7 +1453,7 @@ public: m_formatter.twoWordOp16FourFours(OP_ROR_imm_T1, FourFours(shift.hi4(), rd, 
shift.lo4(), rm)); } - void ror(RegisterID rd, RegisterID rn, RegisterID rm) + ALWAYS_INLINE void ror(RegisterID rd, RegisterID rn, RegisterID rm) { ASSERT(!BadReg(rd)); ASSERT(!BadReg(rn)); @@ -1351,7 +1461,57 @@ public: m_formatter.twoWordOp12Reg4FourFours(OP_ROR_reg_T2, rn, FourFours(0xf, rd, 0, rm)); } - void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm) + ALWAYS_INLINE void pop(RegisterID dest) + { + if (dest < ARMRegisters::r8) + m_formatter.oneWordOp7Imm9(OP_POP_T1, 1 << dest); + else { + // Load postindexed with writeback. + ldr(dest, ARMRegisters::sp, sizeof(void*), false, true); + } + } + + ALWAYS_INLINE void pop(uint32_t registerList) + { + ASSERT(WTF::bitCount(registerList) > 1); + ASSERT(!((1 << ARMRegisters::pc) & registerList) || !((1 << ARMRegisters::lr) & registerList)); + ASSERT(!((1 << ARMRegisters::sp) & registerList)); + m_formatter.twoWordOp16Imm16(OP_POP_T2, registerList); + } + + ALWAYS_INLINE void push(RegisterID src) + { + if (src < ARMRegisters::r8) + m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 1 << src); + else if (src == ARMRegisters::lr) + m_formatter.oneWordOp7Imm9(OP_PUSH_T1, 0x100); + else { + // Store preindexed with writeback. + str(src, ARMRegisters::sp, -sizeof(void*), true, true); + } + } + + ALWAYS_INLINE void push(uint32_t registerList) + { + ASSERT(WTF::bitCount(registerList) > 1); + ASSERT(!((1 << ARMRegisters::pc) & registerList)); + ASSERT(!((1 << ARMRegisters::sp) & registerList)); + m_formatter.twoWordOp16Imm16(OP_PUSH_T2, registerList); + } + +#if HAVE(ARM_IDIV_INSTRUCTIONS) + template + ALWAYS_INLINE void sdiv(RegisterID rd, RegisterID rn, RegisterID rm) + { + static_assert(datasize == 32, "sdiv datasize must be 32 for armv7s"); + ASSERT(!BadReg(rd)); + ASSERT(!BadReg(rn)); + ASSERT(!BadReg(rm)); + m_formatter.twoWordOp12Reg4FourFours(OP_SDIV_T1, rn, FourFours(0xf, rd, 0xf, rm)); + } +#endif + + ALWAYS_INLINE void smull(RegisterID rdLo, RegisterID rdHi, RegisterID rn, RegisterID rm) { ASSERT(!BadReg(rdLo)); ASSERT(!BadReg(rdHi)); @@ -1362,7 +1522,7 @@ public: } // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. - void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) + ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) { ASSERT(rt != ARMRegisters::pc); ASSERT(rn != ARMRegisters::pc); @@ -1371,7 +1531,7 @@ public: if (!((rt | rn) & 8) && imm.isUInt7()) m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STR_imm_T1, imm.getUInt7() >> 2, rn, rt); else if ((rn == ARMRegisters::sp) && !(rt & 8) && imm.isUInt10()) - m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, imm.getUInt10() >> 2); + m_formatter.oneWordOp5Reg3Imm8(OP_STR_imm_T2, rt, static_cast(imm.getUInt10() >> 2)); else m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STR_imm_T3, rn, rt, imm.getUInt12()); } @@ -1387,7 +1547,7 @@ public: // _tmp = _reg + offset // MEM[index ? _tmp : _reg] = REG[rt] // if (wback) REG[rn] = _tmp - void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) + ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) { ASSERT(rt != ARMRegisters::pc); ASSERT(rn != ARMRegisters::pc); @@ -1410,7 +1570,7 @@ public: } // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. 
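    // (Related note on the push()/pop() helpers introduced earlier in this class:
    // a single low register uses the 16-bit PUSH/POP T1 encoding, lr is pushed via
    // the T1 form's M bit (0x100), and any other single register falls back to an
    // equivalent pre-indexed store / post-indexed load through sp with writeback;
    // multi-register lists use the 32-bit T2 encodings and assert that sp is not
    // in the list.)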
- void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift=0) + ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0) { ASSERT(rn != ARMRegisters::pc); ASSERT(!BadReg(rm)); @@ -1422,7 +1582,125 @@ public: m_formatter.twoWordOp12Reg4FourFours(OP_STR_reg_T2, rn, FourFours(rt, 0, shift, rm)); } - void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) + // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. + ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) + { + ASSERT(rt != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); + ASSERT(imm.isUInt12()); + + if (!((rt | rn) & 8) && imm.isUInt7()) + m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRB_imm_T1, imm.getUInt7() >> 2, rn, rt); + else + m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T2, rn, rt, imm.getUInt12()); + } + + // If index is set, this is a regular offset or a pre-indexed store; + // if index is not set then is is a post-index store. + // + // If wback is set rn is updated - this is a pre or post index store, + // if wback is not set this is a regular offset memory access. + // + // (-255 <= offset <= 255) + // _reg = REG[rn] + // _tmp = _reg + offset + // MEM[index ? _tmp : _reg] = REG[rt] + // if (wback) REG[rn] = _tmp + ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) + { + ASSERT(rt != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); + ASSERT(index || wback); + ASSERT(!wback | (rt != rn)); + + bool add = true; + if (offset < 0) { + add = false; + offset = -offset; + } + ASSERT((offset & ~0xff) == 0); + + offset |= (wback << 8); + offset |= (add << 9); + offset |= (index << 10); + offset |= (1 << 11); + + m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRB_imm_T3, rn, rt, offset); + } + + // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. + ALWAYS_INLINE void strb(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0) + { + ASSERT(rn != ARMRegisters::pc); + ASSERT(!BadReg(rm)); + ASSERT(shift <= 3); + + if (!shift && !((rt | rn | rm) & 8)) + m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRB_reg_T1, rm, rn, rt); + else + m_formatter.twoWordOp12Reg4FourFours(OP_STRB_reg_T2, rn, FourFours(rt, 0, shift, rm)); + } + + // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. + ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) + { + ASSERT(rt != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); + ASSERT(imm.isUInt12()); + + if (!((rt | rn) & 8) && imm.isUInt6()) + m_formatter.oneWordOp5Imm5Reg3Reg3(OP_STRH_imm_T1, imm.getUInt6() >> 1, rn, rt); + else + m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T2, rn, rt, imm.getUInt12()); + } + + // If index is set, this is a regular offset or a pre-indexed store; + // if index is not set then is is a post-index store. + // + // If wback is set rn is updated - this is a pre or post index store, + // if wback is not set this is a regular offset memory access. + // + // (-255 <= offset <= 255) + // _reg = REG[rn] + // _tmp = _reg + offset + // MEM[index ? 
_tmp : _reg] = REG[rt] + // if (wback) REG[rn] = _tmp + ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, int offset, bool index, bool wback) + { + ASSERT(rt != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); + ASSERT(index || wback); + ASSERT(!wback | (rt != rn)); + + bool add = true; + if (offset < 0) { + add = false; + offset = -offset; + } + ASSERT(!(offset & ~0xff)); + + offset |= (wback << 8); + offset |= (add << 9); + offset |= (index << 10); + offset |= (1 << 11); + + m_formatter.twoWordOp12Reg4Reg4Imm12(OP_STRH_imm_T3, rn, rt, offset); + } + + // rt == ARMRegisters::pc only allowed if last instruction in IT (if then) block. + ALWAYS_INLINE void strh(RegisterID rt, RegisterID rn, RegisterID rm, unsigned shift = 0) + { + ASSERT(rn != ARMRegisters::pc); + ASSERT(!BadReg(rm)); + ASSERT(shift <= 3); + + if (!shift && !((rt | rn | rm) & 8)) + m_formatter.oneWordOp7Reg3Reg3Reg3(OP_STRH_reg_T1, rm, rn, rt); + else + m_formatter.twoWordOp12Reg4FourFours(OP_STRH_reg_T2, rn, FourFours(rt, 0, shift, rm)); + } + + ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, ARMThumbImmediate imm) { // Rd can only be SP if Rn is also SP. ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); @@ -1431,7 +1709,8 @@ public: ASSERT(imm.isValid()); if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) { - m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2); + ASSERT(!(imm.getUInt16() & 3)); + m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast(imm.getUInt9() >> 2)); return; } else if (!((rd | rn) & 8)) { if (imm.isUInt3()) { @@ -1451,7 +1730,7 @@ public: } } - void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn) + ALWAYS_INLINE void sub(RegisterID rd, ARMThumbImmediate imm, RegisterID rn) { ASSERT(rd != ARMRegisters::pc); ASSERT(rn != ARMRegisters::pc); @@ -1464,7 +1743,7 @@ public: m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_imm_T2, rn, rd, imm); } - void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) + ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) { ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); ASSERT(rd != ARMRegisters::pc); @@ -1474,7 +1753,7 @@ public: } // NOTE: In an IT block, add doesn't modify the flags register. - void sub(RegisterID rd, RegisterID rn, RegisterID rm) + ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm) { if (!((rd | rn | rm) & 8)) m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd); @@ -1492,7 +1771,8 @@ public: ASSERT(imm.isValid()); if ((rn == ARMRegisters::sp) && (rd == ARMRegisters::sp) && imm.isUInt9()) { - m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, imm.getUInt9() >> 2); + ASSERT(!(imm.getUInt16() & 3)); + m_formatter.oneWordOp9Imm7(OP_SUB_SP_imm_T1, static_cast(imm.getUInt9() >> 2)); return; } else if (!((rd | rn) & 8)) { if (imm.isUInt3()) { @@ -1507,8 +1787,18 @@ public: m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_SUB_S_imm_T3, rn, rd, imm); } + ALWAYS_INLINE void sub_S(RegisterID rd, ARMThumbImmediate imm, RegisterID rn) + { + ASSERT(rd != ARMRegisters::pc); + ASSERT(rn != ARMRegisters::pc); + ASSERT(imm.isValid()); + ASSERT(imm.isUInt12()); + + m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_RSB_S_imm_T2, rn, rd, imm); + } + // Not allowed in an IT (if then) block? 
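    // (Clarifying note on the immediate-first overloads above: sub(rd, imm, rn)
    // and the new sub_S(rd, imm, rn) emit RSB (reverse subtract), i.e.
    // rd = imm - rn; neg() is implemented on top of this by passing a zero
    // immediate.)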
- void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) + ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) { ASSERT((rd != ARMRegisters::sp) || (rn == ARMRegisters::sp)); ASSERT(rd != ARMRegisters::pc); @@ -1518,7 +1808,7 @@ public: } // Not allowed in an IT (if then) block. - void sub_S(RegisterID rd, RegisterID rn, RegisterID rm) + ALWAYS_INLINE void sub_S(RegisterID rd, RegisterID rn, RegisterID rm) { if (!((rd | rn | rm) & 8)) m_formatter.oneWordOp7Reg3Reg3Reg3(OP_SUB_reg_T1, rm, rn, rd); @@ -1526,7 +1816,7 @@ public: sub_S(rd, rn, rm, ShiftTypeAndAmount()); } - void tst(RegisterID rn, ARMThumbImmediate imm) + ALWAYS_INLINE void tst(RegisterID rn, ARMThumbImmediate imm) { ASSERT(!BadReg(rn)); ASSERT(imm.isEncodedImm()); @@ -1534,14 +1824,14 @@ public: m_formatter.twoWordOp5i6Imm4Reg4EncodedImm(OP_TST_imm, rn, (RegisterID)0xf, imm); } - void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) + ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm, ShiftTypeAndAmount shift) { ASSERT(!BadReg(rn)); ASSERT(!BadReg(rm)); m_formatter.twoWordOp12Reg4FourFours(OP_TST_reg_T2, rn, FourFours(shift.hi4(), 0xf, shift.lo4(), rm)); } - void tst(RegisterID rn, RegisterID rm) + ALWAYS_INLINE void tst(RegisterID rn, RegisterID rm) { if ((rn | rm) & 8) tst(rn, rm, ShiftTypeAndAmount()); @@ -1549,34 +1839,58 @@ public: m_formatter.oneWordOp10Reg3Reg3(OP_TST_reg_T1, rm, rn); } - void vadd_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm) + ALWAYS_INLINE void ubfx(RegisterID rd, RegisterID rn, unsigned lsb, unsigned width) + { + ASSERT(lsb < 32); + ASSERT((width >= 1) && (width <= 32)); + ASSERT((lsb + width) <= 32); + m_formatter.twoWordOp12Reg40Imm3Reg4Imm20Imm5(OP_UBFX_T1, rd, rn, (lsb & 0x1c) << 10, (lsb & 0x3) << 6, (width - 1) & 0x1f); + } + +#if HAVE(ARM_IDIV_INSTRUCTIONS) + ALWAYS_INLINE void udiv(RegisterID rd, RegisterID rn, RegisterID rm) + { + ASSERT(!BadReg(rd)); + ASSERT(!BadReg(rn)); + ASSERT(!BadReg(rm)); + m_formatter.twoWordOp12Reg4FourFours(OP_UDIV_T1, rn, FourFours(0xf, rd, 0xf, rm)); + } +#endif + + void vadd(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm) { m_formatter.vfpOp(OP_VADD_T2, OP_VADD_T2b, true, rn, rd, rm); } - void vcmp_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rm) + void vcmp(FPDoubleRegisterID rd, FPDoubleRegisterID rm) { m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(4), rd, rm); } - void vcmpz_F64(FPDoubleRegisterID rd) + void vcmpz(FPDoubleRegisterID rd) { m_formatter.vfpOp(OP_VCMP, OP_VCMPb, true, VFPOperand(5), rd, VFPOperand(0)); } - void vcvt_F64_S32(FPDoubleRegisterID rd, FPSingleRegisterID rm) + void vcvt_signedToFloatingPoint(FPDoubleRegisterID rd, FPSingleRegisterID rm) { // boolean values are 64bit (toInt, unsigned, roundZero) m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(false, false, false), rd, rm); } - void vcvtr_S32_F64(FPSingleRegisterID rd, FPDoubleRegisterID rm) + void vcvt_floatingPointToSigned(FPSingleRegisterID rd, FPDoubleRegisterID rm) { // boolean values are 64bit (toInt, unsigned, roundZero) m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, false, true), rd, rm); } + + void vcvt_floatingPointToUnsigned(FPSingleRegisterID rd, FPDoubleRegisterID rm) + { + // boolean values are 64bit (toInt, unsigned, roundZero) + m_formatter.vfpOp(OP_VCVT_FPIVFP, OP_VCVT_FPIVFPb, true, vcvtOp(true, true, true), rd, rm); + } - void vdiv_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, 
FPDoubleRegisterID rm) + void vdiv(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm) { m_formatter.vfpOp(OP_VDIV, OP_VDIVb, true, rn, rd, rm); } @@ -1585,17 +1899,41 @@ public: { m_formatter.vfpMemOp(OP_VLDR, OP_VLDRb, true, rn, rd, imm); } + + void flds(FPSingleRegisterID rd, RegisterID rn, int32_t imm) + { + m_formatter.vfpMemOp(OP_FLDS, OP_FLDSb, false, rn, rd, imm); + } void vmov(RegisterID rd, FPSingleRegisterID rn) { ASSERT(!BadReg(rd)); - m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rn, rd, VFPOperand(0)); + m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rn, rd, VFPOperand(0)); } void vmov(FPSingleRegisterID rd, RegisterID rn) { ASSERT(!BadReg(rn)); - m_formatter.vfpOp(OP_VMOV_StoC, OP_VMOV_StoCb, false, rd, rn, VFPOperand(0)); + m_formatter.vfpOp(OP_VMOV_CtoS, OP_VMOV_CtoSb, false, rd, rn, VFPOperand(0)); + } + + void vmov(RegisterID rd1, RegisterID rd2, FPDoubleRegisterID rn) + { + ASSERT(!BadReg(rd1)); + ASSERT(!BadReg(rd2)); + m_formatter.vfpOp(OP_VMOV_DtoC, OP_VMOV_DtoCb, true, rd2, VFPOperand(rd1 | 16), rn); + } + + void vmov(FPDoubleRegisterID rd, RegisterID rn1, RegisterID rn2) + { + ASSERT(!BadReg(rn1)); + ASSERT(!BadReg(rn2)); + m_formatter.vfpOp(OP_VMOV_CtoD, OP_VMOV_CtoDb, true, rn2, VFPOperand(rn1 | 16), rd); + } + + void vmov(FPDoubleRegisterID rd, FPDoubleRegisterID rn) + { + m_formatter.vfpOp(OP_VMOV_T2, OP_VMOV_T2b, true, VFPOperand(0), rd, rn); } void vmrs(RegisterID reg = ARMRegisters::pc) @@ -1604,7 +1942,7 @@ public: m_formatter.vfpOp(OP_VMRS, OP_VMRSb, false, VFPOperand(1), VFPOperand(0x10 | reg), VFPOperand(0)); } - void vmul_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm) + void vmul(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm) { m_formatter.vfpOp(OP_VMUL_T2, OP_VMUL_T2b, true, rn, rd, rm); } @@ -1614,75 +1952,113 @@ public: m_formatter.vfpMemOp(OP_VSTR, OP_VSTRb, true, rn, rd, imm); } - void vsub_F64(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm) + void fsts(FPSingleRegisterID rd, RegisterID rn, int32_t imm) + { + m_formatter.vfpMemOp(OP_FSTS, OP_FSTSb, false, rn, rd, imm); + } + + void vsub(FPDoubleRegisterID rd, FPDoubleRegisterID rn, FPDoubleRegisterID rm) { m_formatter.vfpOp(OP_VSUB_T2, OP_VSUB_T2b, true, rn, rd, rm); } - JmpDst label() + void vabs(FPDoubleRegisterID rd, FPDoubleRegisterID rm) { - return JmpDst(m_formatter.size()); + m_formatter.vfpOp(OP_VABS_T2, OP_VABS_T2b, true, VFPOperand(16), rd, rm); } - - JmpDst align(int alignment) + + void vneg(FPDoubleRegisterID rd, FPDoubleRegisterID rm) { - while (!m_formatter.isAligned(alignment)) - bkpt(); + m_formatter.vfpOp(OP_VNEG_T2, OP_VNEG_T2b, true, VFPOperand(1), rd, rm); + } - return label(); + void vsqrt(FPDoubleRegisterID rd, FPDoubleRegisterID rm) + { + m_formatter.vfpOp(OP_VSQRT_T1, OP_VSQRT_T1b, true, VFPOperand(17), rd, rm); } - static void* getRelocatedAddress(void* code, JmpSrc jump) + void vcvtds(FPDoubleRegisterID rd, FPSingleRegisterID rm) { - ASSERT(jump.m_offset != -1); + m_formatter.vfpOp(OP_VCVTDS_T1, OP_VCVTDS_T1b, false, VFPOperand(23), rd, rm); + } - return reinterpret_cast(reinterpret_cast(code) + jump.m_offset); + void vcvtsd(FPSingleRegisterID rd, FPDoubleRegisterID rm) + { + m_formatter.vfpOp(OP_VCVTSD_T1, OP_VCVTSD_T1b, true, VFPOperand(23), rd, rm); } - - static void* getRelocatedAddress(void* code, JmpDst destination) + + void nop() { - ASSERT(destination.m_offset != -1); + m_formatter.oneWordOp8Imm8(OP_NOP_T1, 0); + } - return reinterpret_cast(reinterpret_cast(code) + 
destination.m_offset); + void nopw() + { + m_formatter.twoWordOp16Op16(OP_NOP_T2a, OP_NOP_T2b); } - static int getDifferenceBetweenLabels(JmpDst src, JmpDst dst) + void dmbSY() { - return dst.m_offset - src.m_offset; + m_formatter.twoWordOp16Op16(OP_DMB_SY_T2a, OP_DMB_SY_T2b); } - - static int getDifferenceBetweenLabels(JmpDst src, JmpSrc dst) + + AssemblerLabel labelIgnoringWatchpoints() { - return dst.m_offset - src.m_offset; + return m_formatter.label(); } - - static int getDifferenceBetweenLabels(JmpSrc src, JmpDst dst) + + AssemblerLabel labelForWatchpoint() { - return dst.m_offset - src.m_offset; + AssemblerLabel result = m_formatter.label(); + if (static_cast(result.m_offset) != m_indexOfLastWatchpoint) + result = label(); + m_indexOfLastWatchpoint = result.m_offset; + m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize(); + return result; } - int executableOffsetFor(int location) + AssemblerLabel label() { - if (!location) - return 0; - return static_cast(m_formatter.data())[location / sizeof(int32_t) - 1]; + AssemblerLabel result = m_formatter.label(); + while (UNLIKELY(static_cast(result.m_offset) < m_indexOfTailOfLastWatchpoint)) { + if (UNLIKELY(static_cast(result.m_offset) + 4 <= m_indexOfTailOfLastWatchpoint)) + nopw(); + else + nop(); + result = m_formatter.label(); + } + return result; } - int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JumpPaddingSizes[jumpType] - JumpSizes[jumpLinkType]; } - - // Assembler admin methods: + AssemblerLabel align(int alignment) + { + while (!m_formatter.isAligned(alignment)) + bkpt(); - size_t size() const + return label(); + } + + static void* getRelocatedAddress(void* code, AssemblerLabel label) + { + ASSERT(label.isSet()); + return reinterpret_cast(reinterpret_cast(code) + label.m_offset); + } + + static int getDifferenceBetweenLabels(AssemblerLabel a, AssemblerLabel b) { - return m_formatter.size(); + return b.m_offset - a.m_offset; } - static bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b) + static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); } + + // Assembler admin methods: + + static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b) { return a.from() < b.from(); } - bool canCompact(JumpType jumpType) + static bool canCompact(JumpType jumpType) { // The following cannot be compacted: // JumpFixed: represents custom jump sequence @@ -1691,7 +2067,7 @@ public: return (jumpType == JumpNoCondition) || (jumpType == JumpCondition); } - JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) + static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { if (jumpType == JumpFixed) return LinkInvalid; @@ -1702,38 +2078,31 @@ public: if (jumpType == JumpConditionFixedSize) return LinkConditionalBX; - const int paddingSize = JumpPaddingSizes[jumpType]; - bool mayTriggerErrata = false; + const int paddingSize = JUMP_ENUM_SIZE(jumpType); if (jumpType == JumpCondition) { // 2-byte conditional T1 - const uint16_t* jumpT1Location = reinterpret_cast(from - (paddingSize - JumpSizes[LinkJumpT1])); + const uint16_t* jumpT1Location = reinterpret_cast_ptr(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT1))); if (canBeJumpT1(jumpT1Location, to)) return LinkJumpT1; // 4-byte conditional T3 - const uint16_t* jumpT3Location = reinterpret_cast(from - (paddingSize - JumpSizes[LinkJumpT3])); - if 
(canBeJumpT3(jumpT3Location, to, mayTriggerErrata)) { - if (!mayTriggerErrata) - return LinkJumpT3; - } + const uint16_t* jumpT3Location = reinterpret_cast_ptr(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT3))); + if (canBeJumpT3(jumpT3Location, to)) + return LinkJumpT3; // 4-byte conditional T4 with IT const uint16_t* conditionalJumpT4Location = - reinterpret_cast(from - (paddingSize - JumpSizes[LinkConditionalJumpT4])); - if (canBeJumpT4(conditionalJumpT4Location, to, mayTriggerErrata)) { - if (!mayTriggerErrata) - return LinkConditionalJumpT4; - } + reinterpret_cast_ptr(from - (paddingSize - JUMP_ENUM_SIZE(LinkConditionalJumpT4))); + if (canBeJumpT4(conditionalJumpT4Location, to)) + return LinkConditionalJumpT4; } else { // 2-byte unconditional T2 - const uint16_t* jumpT2Location = reinterpret_cast(from - (paddingSize - JumpSizes[LinkJumpT2])); + const uint16_t* jumpT2Location = reinterpret_cast_ptr(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT2))); if (canBeJumpT2(jumpT2Location, to)) return LinkJumpT2; // 4-byte unconditional T4 - const uint16_t* jumpT4Location = reinterpret_cast(from - (paddingSize - JumpSizes[LinkJumpT4])); - if (canBeJumpT4(jumpT4Location, to, mayTriggerErrata)) { - if (!mayTriggerErrata) - return LinkJumpT4; - } + const uint16_t* jumpT4Location = reinterpret_cast_ptr(from - (paddingSize - JUMP_ENUM_SIZE(LinkJumpT4))); + if (canBeJumpT4(jumpT4Location, to)) + return LinkJumpT4; // use long jump sequence return LinkBX; } @@ -1742,63 +2111,55 @@ public: return LinkConditionalBX; } - JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) + static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { JumpLinkType linkType = computeJumpType(record.type(), from, to); record.setLinkType(linkType); return linkType; } - void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) - { - int32_t ptr = regionStart / sizeof(int32_t); - const int32_t end = regionEnd / sizeof(int32_t); - int32_t* offsets = static_cast(m_formatter.data()); - while (ptr < end) - offsets[ptr++] = offset; - } - - Vector& jumpsToLink() + Vector& jumpsToLink() { std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator); return m_jumpsToLink; } - void link(LinkRecord& record, uint8_t* from, uint8_t* to) + static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to) { switch (record.linkType()) { case LinkJumpT1: - linkJumpT1(record.condition(), reinterpret_cast(from), to); + linkJumpT1(record.condition(), reinterpret_cast_ptr(from), to); break; case LinkJumpT2: - linkJumpT2(reinterpret_cast(from), to); + linkJumpT2(reinterpret_cast_ptr(from), to); break; case LinkJumpT3: - linkJumpT3(record.condition(), reinterpret_cast(from), to); + linkJumpT3(record.condition(), reinterpret_cast_ptr(from), to); break; case LinkJumpT4: - linkJumpT4(reinterpret_cast(from), to); + linkJumpT4(reinterpret_cast_ptr(from), to); break; case LinkConditionalJumpT4: - linkConditionalJumpT4(record.condition(), reinterpret_cast(from), to); + linkConditionalJumpT4(record.condition(), reinterpret_cast_ptr(from), to); break; case LinkConditionalBX: - linkConditionalBX(record.condition(), reinterpret_cast(from), to); + linkConditionalBX(record.condition(), reinterpret_cast_ptr(from), to); break; case LinkBX: - linkBX(reinterpret_cast(from), to); + linkBX(reinterpret_cast_ptr(from), to); break; default: - ASSERT_NOT_REACHED(); + RELEASE_ASSERT_NOT_REACHED(); break; } } void* unlinkedCode() { return 
m_formatter.data(); } - - static unsigned getCallReturnOffset(JmpSrc call) + size_t codeSize() const { return m_formatter.codeSize(); } + + static unsigned getCallReturnOffset(AssemblerLabel call) { - ASSERT(call.m_offset >= 0); + ASSERT(call.isSet()); return call.m_offset; } @@ -1810,35 +2171,32 @@ public: // writable region of memory; to modify the code in an execute-only execuable // pool the 'repatch' and 'relink' methods should be used. - void linkJump(JmpSrc from, JmpDst to) + void linkJump(AssemblerLabel from, AssemblerLabel to, JumpType type, Condition condition) { - ASSERT(to.m_offset != -1); - ASSERT(from.m_offset != -1); - m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, from.m_type, from.m_condition)); + ASSERT(to.isSet()); + ASSERT(from.isSet()); + m_jumpsToLink.append(LinkRecord(from.m_offset, to.m_offset, type, condition)); } - static void linkJump(void* code, JmpSrc from, void* to) + static void linkJump(void* code, AssemblerLabel from, void* to) { - ASSERT(from.m_offset != -1); + ASSERT(from.isSet()); uint16_t* location = reinterpret_cast(reinterpret_cast(code) + from.m_offset); linkJumpAbsolute(location, to); } - // bah, this mathod should really be static, since it is used by the LinkBuffer. - // return a bool saying whether the link was successful? - static void linkCall(void* code, JmpSrc from, void* to) + static void linkCall(void* code, AssemblerLabel from, void* to) { ASSERT(!(reinterpret_cast(code) & 1)); - ASSERT(from.m_offset != -1); - ASSERT(reinterpret_cast(to) & 1); + ASSERT(from.isSet()); - setPointer(reinterpret_cast(reinterpret_cast(code) + from.m_offset) - 1, to); + setPointer(reinterpret_cast(reinterpret_cast(code) + from.m_offset) - 1, to, false); } - static void linkPointer(void* code, JmpDst where, void* value) + static void linkPointer(void* code, AssemblerLabel where, void* value) { - setPointer(reinterpret_cast(code) + where.m_offset, value); + setPointer(reinterpret_cast(code) + where.m_offset, value, false); } static void relinkJump(void* from, void* to) @@ -1848,45 +2206,175 @@ public: linkJumpAbsolute(reinterpret_cast(from), to); - ExecutableAllocator::cacheFlush(reinterpret_cast(from) - 5, 5 * sizeof(uint16_t)); + cacheFlush(reinterpret_cast(from) - 5, 5 * sizeof(uint16_t)); } static void relinkCall(void* from, void* to) { ASSERT(!(reinterpret_cast(from) & 1)); - ASSERT(reinterpret_cast(to) & 1); - setPointer(reinterpret_cast(from) - 1, to); + setPointer(reinterpret_cast(from) - 1, to, true); + } + + static void* readCallTarget(void* from) + { + return readPointer(reinterpret_cast(from) - 1); } static void repatchInt32(void* where, int32_t value) { ASSERT(!(reinterpret_cast(where) & 1)); - setInt32(where, value); + setInt32(where, value, true); + } + + static void repatchCompact(void* where, int32_t offset) + { + ASSERT(offset >= -255 && offset <= 255); + + bool add = true; + if (offset < 0) { + add = false; + offset = -offset; + } + + offset |= (add << 9); + offset |= (1 << 10); + offset |= (1 << 11); + + uint16_t* location = reinterpret_cast(where); + location[1] &= ~((1 << 12) - 1); + location[1] |= offset; + cacheFlush(location, sizeof(uint16_t) * 2); } static void repatchPointer(void* where, void* value) { ASSERT(!(reinterpret_cast(where) & 1)); - setPointer(where, value); + setPointer(where, value, true); } - static void repatchLoadPtrToLEA(void* where) + static void* readPointer(void* where) { - ASSERT(!(reinterpret_cast(where) & 1)); - uint16_t* loadOp = reinterpret_cast(where) + 4; + return 
reinterpret_cast(readInt32(where)); + } + + static void replaceWithJump(void* instructionStart, void* to) + { + ASSERT(!(bitwise_cast(instructionStart) & 1)); + ASSERT(!(bitwise_cast(to) & 1)); - ASSERT((loadOp[0] & 0xfff0) == OP_LDR_reg_T2); - ASSERT((loadOp[1] & 0x0ff0) == 0); - int rn = loadOp[0] & 0xf; - int rt = loadOp[1] >> 12; - int rm = loadOp[1] & 0xf; +#if OS(LINUX) + if (canBeJumpT4(reinterpret_cast(instructionStart), to)) { + uint16_t* ptr = reinterpret_cast(instructionStart) + 2; + linkJumpT4(ptr, to); + cacheFlush(ptr - 2, sizeof(uint16_t) * 2); + } else { + uint16_t* ptr = reinterpret_cast(instructionStart) + 5; + linkBX(ptr, to); + cacheFlush(ptr - 5, sizeof(uint16_t) * 5); + } +#else + uint16_t* ptr = reinterpret_cast(instructionStart) + 2; + linkJumpT4(ptr, to); + cacheFlush(ptr - 2, sizeof(uint16_t) * 2); +#endif + } + + static ptrdiff_t maxJumpReplacementSize() + { +#if OS(LINUX) + return 10; +#else + return 4; +#endif + } + + static void replaceWithLoad(void* instructionStart) + { + ASSERT(!(bitwise_cast(instructionStart) & 1)); + uint16_t* ptr = reinterpret_cast(instructionStart); + switch (ptr[0] & 0xFFF0) { + case OP_LDR_imm_T3: + break; + case OP_ADD_imm_T3: + ASSERT(!(ptr[1] & 0xF000)); + ptr[0] &= 0x000F; + ptr[0] |= OP_LDR_imm_T3; + ptr[1] |= (ptr[1] & 0x0F00) << 4; + ptr[1] &= 0xF0FF; + cacheFlush(ptr, sizeof(uint16_t) * 2); + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } - loadOp[0] = OP_ADD_reg_T3 | rn; - loadOp[1] = rt << 8 | rm; - ExecutableAllocator::cacheFlush(loadOp, sizeof(uint32_t)); + static void replaceWithAddressComputation(void* instructionStart) + { + ASSERT(!(bitwise_cast(instructionStart) & 1)); + uint16_t* ptr = reinterpret_cast(instructionStart); + switch (ptr[0] & 0xFFF0) { + case OP_LDR_imm_T3: + ASSERT(!(ptr[1] & 0x0F00)); + ptr[0] &= 0x000F; + ptr[0] |= OP_ADD_imm_T3; + ptr[1] |= (ptr[1] & 0xF000) >> 4; + ptr[1] &= 0x0FFF; + cacheFlush(ptr, sizeof(uint16_t) * 2); + break; + case OP_ADD_imm_T3: + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } + + unsigned debugOffset() { return m_formatter.debugOffset(); } + +#if OS(LINUX) + static inline void linuxPageFlush(uintptr_t begin, uintptr_t end) + { + asm volatile( + "push {r7}\n" + "mov r0, %0\n" + "mov r1, %1\n" + "movw r7, #0x2\n" + "movt r7, #0xf\n" + "movs r2, #0x0\n" + "svc 0x0\n" + "pop {r7}\n" + : + : "r" (begin), "r" (end) + : "r0", "r1", "r2"); + } +#endif + + static void cacheFlush(void* code, size_t size) + { +#if OS(IOS) + sys_cache_control(kCacheFunctionPrepareForExecution, code, size); +#elif OS(LINUX) + size_t page = pageSize(); + uintptr_t current = reinterpret_cast(code); + uintptr_t end = current + size; + uintptr_t firstPageEnd = (current & ~(page - 1)) + page; + + if (end <= firstPageEnd) { + linuxPageFlush(current, end); + return; + } + + linuxPageFlush(current, firstPageEnd); + + for (current = firstPageEnd; current + page < end; current += page) + linuxPageFlush(current, current + page); + + linuxPageFlush(current, end); +#else +#error "The cacheFlush support is missing on this platform." 
+#endif } private: @@ -1948,6 +2436,7 @@ private: if (isRoundZero) op |= 0x10; } else { + ASSERT(!isRoundZero); // 'op' field in instruction is isUnsigned if (!isUnsigned) op |= 0x10; @@ -1955,7 +2444,7 @@ private: return VFPOperand(op); } - static void setInt32(void* code, uint32_t value) + static void setInt32(void* code, uint32_t value, bool flush) { uint16_t* location = reinterpret_cast(code); ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2)); @@ -1967,12 +2456,41 @@ private: location[-2] = twoWordOp5i6Imm4Reg4EncodedImmFirst(OP_MOVT, hi16); location[-1] = twoWordOp5i6Imm4Reg4EncodedImmSecond((location[-1] >> 8) & 0xf, hi16); - ExecutableAllocator::cacheFlush(location - 4, 4 * sizeof(uint16_t)); + if (flush) + cacheFlush(location - 4, 4 * sizeof(uint16_t)); + } + + static int32_t readInt32(void* code) + { + uint16_t* location = reinterpret_cast(code); + ASSERT(isMOV_imm_T3(location - 4) && isMOVT(location - 2)); + + ARMThumbImmediate lo16; + ARMThumbImmediate hi16; + decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(lo16, location[-4]); + decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(lo16, location[-3]); + decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(hi16, location[-2]); + decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(hi16, location[-1]); + uint32_t result = hi16.asUInt16(); + result <<= 16; + result |= lo16.asUInt16(); + return static_cast(result); + } + + static void setUInt7ForLoad(void* code, ARMThumbImmediate imm) + { + // Requires us to have planted a LDR_imm_T1 + ASSERT(imm.isValid()); + ASSERT(imm.isUInt7()); + uint16_t* location = reinterpret_cast(code); + location[0] &= ~((static_cast(0x7f) >> 2) << 6); + location[0] |= (imm.getUInt7() >> 2) << 6; + cacheFlush(location, sizeof(uint16_t)); } - static void setPointer(void* code, void* value) + static void setPointer(void* code, void* value, bool flush) { - setInt32(code, reinterpret_cast(value)); + setInt32(code, reinterpret_cast(value), flush); } static bool isB(void* address) @@ -2037,49 +2555,25 @@ private: return ((relative << 20) >> 20) == relative; } - static bool canBeJumpT3(const uint16_t* instruction, const void* target, bool& mayTriggerErrata) + static bool canBeJumpT3(const uint16_t* instruction, const void* target) { ASSERT(!(reinterpret_cast(instruction) & 1)); ASSERT(!(reinterpret_cast(target) & 1)); intptr_t relative = reinterpret_cast(target) - (reinterpret_cast(instruction)); - // From Cortex-A8 errata: - // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and - // the target of the branch falls within the first region it is - // possible for the processor to incorrectly determine the branch - // instruction, and it is also possible in some cases for the processor - // to enter a deadlock state. 
- // The instruction is spanning two pages if it ends at an address ending 0x002 - bool spansTwo4K = ((reinterpret_cast(instruction) & 0xfff) == 0x002); - mayTriggerErrata = spansTwo4K; - // The target is in the first page if the jump branch back by [3..0x1002] bytes - bool targetInFirstPage = (relative >= -0x1002) && (relative < -2); - bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage; - return ((relative << 11) >> 11) == relative && !wouldTriggerA8Errata; + return ((relative << 11) >> 11) == relative; } - static bool canBeJumpT4(const uint16_t* instruction, const void* target, bool& mayTriggerErrata) + static bool canBeJumpT4(const uint16_t* instruction, const void* target) { ASSERT(!(reinterpret_cast(instruction) & 1)); ASSERT(!(reinterpret_cast(target) & 1)); intptr_t relative = reinterpret_cast(target) - (reinterpret_cast(instruction)); - // From Cortex-A8 errata: - // If the 32-bit Thumb-2 branch instruction spans two 4KiB regions and - // the target of the branch falls within the first region it is - // possible for the processor to incorrectly determine the branch - // instruction, and it is also possible in some cases for the processor - // to enter a deadlock state. - // The instruction is spanning two pages if it ends at an address ending 0x002 - bool spansTwo4K = ((reinterpret_cast(instruction) & 0xfff) == 0x002); - mayTriggerErrata = spansTwo4K; - // The target is in the first page if the jump branch back by [3..0x1002] bytes - bool targetInFirstPage = (relative >= -0x1002) && (relative < -2); - bool wouldTriggerA8Errata = spansTwo4K && targetInFirstPage; - return ((relative << 7) >> 7) == relative && !wouldTriggerA8Errata; + return ((relative << 7) >> 7) == relative; } - void linkJumpT1(Condition cond, uint16_t* instruction, void* target) + static void linkJumpT1(Condition cond, uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( ASSERT(!(reinterpret_cast(instruction) & 1)); @@ -2115,14 +2609,12 @@ private: instruction[-1] = OP_B_T2 | ((relative & 0xffe) >> 1); } - void linkJumpT3(Condition cond, uint16_t* instruction, void* target) + static void linkJumpT3(Condition cond, uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( ASSERT(!(reinterpret_cast(instruction) & 1)); ASSERT(!(reinterpret_cast(target) & 1)); - bool scratch; - UNUSED_PARAM(scratch); - ASSERT(canBeJumpT3(instruction, target, scratch)); + ASSERT(canBeJumpT3(instruction, target)); intptr_t relative = reinterpret_cast(target) - (reinterpret_cast(instruction)); @@ -2137,9 +2629,7 @@ private: // FIMXE: this should be up in the MacroAssembler layer. :-( ASSERT(!(reinterpret_cast(instruction) & 1)); ASSERT(!(reinterpret_cast(target) & 1)); - bool scratch; - UNUSED_PARAM(scratch); - ASSERT(canBeJumpT4(instruction, target, scratch)); + ASSERT(canBeJumpT4(instruction, target)); intptr_t relative = reinterpret_cast(target) - (reinterpret_cast(instruction)); // ARM encoding for the top two bits below the sign bit is 'peculiar'. @@ -2152,7 +2642,7 @@ private: instruction[-1] = OP_B_T4b | ((relative & 0x800000) >> 10) | ((relative & 0x400000) >> 11) | ((relative & 0xffe) >> 1); } - void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target) + static void linkConditionalJumpT4(Condition cond, uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. 
:-( ASSERT(!(reinterpret_cast(instruction) & 1)); @@ -2178,7 +2668,7 @@ private: instruction[-1] = OP_BX | (JUMP_TEMPORARY_REGISTER << 3); } - void linkConditionalBX(Condition cond, uint16_t* instruction, void* target) + static void linkConditionalBX(Condition cond, uint16_t* instruction, void* target) { // FIMXE: this should be up in the MacroAssembler layer. :-( ASSERT(!(reinterpret_cast(instruction) & 1)); @@ -2197,8 +2687,7 @@ private: ASSERT((isMOV_imm_T3(instruction - 5) && isMOVT(instruction - 3) && isBX(instruction - 1)) || (isNOP_T1(instruction - 5) && isNOP_T2(instruction - 4) && isB(instruction - 2))); - bool scratch; - if (canBeJumpT4(instruction, target, scratch)) { + if (canBeJumpT4(instruction, target)) { // There may be a better way to fix this, but right now put the NOPs first, since in the // case of an conditional branch this will be coming after an ITTT predicating *three* // instructions! Looking backwards to modify the ITTT to an IT is not easy, due to @@ -2225,66 +2714,90 @@ private: return op | (imm.m_value.i << 10) | imm.m_value.imm4; } + static void decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst(ARMThumbImmediate& result, uint16_t value) + { + result.m_value.i = (value >> 10) & 1; + result.m_value.imm4 = value & 15; + } + static uint16_t twoWordOp5i6Imm4Reg4EncodedImmSecond(uint16_t rd, ARMThumbImmediate imm) { return (imm.m_value.imm3 << 12) | (rd << 8) | imm.m_value.imm8; } + static void decodeTwoWordOp5i6Imm4Reg4EncodedImmSecond(ARMThumbImmediate& result, uint16_t value) + { + result.m_value.imm3 = (value >> 12) & 7; + result.m_value.imm8 = value & 255; + } + class ARMInstructionFormatter { public: - void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm) + ALWAYS_INLINE void oneWordOp5Reg3Imm8(OpcodeID op, RegisterID rd, uint8_t imm) { m_buffer.putShort(op | (rd << 8) | imm); } - void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2) + ALWAYS_INLINE void oneWordOp5Imm5Reg3Reg3(OpcodeID op, uint8_t imm, RegisterID reg1, RegisterID reg2) { m_buffer.putShort(op | (imm << 6) | (reg1 << 3) | reg2); } - void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3) + ALWAYS_INLINE void oneWordOp7Reg3Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2, RegisterID reg3) { m_buffer.putShort(op | (reg1 << 6) | (reg2 << 3) | reg3); } - void oneWordOp8Imm8(OpcodeID op, uint8_t imm) + ALWAYS_INLINE void oneWordOp7Imm9(OpcodeID op, uint16_t imm) + { + m_buffer.putShort(op | imm); + } + + ALWAYS_INLINE void oneWordOp8Imm8(OpcodeID op, uint8_t imm) { m_buffer.putShort(op | imm); } - void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2) + ALWAYS_INLINE void oneWordOp8RegReg143(OpcodeID op, RegisterID reg1, RegisterID reg2) { m_buffer.putShort(op | ((reg2 & 8) << 4) | (reg1 << 3) | (reg2 & 7)); } - void oneWordOp9Imm7(OpcodeID op, uint8_t imm) + + ALWAYS_INLINE void oneWordOp9Imm7(OpcodeID op, uint8_t imm) { m_buffer.putShort(op | imm); } - void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2) + ALWAYS_INLINE void oneWordOp10Reg3Reg3(OpcodeID op, RegisterID reg1, RegisterID reg2) { m_buffer.putShort(op | (reg1 << 3) | reg2); } - void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff) + ALWAYS_INLINE void twoWordOp12Reg4FourFours(OpcodeID1 op, RegisterID reg, FourFours ff) { m_buffer.putShort(op | reg); m_buffer.putShort(ff.m_u.value); } - void twoWordOp16FourFours(OpcodeID1 op, FourFours ff) + ALWAYS_INLINE void twoWordOp16FourFours(OpcodeID1 op, FourFours 
ff)
        {
            m_buffer.putShort(op);
            m_buffer.putShort(ff.m_u.value);
        }

-        void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
+        ALWAYS_INLINE void twoWordOp16Op16(OpcodeID1 op1, OpcodeID2 op2)
         {
             m_buffer.putShort(op1);
             m_buffer.putShort(op2);
         }

-        void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
+        ALWAYS_INLINE void twoWordOp16Imm16(OpcodeID1 op1, uint16_t imm)
+        {
+            m_buffer.putShort(op1);
+            m_buffer.putShort(imm);
+        }
+
+        ALWAYS_INLINE void twoWordOp5i6Imm4Reg4EncodedImm(OpcodeID1 op, int imm4, RegisterID rd, ARMThumbImmediate imm)
         {
             ARMThumbImmediate newImm = imm;
             newImm.m_value.imm4 = imm4;
@@ -2293,17 +2806,23 @@ private:
             m_buffer.putShort(ARMv7Assembler::twoWordOp5i6Imm4Reg4EncodedImmSecond(rd, newImm));
         }

-        void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
+        ALWAYS_INLINE void twoWordOp12Reg4Reg4Imm12(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm)
         {
             m_buffer.putShort(op | reg1);
             m_buffer.putShort((reg2 << 12) | imm);
         }

+        ALWAYS_INLINE void twoWordOp12Reg40Imm3Reg4Imm20Imm5(OpcodeID1 op, RegisterID reg1, RegisterID reg2, uint16_t imm1, uint16_t imm2, uint16_t imm3)
+        {
+            m_buffer.putShort(op | reg1);
+            m_buffer.putShort((imm1 << 12) | (reg2 << 8) | (imm2 << 6) | imm3);
+        }
+
         // Formats up instructions of the pattern:
         //    111111111B11aaaa:bbbb222SA2C2cccc
         // Where 1s in the pattern come from op1, 2s in the pattern come from op2, S is the provided size bit.
         // Operands provide 5 bit values of the form Aaaaa, Bbbbb, Ccccc.
-        void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
+        ALWAYS_INLINE void vfpOp(OpcodeID1 op1, OpcodeID2 op2, bool size, VFPOperand a, VFPOperand b, VFPOperand c)
         {
             ASSERT(!(op1 & 0x004f));
             ASSERT(!(op2 & 0xf1af));
@@ -2313,7 +2832,7 @@
         // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
         // (i.e. +/-(0..255) 32-bit words)
-        void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
+        ALWAYS_INLINE void vfpMemOp(OpcodeID1 op1, OpcodeID2 op2, bool size, RegisterID rn, VFPOperand rd, int32_t imm)
         {
             bool up = true;
             if (imm < 0) {
@@ -2331,17 +2850,19 @@
         // Administrative methods:

-        size_t size() const { return m_buffer.size(); }
+        size_t codeSize() const { return m_buffer.codeSize(); }
+        AssemblerLabel label() const { return m_buffer.label(); }
         bool isAligned(int alignment) const { return m_buffer.isAligned(alignment); }
         void* data() const { return m_buffer.data(); }
-        void* executableCopy(ExecutablePool* allocator) { return m_buffer.executableCopy(allocator); }
-    private:
+        unsigned debugOffset() { return m_buffer.debugOffset(); }
+
         AssemblerBuffer m_buffer;
     } m_formatter;

-    Vector m_jumpsToLink;
-    Vector m_offsets;
+    Vector m_jumpsToLink;
+    int m_indexOfLastWatchpoint;
+    int m_indexOfTailOfLastWatchpoint;
 };

 } // namespace JSC
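
The short C++ sketches below are editorial illustrations of techniques this patch touches; none of them belong to the diff itself, and every helper name they introduce is hypothetical.

First, the reachability test behind canBeJumpT3()/canBeJumpT4(). With the Cortex-A8 erratum workaround dropped, each helper only has to decide whether the byte displacement to the target fits in the signed immediate of the corresponding Thumb-2 branch form. The patch writes this as a shift round-trip on a 32-bit intptr_t (((relative << 11) >> 11) == relative for the 21-bit T3 offset, << 7 / >> 7 for the 25-bit T4 offset); the bound check below is the equivalent formulation against a fixed-width type.

#include <cstdint>

// Does a byte displacement fit in the signed immediate of a given branch form?
// Equivalent to the shift round-trip used by canBeJumpT3()/canBeJumpT4() on a
// 32-bit intptr_t; 'fitsInSignedBits' is an illustrative name.
constexpr bool fitsInSignedBits(std::int64_t displacement, unsigned bits)
{
    return displacement >= -(static_cast<std::int64_t>(1) << (bits - 1))
        && displacement < (static_cast<std::int64_t>(1) << (bits - 1));
}

static_assert(fitsInSignedBits(0xFFFFE, 21), "B<cond>.W (T3) reaches roughly +/-1MB");
static_assert(!fitsInSignedBits(0x100000, 21), "one byte past the T3 range");
static_assert(fitsInSignedBits(-0x1000000, 25), "B.W (T4) reaches roughly +/-16MB");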
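
setInt32()/setPointer() plant a constant as a MOVW/MOVT pair, and the new readInt32()/readPointer() recover it by pulling the imm4, i, imm3 and imm8 fields back out of each halfword, as the decodeTwoWordOp5i6Imm4Reg4EncodedImmFirst/Second helpers do. A standalone sketch of the reassembly; the helpers are hypothetical, and the field order follows the Thumb-2 MOVW/MOVT (T3) immediate encoding rather than anything stated in this hunk.

#include <cstdint>

// Rebuild the 16-bit immediate of one MOVW or MOVT instruction from its two
// halfwords: imm4 sits in bits 3:0 and i in bit 10 of the first halfword,
// imm3 in bits 14:12 and imm8 in bits 7:0 of the second.  The immediate is
// imm4:i:imm3:imm8 (assumption based on the architectural encoding).
static std::uint16_t decodeImm16(std::uint16_t first, std::uint16_t second)
{
    std::uint16_t imm4 = first & 0xF;
    std::uint16_t i = (first >> 10) & 0x1;
    std::uint16_t imm3 = (second >> 12) & 0x7;
    std::uint16_t imm8 = second & 0xFF;
    return static_cast<std::uint16_t>((imm4 << 12) | (i << 11) | (imm3 << 8) | imm8);
}

// 'code' points at the four halfwords of the MOVW/MOVT sequence, in program order.
static std::uint32_t readMovwMovtConstant(const std::uint16_t code[4])
{
    std::uint32_t lo16 = decodeImm16(code[0], code[1]); // MOVW: low half of the constant
    std::uint32_t hi16 = decodeImm16(code[2], code[3]); // MOVT: high half
    return (hi16 << 16) | lo16;
}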
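
repatchCompact() rewrites only the low 12 bits of the second halfword of a compact load: an 8-bit offset magnitude, an add/subtract flag at bit 9, and two always-set bits at bits 10 and 11, for offsets in [-255, 255]. The same field construction, factored into a hypothetical helper:

#include <cassert>
#include <cstdint>

// Build the low 12 bits of the second halfword of a compact load, mirroring
// the field layout repatchCompact() writes.
static std::uint16_t compactLoadOffsetBits(std::int32_t offset)
{
    assert(offset >= -255 && offset <= 255);

    bool add = true;
    if (offset < 0) {
        add = false;
        offset = -offset;
    }

    std::uint16_t bits = static_cast<std::uint16_t>(offset); // magnitude, bits 0-7
    bits |= static_cast<std::uint16_t>(add) << 9;            // add/subtract flag
    bits |= 1u << 10;                                        // always set
    bits |= 1u << 11;                                        // always set
    return bits;
}

The real method then clears the low 12 bits of the existing halfword, ORs this value in, and flushes the two halfwords from the instruction cache.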
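
The Linux cacheFlush() path walks the requested range page by page, issuing the cacheflush system call (via linuxPageFlush()) separately for the leading partial page, every whole page, and the tail. The splitting logic on its own; flushByPage and the flush callback are illustrative stand-ins for cacheFlush() and linuxPageFlush().

#include <cstddef>
#include <cstdint>
#include <functional>

// Split [begin, end) into chunks that never cross a page boundary and hand
// each chunk to 'flush', the way the patched cacheFlush() drives linuxPageFlush().
static void flushByPage(std::uintptr_t begin, std::uintptr_t end, std::size_t pageSize,
                        const std::function<void(std::uintptr_t, std::uintptr_t)>& flush)
{
    std::uintptr_t firstPageEnd = (begin & ~(pageSize - 1)) + pageSize;

    if (end <= firstPageEnd) {              // whole range sits within one page
        flush(begin, end);
        return;
    }

    flush(begin, firstPageEnd);             // leading partial page

    std::uintptr_t current = firstPageEnd;
    for (; current + pageSize < end; current += pageSize)
        flush(current, current + pageSize); // whole pages

    flush(current, end);                    // trailing remainder
}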
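
Finally, the watchpoint bookkeeping: labelForWatchpoint() reserves maxJumpReplacementSize() bytes after the label, and the next call to label() pads with 4-byte NOP.W or 2-byte NOP instructions until it has cleared that reserved tail, so the watchpoint site can later be overwritten with a full-size jump. The padding policy in isolation, with illustrative names standing in for m_formatter.label() and m_indexOfTailOfLastWatchpoint:

// Minimal sketch of the padding loop in the patched label().
struct WatchpointPadder {
    int offset = 0;                 // current end of the instruction stream
    int tailOfLastWatchpoint = 0;   // first offset past the reserved region

    void nop()  { offset += 2; }    // 16-bit Thumb NOP
    void nopw() { offset += 4; }    // 32-bit Thumb-2 NOP (NOP.W)

    int label()
    {
        // Prefer 4-byte NOPs while they fit, then finish with a 2-byte NOP,
        // so the reserved tail is filled before an offset is handed out.
        while (offset < tailOfLastWatchpoint) {
            if (offset + 4 <= tailOfLastWatchpoint)
                nopw();
            else
                nop();
        }
        return offset;
    }
};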