/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ARMAssembler_h
-#define ARMAssembler_h
+#ifndef ARM64Assembler_h
+#define ARM64Assembler_h
#if ENABLE(ASSEMBLER) && CPU(ARM64)
#include "AssemblerBuffer.h"
+#include <limits.h>
#include <wtf/Assertions.h>
#include <wtf/Vector.h>
#include <stdint.h>
#define DATASIZE DATASIZE_OF(datasize)
#define MEMOPSIZE MEMOPSIZE_OF(datasize)
#define CHECK_FP_MEMOP_DATASIZE() ASSERT(datasize == 8 || datasize == 16 || datasize == 32 || datasize == 64 || datasize == 128)
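+// Map a datasize in bits to the MemPairOpSize value used by the integer and FP load/store pair encodings.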
+#define MEMPAIROPSIZE_INT(datasize) ((datasize == 64) ? MemPairOp_64 : MemPairOp_32)
+#define MEMPAIROPSIZE_FP(datasize) ((datasize == 128) ? MemPairOp_V128 : (datasize == 64) ? MemPairOp_V64 : MemPairOp_32)
namespace JSC {
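+// isInt<N> checks that a value fits in an N-bit signed immediate: shifting the
+// value up so bit N-1 lands in the sign bit and arithmetic-shifting back down is
+// lossless only for valid N-bit two's-complement values (e.g. isInt7 accepts -64..63).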
+ALWAYS_INLINE bool isInt7(int32_t value)
+{
+ return value == ((value << 25) >> 25);
+}
+
ALWAYS_INLINE bool isInt9(int32_t value)
{
return value == ((value << 23) >> 23);
}
+ALWAYS_INLINE bool isInt11(int32_t value)
+{
+ return value == ((value << 21) >> 21);
+}
+
ALWAYS_INLINE bool isUInt5(int32_t value)
{
return !(value & ~0x1f);
int m_value;
};
+class PairPostIndex {
+public:
+ explicit PairPostIndex(int value)
+ : m_value(value)
+ {
+ ASSERT(isInt11(value));
+ }
+
+ operator int() { return m_value; }
+
+private:
+ int m_value;
+};
+
+class PairPreIndex {
+public:
+ explicit PairPreIndex(int value)
+ : m_value(value)
+ {
+ ASSERT(isInt11(value));
+ }
+
+ operator int() { return m_value; }
+
+private:
+ int m_value;
+};
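+// A load/store pair immediate is a byte offset that must scale down to a signed
+// 7-bit field. With the largest 16-byte (128-bit FP pair) scaling this leaves
+// 7 + 4 = 11 significant bits, hence the isInt11() checks above; the encoding
+// functions below assert the exact per-size constraint.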
+
class LogicalImmediate {
public:
static LogicalImmediate create32(uint32_t value)
public:
typedef ARM64Registers::RegisterID RegisterID;
typedef ARM64Registers::FPRegisterID FPRegisterID;
+
+ static RegisterID firstRegister() { return ARM64Registers::x0; }
+ static RegisterID lastRegister() { return ARM64Registers::sp; }
+
+ static FPRegisterID firstFPRegister() { return ARM64Registers::q0; }
+ static FPRegisterID lastFPRegister() { return ARM64Registers::q31; }
private:
static bool isSp(RegisterID reg) { return ARM64Registers::isSp(reg); }
, m_indexOfTailOfLastWatchpoint(INT_MIN)
{
}
+
+ AssemblerBuffer& buffer() { return m_buffer; }
// (HS, LO, HI, LS) -> (AE, B, A, BE)
// (VS, VC) -> (O, NO)
JumpType m_type : 8;
JumpLinkType m_linkType : 8;
Condition m_condition : 4;
- bool m_is64Bit : 1;
unsigned m_bitNumber : 6;
- RegisterID m_compareRegister : 5;
+ RegisterID m_compareRegister : 6;
+ bool m_is64Bit : 1;
} realTypes;
struct CopyTypes {
uint64_t content[3];
MemOp_LOAD_signed32 = 3 // size may be 0 or 1
};
+ enum MemPairOpSize {
+ MemPairOp_32 = 0,
+ MemPairOp_LoadSigned_32 = 1,
+ MemPairOp_64 = 2,
+
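+ // The same 2-bit field is reused for FP/vector pairs, where it selects 32-, 64- or 128-bit registers.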
+ MemPairOp_V32 = MemPairOp_32,
+ MemPairOp_V64 = 1,
+ MemPairOp_V128 = 2
+ };
+
enum MoveWideOp {
MoveWideOp_N = 0,
MoveWideOp_Z = 2,
LdrLiteralOp_128BIT = 2
};
+ static unsigned memPairOffsetShift(bool V, MemPairOpSize size)
+ {
+ // return the log2 of the size in bytes, e.g. 64 bit size returns 3
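+ // e.g. MemPairOp_64 with V = false: (2 >> 1) + 2 = 3 (8-byte units);
+ // MemPairOp_V128 with V = true: 2 + 2 = 4 (16-byte units).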
+ if (V)
+ return size + 2;
+ return (size >> 1) + 2;
+ }
+
public:
// Integer Instructions:
ALWAYS_INLINE void add(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
{
CHECK_DATASIZE();
- if (isSp(rn)) {
+ if (isSp(rd) || isSp(rn)) {
ASSERT(shift == LSL);
+ ASSERT(!isSp(rm));
add<datasize, setFlags>(rd, rn, rm, UXTX, amount);
} else
insn(addSubtractShiftedRegister(DATASIZE, AddOp_ADD, setFlags, shift, rm, amount, rn, rd));
{
ASSERT(!(offset & 0xfff));
insn(pcRelative(true, offset >> 12, rd));
+ nopCortexA53Fix843419();
}
template<int datasize, SetFlags setFlags = DontSetFlags>
insn(excepnGeneration(ExcepnOp_HALT, imm, 0));
}
+ template<int datasize>
+ ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void ldp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_LOAD, simm, rn, rt, rt2));
+ }
+
template<int datasize>
ALWAYS_INLINE void ldr(RegisterID rt, RegisterID rn, RegisterID rm)
{
ALWAYS_INLINE void madd(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
{
CHECK_DATASIZE();
+ nopCortexA53Fix835769<datasize>();
insn(dataProcessing3Source(DATASIZE, DataOp_MADD, rm, ra, rn, rd));
}
ALWAYS_INLINE void msub(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
{
CHECK_DATASIZE();
+ nopCortexA53Fix835769<datasize>();
insn(dataProcessing3Source(DATASIZE, DataOp_MSUB, rm, ra, rn, rd));
}
{
insn(nopPseudo());
}
+
+ static void fillNops(void* base, size_t size)
+ {
+ RELEASE_ASSERT(!(size % sizeof(int32_t)));
+ size_t n = size / sizeof(int32_t);
+ for (int32_t* ptr = static_cast<int32_t*>(base); n--;)
+ *ptr++ = nopPseudo();
+ }
+
+ ALWAYS_INLINE void dmbSY()
+ {
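+ // 0xd5033fbf is the encoding of 'dmb sy' (full-system data memory barrier).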
+ insn(0xd5033fbf);
+ }
template<int datasize>
ALWAYS_INLINE void orn(RegisterID rd, RegisterID rn, RegisterID rm)
ALWAYS_INLINE void smaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
{
+ nopCortexA53Fix835769<64>();
insn(dataProcessing3Source(Datasize_64, DataOp_SMADDL, rm, ra, rn, rd));
}
ALWAYS_INLINE void smsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
{
+ nopCortexA53Fix835769<64>();
insn(dataProcessing3Source(Datasize_64, DataOp_SMSUBL, rm, ra, rn, rd));
}
smaddl(rd, rn, rm, ARM64Registers::zr);
}
+ template<int datasize>
+ ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPostIndex simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterPairPostIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2));
+ }
+
+ template<int datasize>
+ ALWAYS_INLINE void stp(RegisterID rt, RegisterID rt2, RegisterID rn, PairPreIndex simm)
+ {
+ CHECK_DATASIZE();
+ insn(loadStoreRegisterPairPreIndex(MEMPAIROPSIZE_INT(datasize), false, MemOp_STORE, simm, rn, rt, rt2));
+ }
+
template<int datasize>
ALWAYS_INLINE void str(RegisterID rt, RegisterID rn, RegisterID rm)
{
template<int datasize, SetFlags setFlags = DontSetFlags>
ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm)
{
- sub<datasize, setFlags>(rd, rn, rm, LSL, 0);
+ ASSERT_WITH_MESSAGE(!isSp(rd) || setFlags == DontSetFlags, "SUBS with shifted register does not support SP for Xd; register 31 is XZR in that encoding. The extended-register form supports SP for Xd only when SetFlags is not used; otherwise register 31 is XZR.");
+ ASSERT_WITH_MESSAGE(!isSp(rm), "No encoding of SUBS supports SP for the third operand.");
+
+ if (isSp(rd) || isSp(rn))
+ sub<datasize, setFlags>(rd, rn, rm, UXTX, 0);
+ else
+ sub<datasize, setFlags>(rd, rn, rm, LSL, 0);
}
template<int datasize, SetFlags setFlags = DontSetFlags>
ALWAYS_INLINE void sub(RegisterID rd, RegisterID rn, RegisterID rm, ShiftType shift, int amount)
{
CHECK_DATASIZE();
- if (isSp(rn)) {
- ASSERT(shift == LSL);
- sub<datasize, setFlags>(rd, rn, rm, UXTX, amount);
- } else
- insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd));
+ ASSERT(!isSp(rd) && !isSp(rn) && !isSp(rm));
+ insn(addSubtractShiftedRegister(DATASIZE, AddOp_SUB, setFlags, shift, rm, amount, rn, rd));
}
template<int datasize>
ALWAYS_INLINE void umaddl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
{
+ nopCortexA53Fix835769<64>();
insn(dataProcessing3Source(Datasize_64, DataOp_UMADDL, rm, ra, rn, rd));
}
ALWAYS_INLINE void umsubl(RegisterID rd, RegisterID rn, RegisterID rm, RegisterID ra)
{
+ nopCortexA53Fix835769<64>();
insn(dataProcessing3Source(Datasize_64, DataOp_UMSUBL, rm, ra, rn, rd));
}
return b.m_offset - a.m_offset;
}
- int executableOffsetFor(int location)
- {
- if (!location)
- return 0;
- return static_cast<int32_t*>(m_buffer.data())[location / sizeof(int32_t) - 1];
- }
-
- PassRefPtr<ExecutableMemoryHandle> executableCopy(VM& vm, void* ownerUID, JITCompilationEffort effort)
- {
- return m_buffer.executableCopy(vm, ownerUID, effort);
- }
-
void* unlinkedCode() { return m_buffer.data(); }
size_t codeSize() const { return m_buffer.codeSize(); }
// Linking & patching:
//
// 'link' and 'patch' methods are for use on unprotected code - such as the code
- // within the AssemblerBuffer, and code being patched by the patch buffer.  Once
+ // within the AssemblerBuffer, and code being patched by the patch buffer. Once
// code has been finalized it is (platform support permitting) within a non-
// writable region of memory; to modify the code in an execute-only executable
// pool the 'repatch' and 'relink' methods should be used.
unsigned debugOffset() { return m_buffer.debugOffset(); }
+#if OS(LINUX) && COMPILER(GCC)
+ static inline void linuxPageFlush(uintptr_t begin, uintptr_t end)
+ {
+ __builtin___clear_cache(reinterpret_cast<void*>(begin), reinterpret_cast<void*>(end));
+ }
+#endif
+
static void cacheFlush(void* code, size_t size)
{
#if OS(IOS)
sys_cache_control(kCacheFunctionPrepareForExecution, code, size);
+#elif OS(LINUX)
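+ // Flush the (possibly partial) first page, then any whole pages, then the tail.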
+ size_t page = pageSize();
+ uintptr_t current = reinterpret_cast<uintptr_t>(code);
+ uintptr_t end = current + size;
+ uintptr_t firstPageEnd = (current & ~(page - 1)) + page;
+
+ if (end <= firstPageEnd) {
+ linuxPageFlush(current, end);
+ return;
+ }
+
+ linuxPageFlush(current, firstPageEnd);
+
+ for (current = firstPageEnd; current + page < end; current += page)
+ linuxPageFlush(current, current + page);
+
+ linuxPageFlush(current, end);
#else
#error "The cacheFlush support is missing on this platform."
#endif
// Assembler admin methods:
- int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
+ static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return JUMP_ENUM_SIZE(jumpType) - JUMP_ENUM_SIZE(jumpLinkType); }
static ALWAYS_INLINE bool linkRecordSourceComparator(const LinkRecord& a, const LinkRecord& b)
{
return a.from() < b.from();
}
- bool canCompact(JumpType jumpType)
+ static bool canCompact(JumpType jumpType)
{
// Fixed jumps cannot be compacted
return (jumpType == JumpNoCondition) || (jumpType == JumpCondition) || (jumpType == JumpCompareAndBranch) || (jumpType == JumpTestBit);
}
- JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
+ static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to)
{
switch (jumpType) {
case JumpFixed:
return LinkJumpNoCondition;
}
- JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
+ static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to)
{
JumpLinkType linkType = computeJumpType(record.type(), from, to);
record.setLinkType(linkType);
return linkType;
}
- void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset)
- {
- int32_t ptr = regionStart / sizeof(int32_t);
- const int32_t end = regionEnd / sizeof(int32_t);
- int32_t* offsets = static_cast<int32_t*>(m_buffer.data());
- while (ptr < end)
- offsets[ptr++] = offset;
- }
-
Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink()
{
std::sort(m_jumpsToLink.begin(), m_jumpsToLink.end(), linkRecordSourceComparator);
return m_jumpsToLink;
}
- void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
+ static void ALWAYS_INLINE link(LinkRecord& record, uint8_t* from, uint8_t* to)
{
switch (record.linkType()) {
case LinkJumpNoCondition:
RegisterID rd;
bool expected = disassembleMoveWideImediate(&insn, sf, opc, hw, imm16, rd);
- return expected &&
- sf == size &&
- opc == MoveWideOp_K &&
- hw == _hw &&
- rd == _rd;
+ return expected
+ && sf == size
+ && opc == MoveWideOp_K
+ && hw == _hw
+ && rd == _rd;
}
static void linkPointer(int* address, void* valuePtr, bool flush = false)
static bool disassembleNop(void* address)
{
- unsigned int insn = *static_cast<unsigned int*>(address);
+ unsigned insn = *static_cast<unsigned*>(address);
return insn == 0xd503201f;
}
int insn = *static_cast<int*>(address);
op = (insn >> 24) & 0x1;
imm14 = (insn << 13) >> 18;
- bitNumber = static_cast<unsigned>((((insn >> 26) & 0x20)) | ((insn > 19) & 0x1f));
+ bitNumber = static_cast<unsigned>((((insn >> 26) & 0x20)) | ((insn >> 19) & 0x1f));
rt = static_cast<RegisterID>(insn & 0x1f);
return (insn & 0x7e000000) == 0x36000000;
return loadStoreRegisterPostIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
}
+ // 'V' means vector
+ ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
+ {
+ ASSERT(size < 3);
+ ASSERT(opc == (opc & 1)); // Only load or store; the signed load variant is selected via the size field.
+ ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There is no signed integer store.
+ unsigned immedShiftAmount = memPairOffsetShift(V, size);
+ int imm7 = immediate >> immedShiftAmount;
+ ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
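+ // 0x28800000 is the base of the "load/store register pair (post-indexed)" encoding class.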
+ return (0x28800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
+ }
+
+ ALWAYS_INLINE static int loadStoreRegisterPairPostIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
+ {
+ return loadStoreRegisterPairPostIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
+ }
+
// 'V' means vector
ALWAYS_INLINE static int loadStoreRegisterPreIndex(MemOpSize size, bool V, MemOp opc, int imm9, RegisterID rn, FPRegisterID rt)
{
return loadStoreRegisterPreIndex(size, V, opc, imm9, rn, xOrZrAsFPR(rt));
}
+ // 'V' means vector
+ ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, FPRegisterID rt, FPRegisterID rt2)
+ {
+ ASSERT(size < 3);
+ ASSERT(opc == (opc & 1)); // Only load or store; the signed load variant is selected via the size field.
+ ASSERT(V || (size != MemPairOp_LoadSigned_32) || (opc == MemOp_LOAD)); // There is no signed integer store.
+ unsigned immedShiftAmount = memPairOffsetShift(V, size);
+ int imm7 = immediate >> immedShiftAmount;
+ ASSERT((imm7 << immedShiftAmount) == immediate && isInt7(imm7));
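+ // 0x29800000 is the base of the "load/store register pair (pre-indexed)" encoding class.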
+ return (0x29800000 | size << 30 | V << 26 | opc << 22 | (imm7 & 0x7f) << 15 | rt2 << 10 | xOrSp(rn) << 5 | rt);
+ }
+
+ ALWAYS_INLINE static int loadStoreRegisterPairPreIndex(MemPairOpSize size, bool V, MemOp opc, int immediate, RegisterID rn, RegisterID rt, RegisterID rt2)
+ {
+ return loadStoreRegisterPairPreIndex(size, V, opc, immediate, rn, xOrZrAsFPR(rt), xOrZrAsFPR(rt2));
+ }
+
// 'V' means vector
// 'S' means shift rm
ALWAYS_INLINE static int loadStoreRegisterRegisterOffset(MemOpSize size, bool V, MemOp opc, RegisterID rm, ExtendType option, bool S, RegisterID rn, FPRegisterID rt)
return (0xd6000000 | opc << 21 | op2 << 16 | op3 << 10 | xOrZr(rn) << 5 | op4);
}
+ // Workaround for Cortex-A53 erratum (835769). Emit an extra nop if the
+ // last instruction in the buffer is a load, store or prefetch. Needed
+ // before 64-bit multiply-accumulate instructions.
+ template<int datasize>
+ ALWAYS_INLINE void nopCortexA53Fix835769()
+ {
+#if CPU(ARM64_CORTEXA53)
+ CHECK_DATASIZE();
+ if (datasize == 64) {
+ if (LIKELY(m_buffer.codeSize() >= sizeof(int32_t))) {
+ // From ARMv8 Reference Manual, Section C4.1: the encoding of the
+ // instructions in the Loads and stores instruction group is:
+ // ---- 1-0- ---- ---- ---- ---- ---- ----
+ if (UNLIKELY((*reinterpret_cast_ptr<int32_t*>(reinterpret_cast_ptr<char*>(m_buffer.data()) + m_buffer.codeSize() - sizeof(int32_t)) & 0x0a000000) == 0x08000000))
+ nop();
+ }
+ }
+#endif
+ }
+
+ // Workaround for Cortex-A53 erratum (843419). Emit extra nops to avoid
+ // an incorrect address computation after an ADRP instruction.
+ ALWAYS_INLINE void nopCortexA53Fix843419()
+ {
+#if CPU(ARM64_CORTEXA53)
+ nop();
+ nop();
+ nop();
+#endif
+ }
+
AssemblerBuffer m_buffer;
Vector<LinkRecord, 0, UnsafeVectorOverflow> m_jumpsToLink;
int m_indexOfLastWatchpoint;
#endif // ENABLE(ASSEMBLER) && CPU(ARM64)
-#endif // ARMAssembler_h
+#endif // ARM64Assembler_h