/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
{
}
- typedef ARM64Registers::FPRegisterID FPRegisterID;
typedef ARM64Assembler::LinkRecord LinkRecord;
typedef ARM64Assembler::JumpType JumpType;
typedef ARM64Assembler::JumpLinkType JumpLinkType;
Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
void* unlinkedCode() { return m_assembler.unlinkedCode(); }
- bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
- JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
- JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
- void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
- int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
- void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
- int executableOffsetFor(int location) { return m_assembler.executableOffsetFor(location); }
+ static bool canCompact(JumpType jumpType) { return ARM64Assembler::canCompact(jumpType); }
+ static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(jumpType, from, to); }
+ static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(record, from, to); }
+ static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARM64Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
+ static void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return ARM64Assembler::link(record, from, to); }
static const Scale ScalePtr = TimesEight;
};
static const RegisterID stackPointerRegister = ARM64Registers::sp;
+ static const RegisterID framePointerRegister = ARM64Registers::fp;
static const RegisterID linkRegister = ARM64Registers::lr;
+ // FIXME: Get reasonable implementations for these
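+ // Heuristic: treat any constant that does not fit in 24 bits as worth blinding.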
+ static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
+ static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
// Integer operations:
void add64(RegisterID src, RegisterID dest)
{
- m_assembler.add<64>(dest, dest, src);
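+ // In the shifted-register form of ADD, register 31 encodes zr rather than
+ // sp, so sp is only valid as the first source operand; swap when src is sp.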
+ if (src == ARM64Registers::sp)
+ m_assembler.add<64>(dest, src, dest);
+ else
+ m_assembler.add<64>(dest, dest, src);
}
void add64(TrustedImm32 imm, RegisterID dest)
store64(dataTempRegister, address.m_ptr);
}
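+ // add64 on ARM64 does not set condition flags, so the NoFlags variant is a plain alias.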
+ void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
+ {
+ add64(imm, srcDest);
+ }
+
void add64(Address src, RegisterID dest)
{
load64(src, getCachedDataTempRegisterIDAndInvalidate());
m_assembler.and_<64>(dest, dest, dataTempRegister);
}
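+ // AND with a pointer-sized constant: prefer encoding it as a logical
+ // immediate; otherwise materialize it in dataTempRegister first.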
+ void and64(TrustedImmPtr imm, RegisterID dest)
+ {
+ LogicalImmediate logicalImm = LogicalImmediate::create64(reinterpret_cast<uint64_t>(imm.m_value));
+
+ if (logicalImm.isValid()) {
+ m_assembler.and_<64>(dest, dest, logicalImm);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.and_<64>(dest, dest, dataTempRegister);
+ }
+
void countLeadingZeros32(RegisterID src, RegisterID dest)
{
m_assembler.clz<32>(dest, src);
lshift32(dest, imm, dest);
}
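+ // 64-bit logical left shift; immediate shift amounts are masked to 0..63.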
+ void lshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.lsl<64>(dest, src, shiftAmount);
+ }
+
+ void lshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.lsl<64>(dest, src, imm.m_value & 0x3f);
+ }
+
+ void lshift64(RegisterID shiftAmount, RegisterID dest)
+ {
+ lshift64(dest, shiftAmount, dest);
+ }
+
+ void lshift64(TrustedImm32 imm, RegisterID dest)
+ {
+ lshift64(dest, imm, dest);
+ }
+
void mul32(RegisterID src, RegisterID dest)
{
m_assembler.mul<32>(dest, dest, src);
}
+
+ void mul64(RegisterID src, RegisterID dest)
+ {
+ m_assembler.mul<64>(dest, dest, src);
+ }
void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
store32(dataTempRegister, address.m_ptr);
}
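+ // Read-modify-write: load the word into dataTempRegister, OR in the
+ // immediate, and store the result back.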
+ void or32(TrustedImm32 imm, Address address)
+ {
+ load32(address, getCachedDataTempRegisterIDAndInvalidate());
+ or32(imm, dataTempRegister, dataTempRegister);
+ store32(dataTempRegister, address);
+ }
+
void or64(RegisterID src, RegisterID dest)
{
or64(dest, src, dest);
LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
if (logicalImm.isValid()) {
- m_assembler.orr<64>(dest, dest, logicalImm);
+ m_assembler.orr<64>(dest, src, logicalImm);
return;
}
{
rshift32(dest, imm, dest);
}
+
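+ // 64-bit arithmetic (sign-propagating) right shift; immediates are masked to 0..63.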
+ void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
+ {
+ m_assembler.asr<64>(dest, src, shiftAmount);
+ }
+
+ void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.asr<64>(dest, src, imm.m_value & 0x3f);
+ }
+
+ void rshift64(RegisterID shiftAmount, RegisterID dest)
+ {
+ rshift64(dest, shiftAmount, dest);
+ }
+
+ void rshift64(TrustedImm32 imm, RegisterID dest)
+ {
+ rshift64(dest, imm, dest);
+ }
void sub32(RegisterID src, RegisterID dest)
{
LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
if (logicalImm.isValid()) {
- m_assembler.eor<32>(dest, dest, logicalImm);
+ m_assembler.eor<32>(dest, src, logicalImm);
return;
}
LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));
if (logicalImm.isValid()) {
- m_assembler.eor<64>(dest, dest, logicalImm);
+ m_assembler.eor<64>(dest, src, logicalImm);
return;
}
return label;
}
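+ // Move the reason (and optional extra word) into the temp registers so they
+ // are visible in the register state when the breakpoint is hit.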
+ void abortWithReason(AbortReason reason)
+ {
+ move(TrustedImm32(reason), dataTempRegister);
+ breakpoint();
+ }
+
+ void abortWithReason(AbortReason reason, intptr_t misc)
+ {
+ move(TrustedImm64(misc), memoryTempRegister);
+ abortWithReason(reason);
+ }
+
ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
{
ConvertibleLoadLabel result(this);
void store8(RegisterID src, void* address)
{
- move(ImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
+ move(TrustedImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
m_assembler.strb(src, memoryTempRegister, 0);
}
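+ // Use the immediate-offset form of strb when the offset fits; otherwise
+ // compute the offset in memoryTempRegister and use the register-offset form.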
+ void store8(RegisterID src, ImplicitAddress address)
+ {
+ if (tryStoreWithOffset<8>(src, address.base, address.offset))
+ return;
+
+ signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
+ m_assembler.str<8>(src, address.base, memoryTempRegister);
+ }
+
void store8(TrustedImm32 imm, void* address)
{
if (!imm.m_value) {
store8(dataTempRegister, address);
}
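+ // Storing zero goes through zr directly, avoiding a temp-register move.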
+ void store8(TrustedImm32 imm, ImplicitAddress address)
+ {
+ if (!imm.m_value) {
+ store8(ARM64Registers::zr, address);
+ return;
+ }
+
+ move(imm, getCachedDataTempRegisterIDAndInvalidate());
+ store8(dataTempRegister, address);
+ }
// Floating-point operations:
void addDouble(AbsoluteAddress address, FPRegisterID dest)
{
- loadDouble(address.m_ptr, fpTempRegister);
+ loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
addDouble(fpTempRegister, dest);
}
return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
}
- Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
- {
- // Truncate to a 64-bit integer in dataTempRegister, copy the low 32-bit to dest.
- m_assembler.fcvtzs<64, 64>(dest, src);
- // Check thlow 32-bits zero extend to be equal to the full value.
- m_assembler.cmp<64>(dest, dest, ARM64Assembler::UXTW, 0);
- return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
- }
-
void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
{
m_assembler.fcvt<32, 64>(dest, src);
convertInt32ToDouble(dataTempRegister, dest);
}
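+ // scvtf: signed 64-bit integer to double-precision conversion.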
+ void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
+ {
+ m_assembler.scvtf<64, 64>(dest, src);
+ }
+
void divDouble(FPRegisterID src, FPRegisterID dest)
{
divDouble(dest, src, dest);
m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
}
- void loadDouble(const void* address, FPRegisterID dest)
+ void loadDouble(TrustedImmPtr address, FPRegisterID dest)
{
- moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
+ moveToCachedReg(address, m_cachedMemoryTempRegister);
m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
}
m_assembler.str<64>(src, address.base, memoryTempRegister);
}
- void storeDouble(FPRegisterID src, const void* address)
+ void storeDouble(FPRegisterID src, TrustedImmPtr address)
{
- moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
+ moveToCachedReg(address, m_cachedMemoryTempRegister);
m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
}
// Stack manipulation operations:
//
// The ABI is assumed to provide a stack abstraction to memory,
- // containing machine word sized units of data. Push and pop
+ // containing machine word sized units of data. Push and pop
// operations add and remove a single register sized unit of data
- // to or from the stack. These operations are not supported on
- // ARM64. Peek and poke operations read or write values on the
- // stack, without moving the current stack position. Additionally,
+ // to or from the stack. These operations are not supported on
+ // ARM64. Peek and poke operations read or write values on the
+ // stack, without moving the current stack position. Additionally,
// there are popToRestore and pushToSave operations, which are
// designed just for quick-and-dirty saving and restoring of
- // temporary values. These operations don't claim to have any
+ // temporary values. These operations don't claim to have any
// ABI compatibility.
void pop(RegisterID) NO_RETURN_DUE_TO_CRASH
CRASH();
}
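+ // Push and pop a pair with a single stp/ldp, adjusting sp by 16 bytes so it
+ // stays 16-byte aligned as AArch64 requires.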
+ void popPair(RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16));
+ }
+
+ void pushPair(RegisterID src1, RegisterID src2)
+ {
+ m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(-16));
+ }
+
void popToRestore(RegisterID dest)
{
m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
{
m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
}
+
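+ // Reserve 16 bytes by pushing dataTempRegister twice, write the immediate
+ // into the low slot, then reload the register's saved value from the high
+ // slot, leaving every register unchanged with the immediate on the stack.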
+ void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
+ {
+ RegisterID reg = dataTempRegister;
+ pushPair(reg, reg);
+ move(imm, reg);
+ store64(reg, stackPointerRegister);
+ load64(Address(stackPointerRegister, 8), reg);
+ }
void pushToSave(Address address)
{
storeDouble(src, stackPointerRegister);
}
+ static ptrdiff_t pushToSaveByteOffset() { return 16; }
// Register move operations:
return branch64(cond, memoryTempRegister, right);
}
+ Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
+ {
+ load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch64(cond, memoryTempRegister, right);
+ }
+
Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
{
ASSERT(!(0xffffff00 & right.m_value));
return branch32(cond, memoryTempRegister, right);
}
+ Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+ {
+ ASSERT(!(0xffffff00 & right.m_value));
+ load8(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
+ return branch32(cond, memoryTempRegister, right);
+ }
+
Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
{
m_assembler.tst<32>(reg, mask);
return Jump(makeBranch(cond));
}
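+ // A mask of -1 collapses to a register self-test; for Zero/NonZero try to
+ // encode the mask as a logical immediate before falling back to a temp.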
+ void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ if (mask.m_value == -1)
+ m_assembler.tst<32>(reg, reg);
+ else {
+ bool testedWithImmediate = false;
+ if ((cond == Zero) || (cond == NonZero)) {
+ LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
+
+ if (logicalImm.isValid()) {
+ m_assembler.tst<32>(reg, logicalImm);
+ testedWithImmediate = true;
+ }
+ }
+ if (!testedWithImmediate) {
+ move(mask, getCachedDataTempRegisterIDAndInvalidate());
+ m_assembler.tst<32>(reg, dataTempRegister);
+ }
+ }
+ }
+
+ Jump branch(ResultCondition cond)
+ {
+ return Jump(makeBranch(cond));
+ }
+
Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
{
if (mask.m_value == -1) {
Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
{
- move(ImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());
+ move(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());
m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
return branchTest32(cond, dataTempRegister, mask);
}
+ Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ load8(address, getCachedDataTempRegisterIDAndInvalidate());
+ return branchTest32(cond, dataTempRegister, mask);
+ }
+
Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
{
return branch32(cond, left, right);
// Arithmetic control flow operations:
//
// This set of conditional branch operations branch based
- // on the result of an arithmetic operation. The operation
+ // on the result of an arithmetic operation. The operation
// is performed as normal, storing the result.
//
// * jz operations branch if the result is zero.
return branchAdd32(cond, op1, dataTempRegister, dest);
}
+ Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
+ {
+ load32(src, getCachedDataTempRegisterIDAndInvalidate());
+ return branchAdd32(cond, dest, dataTempRegister, dest);
+ }
+
Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
{
return branchAdd32(cond, dest, src, dest);
return branchMul32(cond, dataTempRegister, src, dest);
}
+ Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
+ {
+ ASSERT(cond != Signed);
+
+ // This is a signed multiply of two 64-bit values, producing a 64-bit result.
+ m_assembler.mul<64>(dest, src1, src2);
+
+ if (cond != Overflow)
+ return branchTest64(cond, dest);
+
+ // Compute bits 127..64 of the result into dataTempRegister.
+ m_assembler.smulh(getCachedDataTempRegisterIDAndInvalidate(), src1, src2);
+ // Splat bit 63 of the result to bits 63..0 of memoryTempRegister.
+ m_assembler.asr<64>(getCachedMemoryTempRegisterIDAndInvalidate(), dest, 63);
+ // Check that bits 127..63 of the full result were all equal, i.e., the product fits in 64 bits.
+ return branch64(NotEqual, memoryTempRegister, dataTempRegister);
+ }
+
+ Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchMul64(cond, dest, src, dest);
+ }
+
Jump branchNeg32(ResultCondition cond, RegisterID dest)
{
m_assembler.neg<32, S>(dest, dest);
return Jump(makeBranch(cond));
}
+ Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
+ {
+ m_assembler.neg<64, S>(srcDest, srcDest);
+ return Jump(makeBranch(cond));
+ }
+
Jump branchSub32(ResultCondition cond, RegisterID dest)
{
m_assembler.neg<32, S>(dest, dest);
return branch64(cond, left, dataTempRegister);
}
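+ // Emit a patchable move of the right-hand value; dataLabel marks the move so
+ // the comparand can be repatched later.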
+ ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ dataLabel = DataLabel32(this);
+ moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
+ return branch32(cond, left, dataTempRegister);
+ }
+
PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
{
m_makeJumpPatchable = true;
return PatchableJump(result);
}
+ PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
PatchableJump patchableJump()
{
m_makeJumpPatchable = true;
{
m_assembler.nop();
}
+
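+ // dmb sy: full-system data memory barrier ordering all loads and stores.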
+ void memoryFence()
+ {
+ m_assembler.dmbSY();
+ }
// Misc helper functions.
return ARM64Assembler::maxJumpReplacementSize();
}
+ RegisterID scratchRegisterForBlinding()
+ {
+ // We *do not* have a scratch register for blinding.
+ RELEASE_ASSERT_NOT_REACHED();
+ return getCachedDataTempRegisterIDAndInvalidate();
+ }
+
static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+ static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
{
return CodeLocationLabel();
}
+ static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
{
reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
UNREACHABLE_FOR_PLATFORM();
}
+ static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
protected:
ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
{
template<typename ImmediateType, typename rawType>
void moveInternal(ImmediateType imm, RegisterID dest)
{
- const int dataSize = sizeof(rawType)*8;
- const int numberHalfWords = dataSize/16;
+ const int dataSize = sizeof(rawType) * 8;
+ const int numberHalfWords = dataSize / 16;
rawType value = bitwise_cast<rawType>(imm.m_value);
uint16_t halfword[numberHalfWords];