/*
- * Copyright (C) 2009, 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2010, 2014 Apple Inc. All rights reserved.
* Copyright (C) 2010 University of Szeged
*
* Redistribution and use in source and binary forms, with or without
namespace JSC {
class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> {
- // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7?
- // - dTR is likely used more than aTR, and we'll get better instruction
- // encoding if it's in the low 8 registers.
static const RegisterID dataTempRegister = ARMRegisters::ip;
- static const RegisterID addressTempRegister = ARMRegisters::r3;
+ static const RegisterID addressTempRegister = ARMRegisters::r6;
static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
public:
// Constructor: jumps start out non-patchable; m_makeJumpPatchable is
// presumably toggled around patchable-branch emission (setter not visible here).
+ MacroAssemblerARMv7()
+ : m_makeJumpPatchable(false)
+ {
+ }
+
typedef ARMv7Assembler::LinkRecord LinkRecord;
typedef ARMv7Assembler::JumpType JumpType;
typedef ARMv7Assembler::JumpLinkType JumpLinkType;
- // Magic number is the biggest useful offset we can get on ARMv7 with
- // a LDR_imm_T2 encoding
- static const int MaximumCompactPtrAlignedAddressOffset = 124;
+ typedef ARMv7Assembler::Condition Condition;
- MacroAssemblerARMv7()
- : m_inUninterruptedSequence(false)
+ static const ARMv7Assembler::Condition DefaultCondition = ARMv7Assembler::ConditionInvalid;
+ static const ARMv7Assembler::JumpType DefaultJump = ARMv7Assembler::JumpNoConditionFixedSize;
+
// True when |value| fits the +/-255 immediate range used by the compact
// patchable load (see load32WithCompactAddressOffsetPatch).
+ static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
{
+ return value >= -255 && value <= 255;
}
-
- void beginUninterruptedSequence() { m_inUninterruptedSequence = true; }
- void endUninterruptedSequence() { m_inUninterruptedSequence = false; }
- Vector<LinkRecord>& jumpsToLink() { return m_assembler.jumpsToLink(); }
+
+ Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
void* unlinkedCode() { return m_assembler.unlinkedCode(); }
- bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
- JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
- JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
- void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
- int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
- void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
+ static bool canCompact(JumpType jumpType) { return ARMv7Assembler::canCompact(jumpType); }
+ static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(jumpType, from, to); }
+ static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARMv7Assembler::computeJumpType(record, from, to); }
+ static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARMv7Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
+ static void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return ARMv7Assembler::link(record, from, to); }
struct ArmAddress {
enum AddressType {
};
public:
- typedef ARMRegisters::FPDoubleRegisterID FPRegisterID;
-
static const Scale ScalePtr = TimesFour;
enum RelationalCondition {
enum ResultCondition {
Overflow = ARMv7Assembler::ConditionVS,
Signed = ARMv7Assembler::ConditionMI,
+ PositiveOrZero = ARMv7Assembler::ConditionPL,
Zero = ARMv7Assembler::ConditionEQ,
NonZero = ARMv7Assembler::ConditionNE
};
};
static const RegisterID stackPointerRegister = ARMRegisters::sp;
+ static const RegisterID framePointerRegister = ARMRegisters::fp;
static const RegisterID linkRegister = ARMRegisters::lr;
// Integer arithmetic operations:
{
add32(imm, dest, dest);
}
+
// Adds the 32-bit value stored at absolute address |src| into |dest|.
// Clobbers dataTempRegister.
+ void add32(AbsoluteAddress src, RegisterID dest)
+ {
+ load32(src.m_ptr, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
+
+ // For adds with stack pointer destination, moving the src first to sp is
+ // needed to avoid unpredictable instruction
+ if (dest == ARMRegisters::sp && src != dest) {
+ move(src, ARMRegisters::sp);
+ src = ARMRegisters::sp;
+ }
+
if (armImm.isValid())
m_assembler.add(dest, src, armImm);
else {
store32(dataTempRegister, address.m_ptr);
}
- void and32(RegisterID src, RegisterID dest)
// Pointer-width add (32-bit on this port); simply forwards to add32.
// NOTE(review): the "NoFlags" contract relies on add32's chosen encoding
// not setting condition flags - confirm against ARMv7Assembler.
+ void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
{
- m_assembler.ARM_and(dest, dest, src);
+ add32(imm, srcDest);
}
+
// 64-bit add of sign-extended |imm| to the two words at |address|:
// low word uses flag-setting add (add_S) to produce carry; high word
// uses adc with (imm >> 31), i.e. the sign extension of imm.
// Clobbers dataTempRegister and addressTempRegister; when imm is not
// encodable, addressTempRegister doubles as the immediate holder and the
// address is re-materialized afterwards.
+ void add64(TrustedImm32 imm, AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
- void and32(TrustedImm32 imm, RegisterID dest)
+ m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
+ else {
+ move(imm, addressTempRegister);
+ m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+ }
+ m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
+
+ m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
+ m_assembler.adc(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(imm.m_value >> 31));
+ m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
+ }
+
// Bitwise AND family. The three-operand forms are primary; the two-operand
// forms delegate to them. Immediate forms fall back to materializing the
// immediate in dataTempRegister when it has no Thumb-2 encoding.
+ void and32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.ARM_and(dest, op1, op2);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
if (armImm.isValid())
- m_assembler.ARM_and(dest, dest, armImm);
+ m_assembler.ARM_and(dest, src, armImm);
else {
// Immediate not encodable: clobbers dataTempRegister.
move(imm, dataTempRegister);
- m_assembler.ARM_and(dest, dest, dataTempRegister);
+ m_assembler.ARM_and(dest, src, dataTempRegister);
}
}
+ void and32(RegisterID src, RegisterID dest)
+ {
+ and32(dest, src, dest);
+ }
+
+ void and32(TrustedImm32 imm, RegisterID dest)
+ {
+ and32(imm, dest, dest);
+ }
+
// Memory form: loads through dataTempRegister (clobbered).
+ void and32(Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ and32(dataTempRegister, dest);
+ }
+
// Count-leading-zeros via the CLZ instruction.
void countLeadingZeros32(RegisterID src, RegisterID dest)
{
m_assembler.clz(dest, src);
}
// Logical left shift. Register-shift forms mask the shift amount to 0..31
// in dataTempRegister (clobbered) before shifting.
- void lshift32(RegisterID shift_amount, RegisterID dest)
+ void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
{
// Clamp the shift to the range 0..31
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
ASSERT(armImm.isValid());
- m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
+ m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
- m_assembler.lsl(dest, dest, dataTempRegister);
+ m_assembler.lsl(dest, src, dataTempRegister);
+ }
+
+ void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.lsl(dest, src, imm.m_value & 0x1f);
+ }
+
// Two-operand convenience forms delegating to the three-operand versions.
+ void lshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ lshift32(dest, shiftAmount, dest);
}
void lshift32(TrustedImm32 imm, RegisterID dest)
{
- m_assembler.lsl(dest, dest, imm.m_value & 0x1f);
+ lshift32(dest, imm, dest);
}
void mul32(RegisterID src, RegisterID dest)
m_assembler.neg(srcDest, srcDest);
}
// Bitwise OR family (the old not32 is dropped in this patch; its role is
// taken over by xor32 with -1 / MVN below).
- void not32(RegisterID srcDest)
+ void or32(RegisterID src, RegisterID dest)
{
- m_assembler.mvn(srcDest, srcDest);
+ m_assembler.orr(dest, dest, src);
+ }
+
// Read-modify-write OR on an absolute address.
// Clobbers both addressTempRegister and dataTempRegister.
+ void or32(RegisterID src, AbsoluteAddress dest)
+ {
+ move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
+ load32(addressTempRegister, dataTempRegister);
+ or32(src, dataTempRegister);
+ store32(dataTempRegister, addressTempRegister);
}
// Read-modify-write OR on a base+offset address (clobbers dataTempRegister).
- void or32(RegisterID src, RegisterID dest)
+ void or32(TrustedImm32 imm, Address address)
{
- m_assembler.orr(dest, dest, src);
+ load32(address, dataTempRegister);
+ or32(imm, dataTempRegister, dataTempRegister);
+ store32(dataTempRegister, address);
}
void or32(TrustedImm32 imm, RegisterID dest)
+ {
+ or32(imm, dest, dest);
+ }
+
+ void or32(RegisterID op1, RegisterID op2, RegisterID dest)
+ {
+ m_assembler.orr(dest, op1, op2);
+ }
+
// Immediate OR; falls back to dataTempRegister when not Thumb-2 encodable.
+ void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
if (armImm.isValid())
- m_assembler.orr(dest, dest, armImm);
+ m_assembler.orr(dest, src, armImm);
else {
move(imm, dataTempRegister);
- m_assembler.orr(dest, dest, dataTempRegister);
+ m_assembler.orr(dest, src, dataTempRegister);
}
}
// Arithmetic right shift (ASR). Register-shift forms mask the amount to
// 0..31 in dataTempRegister (clobbered).
- void rshift32(RegisterID shift_amount, RegisterID dest)
+ void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
{
// Clamp the shift to the range 0..31
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
ASSERT(armImm.isValid());
- m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
+ m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
- m_assembler.asr(dest, dest, dataTempRegister);
+ m_assembler.asr(dest, src, dataTempRegister);
}
- void rshift32(TrustedImm32 imm, RegisterID dest)
+ void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
{
- m_assembler.asr(dest, dest, imm.m_value & 0x1f);
+ m_assembler.asr(dest, src, imm.m_value & 0x1f);
+ }
+
// Two-operand convenience forms.
+ void rshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ rshift32(dest, shiftAmount, dest);
}
- void urshift32(RegisterID shift_amount, RegisterID dest)
+ void rshift32(TrustedImm32 imm, RegisterID dest)
+ {
+ rshift32(dest, imm, dest);
+ }
+
// Unsigned (logical) right shift (LSR); same clamping strategy as above.
+ void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
{
// Clamp the shift to the range 0..31
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
ASSERT(armImm.isValid());
- m_assembler.ARM_and(dataTempRegister, shift_amount, armImm);
+ m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);
- m_assembler.lsr(dest, dest, dataTempRegister);
+ m_assembler.lsr(dest, src, dataTempRegister);
+ }
+
+ void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
+ {
+ m_assembler.lsr(dest, src, imm.m_value & 0x1f);
+ }
+
+ void urshift32(RegisterID shiftAmount, RegisterID dest)
+ {
+ urshift32(dest, shiftAmount, dest);
}
void urshift32(TrustedImm32 imm, RegisterID dest)
{
- m_assembler.lsr(dest, dest, imm.m_value & 0x1f);
+ urshift32(dest, imm, dest);
}
void sub32(RegisterID src, RegisterID dest)
store32(dataTempRegister, address.m_ptr);
}
// Bitwise XOR family.
- void xor32(RegisterID src, RegisterID dest)
+ void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
{
- m_assembler.eor(dest, dest, src);
+ m_assembler.eor(dest, op1, op2);
}
// Immediate XOR. XOR with -1 is bitwise NOT, emitted as a single MVN
// (this also replaces the removed not32 helper).
- void xor32(TrustedImm32 imm, RegisterID dest)
+ void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
+ if (imm.m_value == -1) {
+ m_assembler.mvn(dest, src);
+ return;
+ }
+
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
if (armImm.isValid())
- m_assembler.eor(dest, dest, armImm);
+ m_assembler.eor(dest, src, armImm);
else {
// Not encodable: clobbers dataTempRegister.
move(imm, dataTempRegister);
- m_assembler.eor(dest, dest, dataTempRegister);
+ m_assembler.eor(dest, src, dataTempRegister);
}
}
+
+ void xor32(RegisterID src, RegisterID dest)
+ {
+ xor32(dest, src, dest);
+ }
+
+ void xor32(TrustedImm32 imm, RegisterID dest)
+ {
+ if (imm.m_value == -1)
+ m_assembler.mvn(dest, dest);
+ else
+ xor32(imm, dest, dest);
+ }
// Memory access operations:
m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
}
}
+
// Sign-extending halfword load; only the register-index addressing form
// is supported (asserted), matching the ImplicitAddress overloads below
// that are UNREACHABLE_FOR_PLATFORM.
+ void load16Signed(ArmAddress address, RegisterID dest)
+ {
+ ASSERT(address.type == ArmAddress::HasIndex);
+ m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale);
+ }
// (load8 fragment below is truncated by the diff; offset form shown only.)
void load8(ArmAddress address, RegisterID dest)
{
m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
}
}
+
// Sign-extending byte load; register-index form only (asserted).
+ void load8Signed(ArmAddress address, RegisterID dest)
+ {
+ ASSERT(address.type == ArmAddress::HasIndex);
+ m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale);
+ }
+protected:
void store32(RegisterID src, ArmAddress address)
{
if (address.type == ArmAddress::HasIndex)
}
}
+private:
// Byte store. Three addressing strategies: register index; non-negative
// offset via a 12-bit immediate; negative offset via the signed-offset
// form (asserted to be within -255, per the encoding's range).
+ void store8(RegisterID src, ArmAddress address)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.strb(src, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.strb(src, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.strb(src, address.base, address.u.offset, true, false);
+ }
+ }
+
// Halfword store; same three-way addressing strategy as store8.
+ void store16(RegisterID src, ArmAddress address)
+ {
+ if (address.type == ArmAddress::HasIndex)
+ m_assembler.strh(src, address.base, address.u.index, address.u.scale);
+ else if (address.u.offset >= 0) {
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
+ ASSERT(armImm.isValid());
+ m_assembler.strh(src, address.base, armImm);
+ } else {
+ ASSERT(address.u.offset >= -255);
+ m_assembler.strh(src, address.base, address.u.offset, true, false);
+ }
+ }
+
+
public:
void load32(ImplicitAddress address, RegisterID dest)
{
load32(setupArmAddress(address), dest);
}
+ void load16Unaligned(BaseIndex address, RegisterID dest)
+ {
+ load16(setupArmAddress(address), dest);
+ }
+
void load32(const void* address, RegisterID dest)
{
move(TrustedImmPtr(address), addressTempRegister);
m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
}
+
// Deliberate crash helper: parks |reason| in dataTempRegister so it is
// visible in the register state of the resulting crash, then traps.
+ void abortWithReason(AbortReason reason)
+ {
+ move(TrustedImm32(reason), dataTempRegister);
+ breakpoint();
+ }
+
// Variant that additionally parks |misc| in addressTempRegister.
+ void abortWithReason(AbortReason reason, intptr_t misc)
+ {
+ move(TrustedImm32(misc), addressTempRegister);
+ abortWithReason(reason);
+ }
+
// Emits a load whose instruction can later be repointed/converted by the
// patching machinery; the wide 8-bit-immediate encoding restricts the
// offset to 0..255 (asserted).
+ ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+ {
+ ConvertibleLoadLabel result(this);
+ ASSERT(address.offset >= 0 && address.offset <= 255);
+ m_assembler.ldrWide8BitImmediate(dest, address.base, address.offset);
+ return result;
+ }
void load8(ImplicitAddress address, RegisterID dest)
{
load8(setupArmAddress(address), dest);
}
// Signed byte load is only implemented for BaseIndex addressing on this
// port; the ImplicitAddress overload traps if ever reached.
+ void load8Signed(ImplicitAddress, RegisterID)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
+
+ void load8(BaseIndex address, RegisterID dest)
+ {
+ load8(setupArmAddress(address), dest);
+ }
+
+ void load8Signed(BaseIndex address, RegisterID dest)
+ {
+ load8Signed(setupArmAddress(address), dest);
+ }
+
// Absolute-address byte load; uses |dest| itself as the address scratch,
// so no temp register is clobbered.
+ void load8(const void* address, RegisterID dest)
+ {
+ move(TrustedImmPtr(address), dest);
+ load8(dest, dest);
+ }
+
+
DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
{
DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
// Patchable compact load: the offset must satisfy
// isCompactPtrAlignedAddressOffset (+/-255) so the patcher can rewrite it
// in place. padBeforePatch() presumably aligns the instruction for
// patching - confirm against AbstractMacroAssembler.
DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
{
+ padBeforePatch();
+
+ RegisterID base = address.base;
+
DataLabelCompact label(this);
- ASSERT(address.offset >= 0);
- ASSERT(address.offset <= MaximumCompactPtrAlignedAddressOffset);
- ASSERT(ARMThumbImmediate::makeUInt12(address.offset).isUInt7());
- m_assembler.ldrCompact(dest, address.base, ARMThumbImmediate::makeUInt12(address.offset));
+ ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
+
+ m_assembler.ldr(dest, base, address.offset, true, false);
return label;
}
m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
}
+ void load16Signed(BaseIndex address, RegisterID dest)
+ {
+ load16Signed(setupArmAddress(address), dest);
+ }
+
void load16(ImplicitAddress address, RegisterID dest)
{
ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
m_assembler.ldrh(dest, address.base, dataTempRegister);
}
}
+
+ void load16Signed(ImplicitAddress, RegisterID)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ }
DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
{
store32(dataTempRegister, setupArmAddress(address));
}
// Immediate store: materialize in dataTempRegister (clobbered) then store.
+ void store32(TrustedImm32 imm, BaseIndex address)
+ {
+ move(imm, dataTempRegister);
+ store32(dataTempRegister, setupArmAddress(address));
+ }
+
void store32(RegisterID src, const void* address)
{
move(TrustedImmPtr(address), addressTempRegister);
store32(dataTempRegister, address);
}
// Public byte stores, funneled into the private store8(RegisterID, ArmAddress).
+ void store8(RegisterID src, Address address)
+ {
+ store8(src, setupArmAddress(address));
+ }
+
+ void store8(RegisterID src, BaseIndex address)
+ {
+ store8(src, setupArmAddress(address));
+ }
+
// Absolute-address byte store; clobbers addressTempRegister.
+ void store8(RegisterID src, void* address)
+ {
+ move(TrustedImmPtr(address), addressTempRegister);
+ store8(src, ArmAddress(addressTempRegister, 0));
+ }
+
// Immediate byte stores; clobber dataTempRegister (and, via the void*
// overload, addressTempRegister).
+ void store8(TrustedImm32 imm, void* address)
+ {
+ move(imm, dataTempRegister);
+ store8(dataTempRegister, address);
+ }
+
+ void store8(TrustedImm32 imm, Address address)
+ {
+ move(imm, dataTempRegister);
+ store8(dataTempRegister, address);
+ }
+
+ void store16(RegisterID src, BaseIndex address)
+ {
+ store16(src, setupArmAddress(address));
+ }
- // Floating-point operations:
-
- bool supportsFloatingPoint() const { return true; }
- // On x86(_64) the MacroAssembler provides an interface to truncate a double to an integer.
- // If a value is not representable as an integer, and possibly for some values that are,
- // (on x86 INT_MIN, since this is indistinguishable from results for out-of-range/NaN input)
- // a branch will be taken. It is not clear whether this interface will be well suited to
- // other platforms. On ARMv7 the hardware truncation operation produces multiple possible
- // failure values (saturates to INT_MIN & INT_MAX, NaN reulsts in a value of 0). This is a
- // temporary solution while we work out what this interface should be. Either we need to
- // decide to make this interface work on all platforms, rework the interface to make it more
- // generic, or decide that the MacroAssembler cannot practically be used to abstracted these
- // operations, and make clients go directly to the m_assembler to plant truncation instructions.
- // In short, FIXME:.
- bool supportsFloatingPointTruncate() const { return false; }
+ // Possibly clobbers src, but not on this architecture.
// Splits a double register into two GPRs (low/high words) via VMOV.
+ void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.vmov(dest1, dest2, src);
+ }
+
// Packs two GPRs into a double register; the scratch FPR is unused here.
+ void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
+ {
+ UNUSED_PARAM(scratch);
+ m_assembler.vmov(dest, src1, src2);
+ }
// JIT-spray hardening policy: decide whether an attacker-supplied constant
// must be blinded before being baked into code.
- bool supportsFloatingPointSqrt() const
+ static bool shouldBlindForSpecificArch(uint32_t value)
{
- return false;
+ ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value);
+
+ // Couldn't be encoded as an immediate, so assume it's untrusted.
+ if (!immediate.isValid())
+ return true;
+
+ // If we can encode the immediate, we have less than 16 attacker
+ // controlled bits.
+ if (immediate.isEncodedImm())
+ return false;
+
+ // Don't let any more than 12 bits of an instruction word
+ // be controlled by an attacker.
+ return !immediate.isUInt12();
}
+ // Floating-point operations:
+
// This port implements FP truncate/sqrt/abs natively (see vsqrt/vabs below),
// so all capability probes now report true.
+ static bool supportsFloatingPoint() { return true; }
+ static bool supportsFloatingPointTruncate() { return true; }
+ static bool supportsFloatingPointSqrt() { return true; }
+ static bool supportsFloatingPointAbs() { return true; }
+
+
void loadDouble(ImplicitAddress address, FPRegisterID dest)
{
RegisterID base = address.base;
m_assembler.vldr(dest, base, offset);
}
// Single-precision load. VFP load/store offsets must be word-aligned and
// within +/-(255*4); otherwise the effective address is computed into
// addressTempRegister first (clobbered).
- void loadDouble(const void* address, FPRegisterID dest)
+ void loadFloat(ImplicitAddress address, FPRegisterID dest)
{
- move(TrustedImmPtr(address), addressTempRegister);
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+ // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(TrustedImm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.flds(ARMRegisters::asSingle(dest), base, offset);
+ }
+
// BaseIndex FP loads: compute base + (index << scale) into
// addressTempRegister (clobbered), then do an offset load.
+ void loadDouble(BaseIndex address, FPRegisterID dest)
+ {
+ move(address.index, addressTempRegister);
+ lshift32(TrustedImm32(address.scale), addressTempRegister);
+ add32(address.base, addressTempRegister);
+ loadDouble(Address(addressTempRegister, address.offset), dest);
+ }
+
+ void loadFloat(BaseIndex address, FPRegisterID dest)
+ {
+ move(address.index, addressTempRegister);
+ lshift32(TrustedImm32(address.scale), addressTempRegister);
+ add32(address.base, addressTempRegister);
+ loadFloat(Address(addressTempRegister, address.offset), dest);
+ }
+
// Register-to-register double move; no-op when src == dest.
+ void moveDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ if (src != dest)
+ m_assembler.vmov(dest, src);
+ }
+
// Absolute-address double load; clobbers addressTempRegister.
+ void loadDouble(TrustedImmPtr address, FPRegisterID dest)
+ {
+ move(address, addressTempRegister);
m_assembler.vldr(dest, addressTempRegister, 0);
}
m_assembler.vstr(src, base, offset);
}
// Single-precision store; same VFP offset-range workaround as loadFloat
// (clobbers addressTempRegister when the offset is out of range/unaligned).
+ void storeFloat(FPRegisterID src, ImplicitAddress address)
+ {
+ RegisterID base = address.base;
+ int32_t offset = address.offset;
+
+ // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
+ if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
+ add32(TrustedImm32(offset), base, addressTempRegister);
+ base = addressTempRegister;
+ offset = 0;
+ }
+
+ m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
+ }
+
// Absolute-address double store; clobbers addressTempRegister.
+ void storeDouble(FPRegisterID src, TrustedImmPtr address)
+ {
+ move(address, addressTempRegister);
+ storeDouble(src, addressTempRegister);
+ }
+
// BaseIndex FP stores: effective address computed into addressTempRegister.
+ void storeDouble(FPRegisterID src, BaseIndex address)
+ {
+ move(address.index, addressTempRegister);
+ lshift32(TrustedImm32(address.scale), addressTempRegister);
+ add32(address.base, addressTempRegister);
+ storeDouble(src, Address(addressTempRegister, address.offset));
+ }
+
+ void storeFloat(FPRegisterID src, BaseIndex address)
+ {
+ move(address.index, addressTempRegister);
+ lshift32(TrustedImm32(address.scale), addressTempRegister);
+ add32(address.base, addressTempRegister);
+ storeFloat(src, Address(addressTempRegister, address.offset));
+ }
+
+
// Double-precision arithmetic. This patch renames the assembler entry
// points from the *_F64 suffix form to plain vadd/vsub/vmul/vdiv, and adds
// three-operand variants alongside the accumulating two-operand ones.
void addDouble(FPRegisterID src, FPRegisterID dest)
{
- m_assembler.vadd_F64(dest, dest, src);
+ m_assembler.vadd(dest, dest, src);
}
void addDouble(Address src, FPRegisterID dest)
addDouble(fpTempRegister, dest);
}
+ void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vadd(dest, op1, op2);
+ }
+
// Absolute-address accumulate; clobbers fpTempRegister (and, via
// loadDouble, addressTempRegister).
+ void addDouble(AbsoluteAddress address, FPRegisterID dest)
+ {
+ loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
+ m_assembler.vadd(dest, dest, fpTempRegister);
+ }
+
void divDouble(FPRegisterID src, FPRegisterID dest)
{
- m_assembler.vdiv_F64(dest, dest, src);
+ m_assembler.vdiv(dest, dest, src);
+ }
+
+ void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vdiv(dest, op1, op2);
}
void subDouble(FPRegisterID src, FPRegisterID dest)
{
- m_assembler.vsub_F64(dest, dest, src);
+ m_assembler.vsub(dest, dest, src);
}
void subDouble(Address src, FPRegisterID dest)
subDouble(fpTempRegister, dest);
}
+ void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vsub(dest, op1, op2);
+ }
+
void mulDouble(FPRegisterID src, FPRegisterID dest)
{
- m_assembler.vmul_F64(dest, dest, src);
+ m_assembler.vmul(dest, dest, src);
}
void mulDouble(Address src, FPRegisterID dest)
mulDouble(fpTempRegister, dest);
}
- void sqrtDouble(FPRegisterID, FPRegisterID)
+ void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
+ {
+ m_assembler.vmul(dest, op1, op2);
+ }
+
// sqrt/abs/negate were previously unimplemented stubs; now emitted
// directly as single VFP instructions.
+ void sqrtDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vsqrt(dest, src);
+ }
+
+ void absDouble(FPRegisterID src, FPRegisterID dest)
{
- ASSERT_NOT_REACHED();
+ m_assembler.vabs(dest, src);
+ }
+
+ void negateDouble(FPRegisterID src, FPRegisterID dest)
+ {
+ m_assembler.vneg(dest, src);
}
// int32 -> double conversion: stage the integer bits in fpTempRegister
// via VMOV, then convert with vcvt. Clobbers fpTempRegister; the memory
// forms also clobber dataTempRegister.
// NOTE(review): the new vmov form passes the GPR twice (both words) where
// the old code moved into the single-precision alias - confirm this is
// the intended ARMv7Assembler::vmov overload.
void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
{
- m_assembler.vmov(fpTempRegisterAsSingle(), src);
- m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
+ m_assembler.vmov(fpTempRegister, src, src);
+ m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
}
void convertInt32ToDouble(Address address, FPRegisterID dest)
{
// Fixme: load directly into the fpr!
load32(address, dataTempRegister);
- m_assembler.vmov(fpTempRegisterAsSingle(), dataTempRegister);
- m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
+ m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
+ m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
}
void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
{
// Fixme: load directly into the fpr!
load32(address.m_ptr, dataTempRegister);
- m_assembler.vmov(fpTempRegisterAsSingle(), dataTempRegister);
- m_assembler.vcvt_F64_S32(dest, fpTempRegisterAsSingle());
+ m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
+ m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
+ }
+
// float <-> double precision conversions (vcvt.f64.f32 / vcvt.f32.f64).
+ void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.vcvtds(dst, ARMRegisters::asSingle(src));
+ }
+
+ void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+ {
+ m_assembler.vcvtsd(ARMRegisters::asSingle(dst), src);
}
Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
{
- m_assembler.vcmp_F64(left, right);
+ m_assembler.vcmp(left, right);
m_assembler.vmrs();
if (cond == DoubleNotEqual) {
return makeBranch(cond);
}
// Truncating double->int32 with failure detection, replacing the old
// unimplemented stub. VCVT saturates: underflow clamps to 0x80000000
// (so dest+dest == 0), overflow to 0x7fffffff (so dest+dest == -2);
// both cases are detected from 2x dest. Clobbers dataTempRegister and
// fpTempRegister.
- Jump branchTruncateDoubleToInt32(FPRegisterID, RegisterID)
+ enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
+ Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+ {
+ // Convert into dest.
+ m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
+ m_assembler.vmov(dest, fpTempRegisterAsSingle());
+
+ // Calculate 2x dest. If the value potentially underflowed, it will have
+ // clamped to 0x80000000, so 2x dest is zero in this case. In the case of
+ // overflow the result will be equal to -2.
+ Jump underflow = branchAdd32(Zero, dest, dest, dataTempRegister);
+ Jump noOverflow = branch32(NotEqual, dataTempRegister, TrustedImm32(-2));
+
+ // For BranchIfTruncateSuccessful, we branch if 'noOverflow' jumps.
+ underflow.link(this);
+ if (branchType == BranchIfTruncateSuccessful)
+ return noOverflow;
+
+ // We'll reach the current point in the code on failure, so plant a
+ // jump here & link the success case.
+ Jump failure = jump();
+ noOverflow.link(this);
+ return failure;
+ }
+
+ // Result is undefined if the value is outside of the integer range.
+ void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
{
- ASSERT_NOT_REACHED();
- return jump();
+ m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
+ m_assembler.vmov(dest, fpTempRegisterAsSingle());
}
// Unsigned truncation; result likewise undefined outside uint32 range.
+ void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+ {
+ m_assembler.vcvt_floatingPointToUnsigned(fpTempRegisterAsSingle(), src);
+ m_assembler.vmov(dest, fpTempRegisterAsSingle());
+ }
+
+
// Convert 'src' to an integer, and places the resulting 'dest'.
// If the result is not representable as a 32 bit value, branch.
// May also branch for some values that are representable in 32 bits
// (specifically, in this case, 0).
// The new negZeroCheck flag lets callers skip the -0.0 check when they
// know zero results are acceptable. Clobbers fpTempRegister.
- void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID)
+ void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
{
- m_assembler.vcvtr_S32_F64(fpTempRegisterAsSingle(), src);
+ m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
m_assembler.vmov(dest, fpTempRegisterAsSingle());
// Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
- m_assembler.vcvt_F64_S32(fpTempRegister, fpTempRegisterAsSingle());
+ m_assembler.vcvt_signedToFloatingPoint(fpTempRegister, fpTempRegisterAsSingle())
failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));
// If the result is zero, it might have been -0.0, and the double comparison won't catch this!
- failureCases.append(branchTest32(Zero, dest));
+ if (negZeroCheck)
+ failureCases.append(branchTest32(Zero, dest));
}
Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
{
- m_assembler.vcmpz_F64(reg);
+ m_assembler.vcmpz(reg);
m_assembler.vmrs();
Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
Jump result = makeBranch(ARMv7Assembler::ConditionNE);
Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
{
- m_assembler.vcmpz_F64(reg);
+ m_assembler.vcmpz(reg);
m_assembler.vmrs();
Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
// Stack ops: the hand-rolled writeback ldr/str forms are replaced by the
// assembler's dedicated push/pop emitters.
void pop(RegisterID dest)
{
- // store postindexed with writeback
- m_assembler.ldr(dest, ARMRegisters::sp, sizeof(void*), false, true);
+ m_assembler.pop(dest)
}
void push(RegisterID src)
{
- // store preindexed with writeback
- m_assembler.str(src, ARMRegisters::sp, -sizeof(void*), true, true);
+ m_assembler.push(src);
}
void push(Address address)
push(dataTempRegister);
}
// Pair forms take a register bitmask (1 << reg), pushing/popping both in
// one instruction.
+ void popPair(RegisterID dest1, RegisterID dest2)
+ {
+ m_assembler.pop(1 << dest1 | 1 << dest2);
+ }
+
+ void pushPair(RegisterID src1, RegisterID src2)
+ {
+ m_assembler.push(1 << src1 | 1 << src2);
+ }
+
+
// Register move operations:
//
// Move values in registers.
{
uint32_t value = imm.m_value;
- if (imm.m_isPointer)
- moveFixedWidthEncoding(imm, dest);
- else {
- ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value);
- if (armImm.isValid())
- m_assembler.mov(dest, armImm);
- else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
- m_assembler.mvn(dest, armImm);
- else {
- m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
- if (value & 0xffff0000)
- m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
- }
+ if (armImm.isValid())
+ m_assembler.mov(dest, armImm);
+ else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid())
+ m_assembler.mvn(dest, armImm);
+ else {
+ m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value));
+ if (value & 0xffff0000)
+ m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16));
}
}
// Register move; now elides the instruction entirely when src == dest.
void move(RegisterID src, RegisterID dest)
{
- m_assembler.mov(dest, src);
+ if (src != dest)
+ m_assembler.mov(dest, src);
}
void move(TrustedImmPtr imm, RegisterID dest)
// On a 32-bit target, sign- and zero-extension to pointer width are both
// plain moves; the src != dest guard migrates into move() above.
void signExtend32ToPtr(RegisterID src, RegisterID dest)
{
- if (src != dest)
- move(src, dest);
+ move(src, dest);
}
void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
{
- if (src != dest)
- move(src, dest);
+ move(src, dest);
+ }
+
+ // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
+ // Relies on the ARM condition-code encoding pairing each condition with
+ // its inverse in the low bit.
+ static RelationalCondition invert(RelationalCondition cond)
+ {
+ return static_cast<RelationalCondition>(cond ^ 1);
}
void nop()
{
m_assembler.nop();
}
+
// Full-system data memory barrier (DMB SY).
+ void memoryFence()
+ {
+ m_assembler.dmbSY();
+ }
+
// Patching hooks: overwrite an emitted instruction sequence with a jump,
// and report the maximum byte length such a replacement may need.
+ static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+ {
+ ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
+ }
+
+ static ptrdiff_t maxJumpReplacementSize()
+ {
+ return ARMv7Assembler::maxJumpReplacementSize();
+ }
// Forwards / external control flow operations:
//
void compare32(RegisterID left, TrustedImm32 right)
{
int32_t imm = right.m_value;
- if (!imm)
- m_assembler.tst(left, left);
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
+ if (armImm.isValid())
+ m_assembler.cmp(left, armImm);
+ else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
+ m_assembler.cmn(left, armImm);
else {
- ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
- if (armImm.isValid())
- m_assembler.cmp(left, armImm);
- else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid())
- m_assembler.cmn(left, armImm);
- else {
- move(TrustedImm32(imm), dataTempRegister);
- m_assembler.cmp(left, dataTempRegister);
- }
+ move(TrustedImm32(imm), dataTempRegister);
+ m_assembler.cmp(left, dataTempRegister);
}
}
m_assembler.tst(reg, reg);
else {
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
- if (armImm.isValid())
- m_assembler.tst(reg, armImm);
- else {
+ if (armImm.isValid()) {
+ if (reg == ARMRegisters::sp) {
+ move(reg, addressTempRegister);
+ m_assembler.tst(addressTempRegister, armImm);
+ } else
+ m_assembler.tst(reg, armImm);
+ } else {
move(mask, dataTempRegister);
- m_assembler.tst(reg, dataTempRegister);
+ if (reg == ARMRegisters::sp) {
+ move(reg, addressTempRegister);
+ m_assembler.tst(addressTempRegister, dataTempRegister);
+ } else
+ m_assembler.tst(reg, dataTempRegister);
}
}
}
public:
+ void test32(ResultCondition, RegisterID reg, TrustedImm32 mask)
+ {
+ test32(reg, mask);
+ }
+
+ Jump branch(ResultCondition cond)
+ {
+ return Jump(makeBranch(cond));
+ }
+
Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
{
m_assembler.cmp(left, right);
return branch32(cond, addressTempRegister, right);
}
- Jump branch16(RelationalCondition cond, BaseIndex left, RegisterID right)
+ Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
{
- load16(left, dataTempRegister);
- m_assembler.lsl(addressTempRegister, right, 16);
- m_assembler.lsl(dataTempRegister, dataTempRegister, 16);
- return branch32(cond, dataTempRegister, addressTempRegister);
- }
-
- Jump branch16(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
- {
- // use addressTempRegister incase the branch32 we call uses dataTempRegister. :-/
- load16(left, addressTempRegister);
- m_assembler.lsl(addressTempRegister, addressTempRegister, 16);
- return branch32(cond, addressTempRegister, TrustedImm32(right.m_value << 16));
+ load32(left, dataTempRegister);
+ return branch32(cond, dataTempRegister, right);
}
Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
{
+ ASSERT(!(0xffffff00 & right.m_value));
// use addressTempRegister incase the branch8 we call uses dataTempRegister. :-/
load8(left, addressTempRegister);
return branch8(cond, addressTempRegister, right);
}
+ Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+ {
+ ASSERT(!(0xffffff00 & right.m_value));
+        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
+ load8(left, addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
+ Jump branch8(RelationalCondition cond, AbsoluteAddress address, TrustedImm32 right)
+ {
+ // Use addressTempRegister instead of dataTempRegister, since branch32 uses dataTempRegister.
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+ load8(Address(addressTempRegister), addressTempRegister);
+ return branch32(cond, addressTempRegister, right);
+ }
+
Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
{
m_assembler.tst(reg, mask);
return branchTest32(cond, addressTempRegister, mask);
}
- Jump branchTest8(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
{
- test32(reg, mask);
- return Jump(makeBranch(cond));
+        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+ load8(address, addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
}
Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
{
// use addressTempRegister incase the branchTest8 we call uses dataTempRegister. :-/
load8(address, addressTempRegister);
- return branchTest8(cond, addressTempRegister, mask);
+ return branchTest32(cond, addressTempRegister, mask);
+ }
+
+ Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+ {
+        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
+ move(TrustedImmPtr(address.m_ptr), addressTempRegister);
+ load8(Address(addressTempRegister), addressTempRegister);
+ return branchTest32(cond, addressTempRegister, mask);
}
void jump(RegisterID target)
load32(address, dataTempRegister);
m_assembler.bx(dataTempRegister);
}
+
+ void jump(AbsoluteAddress address)
+ {
+ move(TrustedImmPtr(address.m_ptr), dataTempRegister);
+ load32(Address(dataTempRegister), dataTempRegister);
+ m_assembler.bx(dataTempRegister);
+ }
// Arithmetic control flow operations:
// * jo operations branch if the (signed) arithmetic
// operation caused an overflow to occur.
- Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
{
- m_assembler.add_S(dest, dest, src);
+ m_assembler.add_S(dest, op1, op2);
return Jump(makeBranch(cond));
}
- Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
{
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
if (armImm.isValid())
- m_assembler.add_S(dest, dest, armImm);
+ m_assembler.add_S(dest, op1, armImm);
else {
move(imm, dataTempRegister);
- m_assembler.add_S(dest, dest, dataTempRegister);
+ m_assembler.add_S(dest, op1, dataTempRegister);
}
return Jump(makeBranch(cond));
}
+ Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchAdd32(cond, dest, src, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
+ {
+ load32(src, dataTempRegister);
+ return branchAdd32(cond, dest, dataTempRegister, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchAdd32(cond, dest, imm, dest);
+ }
+
+ Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
+ {
+        // Move the address into addressTempRegister,
+        // and load the current value into dataTempRegister.
+ move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
+ m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+
+ // Do the add.
+ ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
+ if (armImm.isValid())
+ m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
+ else {
+ // If the operand does not fit into an immediate then load it temporarily
+ // into addressTempRegister; since we're overwriting addressTempRegister
+ // we'll need to reload it with the high bits of the address afterwards.
+ move(imm, addressTempRegister);
+ m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
+ move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
+ }
+
+ // Store the result.
+ m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
+
+ return Jump(makeBranch(cond));
+ }
+
Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
{
m_assembler.smull(dest, dataTempRegister, src1, src2);
return branchMul32(cond, dataTempRegister, src, dest);
}
+ Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
+ {
+ ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
+ m_assembler.sub_S(srcDest, zero, srcDest);
+ return Jump(makeBranch(cond));
+ }
+
Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
{
m_assembler.orr_S(dest, dest, src);
return Jump(makeBranch(cond));
}
- Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+ Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
{
- m_assembler.sub_S(dest, dest, src);
+ m_assembler.sub_S(dest, op1, op2);
return Jump(makeBranch(cond));
}
- Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
{
ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
if (armImm.isValid())
- m_assembler.sub_S(dest, dest, armImm);
+ m_assembler.sub_S(dest, op1, armImm);
else {
move(imm, dataTempRegister);
- m_assembler.sub_S(dest, dest, dataTempRegister);
+ m_assembler.sub_S(dest, op1, dataTempRegister);
}
return Jump(makeBranch(cond));
}
+ Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
+ {
+ return branchSub32(cond, dest, src, dest);
+ }
+
+ Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+ {
+ return branchSub32(cond, dest, imm, dest);
+ }
+
void relativeTableJump(RegisterID index, int scale)
{
ASSERT(scale >= 0 && scale <= 31);
// Miscellaneous operations:
- void breakpoint()
+ void breakpoint(uint8_t imm = 0)
{
- m_assembler.bkpt(0);
+ m_assembler.bkpt(imm);
}
ALWAYS_INLINE Call nearCall()
compare32(cond, dataTempRegister, right, dest);
}
+ void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+ {
+ load8(left, addressTempRegister);
+ compare32(cond, addressTempRegister, right, dest);
+ }
+
void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
{
compare32(left, right);
ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
{
+ padBeforePatch();
moveFixedWidthEncoding(imm, dst);
return DataLabel32(this);
}
ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
{
+ padBeforePatch();
moveFixedWidthEncoding(TrustedImm32(imm), dst);
return DataLabelPtr(this);
}
dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
return branch32(cond, addressTempRegister, dataTempRegister);
}
+
+ ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ load32(left, addressTempRegister);
+ dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
+ return branch32(cond, addressTempRegister, dataTempRegister);
+ }
+
+ PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32(cond, left, TrustedImm32(right));
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branchTest32(cond, reg, mask);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32(cond, reg, imm);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ m_makeJumpPatchable = true;
+ Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
+
+ PatchableJump patchableJump()
+ {
+ padBeforePatch();
+ m_makeJumpPatchable = true;
+ Jump result = jump();
+ m_makeJumpPatchable = false;
+ return PatchableJump(result);
+ }
ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
{
}
- int executableOffsetFor(int location)
+ static FunctionPtr readCallTarget(CodeLocationCall call)
+ {
+ return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
+ }
+
+ static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
+ static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
+
+ static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+ {
+ const unsigned twoWordOpSize = 4;
+ return label.labelAtOffset(-twoWordOpSize * 2);
+ }
+
+ static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
+ {
+#if OS(LINUX)
+ ARMv7Assembler::revertJumpTo_movT3movtcmpT2(instructionStart.dataLocation(), rd, dataTempRegister, reinterpret_cast<uintptr_t>(initialValue));
+#else
+ UNUSED_PARAM(rd);
+ ARMv7Assembler::revertJumpTo_movT3(instructionStart.dataLocation(), dataTempRegister, ARMThumbImmediate::makeUInt16(reinterpret_cast<uintptr_t>(initialValue) & 0xffff));
+#endif
+ }
+
+ static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
{
- return m_assembler.executableOffsetFor(location);
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return CodeLocationLabel();
+ }
+
+ static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
+ {
+ UNREACHABLE_FOR_PLATFORM();
}
-protected:
- bool inUninterruptedSequence()
+ static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
{
- return m_inUninterruptedSequence;
+ UNREACHABLE_FOR_PLATFORM();
}
+#if USE(MASM_PROBE)
+ struct CPUState {
+ #define DECLARE_REGISTER(_type, _regName) \
+ _type _regName;
+ FOR_EACH_CPU_REGISTER(DECLARE_REGISTER)
+ #undef DECLARE_REGISTER
+ };
+
+ struct ProbeContext;
+ typedef void (*ProbeFunction)(struct ProbeContext*);
+
+ struct ProbeContext {
+ ProbeFunction probeFunction;
+ void* arg1;
+ void* arg2;
+ CPUState cpu;
+
+ void dump(const char* indentation = 0);
+ private:
+ void dumpCPURegisters(const char* indentation);
+ };
+
+ // For details about probe(), see comment in MacroAssemblerX86_64.h.
+ void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
+#endif // USE(MASM_PROBE)
+
+protected:
ALWAYS_INLINE Jump jump()
{
+ m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
- return Jump(m_assembler.bx(dataTempRegister), inUninterruptedSequence() ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
+ return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
}
ALWAYS_INLINE Jump makeBranch(ARMv7Assembler::Condition cond)
{
+ m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
m_assembler.it(cond, true, true);
moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
- return Jump(m_assembler.bx(dataTempRegister), inUninterruptedSequence() ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
+ return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
}
ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(armV7Condition(cond)); }
ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(armV7Condition(cond)); }
{
return static_cast<ARMv7Assembler::Condition>(cond);
}
-
+
private:
friend class LinkBuffer;
friend class RepatchBuffer;
static void linkCall(void* code, Call call, FunctionPtr function)
{
- ARMv7Assembler::linkCall(code, call.m_jmp, function.value());
+ ARMv7Assembler::linkCall(code, call.m_label, function.value());
}
static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
{
ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
}
-
- bool m_inUninterruptedSequence;
+
+#if USE(MASM_PROBE)
+ inline TrustedImm32 trustedImm32FromPtr(void* ptr)
+ {
+ return TrustedImm32(TrustedImmPtr(ptr));
+ }
+
+ inline TrustedImm32 trustedImm32FromPtr(ProbeFunction function)
+ {
+ return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
+ }
+
+ inline TrustedImm32 trustedImm32FromPtr(void (*function)())
+ {
+ return TrustedImm32(TrustedImmPtr(reinterpret_cast<void*>(function)));
+ }
+#endif
+
+ bool m_makeJumpPatchable;
};
} // namespace JSC