/*
- * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
#ifndef MacroAssembler_h
#define MacroAssembler_h
-#include <wtf/Platform.h>
-
#if ENABLE(ASSEMBLER)
#if CPU(ARM_THUMB2)
class MacroAssembler : public MacroAssemblerBase {
public:
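+ // Register banks are treated as contiguous ranges of IDs, so the "next"
+ // register is simply the current ID plus one.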
+ static RegisterID nextRegister(RegisterID reg)
+ {
+ return static_cast<RegisterID>(reg + 1);
+ }
+
+ static FPRegisterID nextFPRegister(FPRegisterID reg)
+ {
+ return static_cast<FPRegisterID>(reg + 1);
+ }
+
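+ // Zero-based indices into each register bank; registerIndex(FPRegisterID)
+ // places the FP registers after the general-purpose ones so both banks can
+ // share a single index space.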
+ static unsigned numberOfRegisters()
+ {
+ return lastRegister() - firstRegister() + 1;
+ }
+
+ static unsigned registerIndex(RegisterID reg)
+ {
+ return reg - firstRegister();
+ }
+
+ static unsigned numberOfFPRegisters()
+ {
+ return lastFPRegister() - firstFPRegister() + 1;
+ }
+
+ static unsigned fpRegisterIndex(FPRegisterID reg)
+ {
+ return reg - firstFPRegister();
+ }
+
+ static unsigned registerIndex(FPRegisterID reg)
+ {
+ return fpRegisterIndex(reg) + numberOfRegisters();
+ }
+
+ static unsigned totalNumberOfRegisters()
+ {
+ return numberOfRegisters() + numberOfFPRegisters();
+ }
+
using MacroAssemblerBase::pop;
using MacroAssemblerBase::jump;
using MacroAssemblerBase::branch32;
using MacroAssemblerBase::move;
-
-#if ENABLE(JIT_CONSTANT_BLINDING)
using MacroAssemblerBase::add32;
using MacroAssemblerBase::and32;
using MacroAssemblerBase::branchAdd32;
using MacroAssemblerBase::branchMul32;
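+ // branchPtr() overloads are added below; re-export the base class versions so
+ // they are not hidden on targets where the base assembler provides them.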
+#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(X86_64)
+ using MacroAssemblerBase::branchPtr;
+#endif
using MacroAssemblerBase::branchSub32;
using MacroAssemblerBase::lshift32;
using MacroAssemblerBase::or32;
using MacroAssemblerBase::sub32;
using MacroAssemblerBase::urshift32;
using MacroAssemblerBase::xor32;
-#endif
-
+
static bool isPtrAlignedAddressOffset(ptrdiff_t value)
{
- return value == (int32_t)value;
+ return value == static_cast<int32_t>(value);
}
static const double twoToThe32; // This is super useful for some double code.
{
push(src);
}
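+ // Pushes an immediate directly, so no register has to be clobbered to
+ // materialize the value first.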
+ void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
+ {
+ push(imm);
+ }
void popToRestore(RegisterID dest)
{
pop(dest);
loadDouble(stackPointerRegister, dest);
addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
}
+
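+ // Byte offset by which pushToSave() moves the stack pointer.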
+ static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
#endif // !CPU(ARM64)
#if CPU(X86_64) || CPU(ARM64)
}
#endif
+ // Immediate shifts only have 5 controllable bits
+ // so we'll consider them safe for now.
+ TrustedImm32 trustedImm32ForShift(Imm32 imm)
+ {
+ return TrustedImm32(imm.asTrustedImm32().m_value & 31);
+ }
+
// Backwards branches; these are currently all implemented using existing forwards branch mechanisms.
void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
{
return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
}
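+ // 32-bit counterpart of patchableBranchPtrWithPatch() above.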
+ PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+ {
+ return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
+ }
+
+#if !CPU(ARM_TRADITIONAL)
PatchableJump patchableJump()
{
return PatchableJump(jump());
{
return PatchableJump(branch32(cond, reg, imm));
}
+
+ PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
+ {
+ return PatchableJump(branch32(cond, address, imm));
+ }
+#endif
#endif
void jump(Label target)
{
and32(imm, srcDest);
}
-
+
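+ // Pointers are 32 bits wide here, so pointer-sized immediates and shifts
+ // lower to the 32-bit operations.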
+ void andPtr(TrustedImmPtr imm, RegisterID srcDest)
+ {
+ and32(TrustedImm32(imm), srcDest);
+ }
+
+ void lshiftPtr(Imm32 imm, RegisterID srcDest)
+ {
+ lshift32(trustedImm32ForShift(imm), srcDest);
+ }
+
void negPtr(RegisterID dest)
{
neg32(dest);
compare32(cond, left, right, dest);
}
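+ // Register-register form; pointers are compared as 32-bit values on these targets.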
+ void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+ {
+ compare32(cond, left, right, dest);
+ }
+
void storePtr(RegisterID src, ImplicitAddress address)
{
store32(src, address);
store32(TrustedImm32(imm), address);
}
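+ // Additional storePtr() overloads: a 32-bit immediate and a pointer immediate
+ // stored to a base-index address, both lowered to store32().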
+ void storePtr(TrustedImm32 imm, ImplicitAddress address)
+ {
+ store32(imm, address);
+ }
+
+ void storePtr(TrustedImmPtr imm, BaseIndex address)
+ {
+ store32(TrustedImm32(imm), address);
+ }
+
DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
{
return store32WithAddressOffsetPatch(src, address);
{
return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
}
-#else
+
+#else // !CPU(X86_64)
+
void addPtr(RegisterID src, RegisterID dest)
{
add64(src, dest);
and64(imm, srcDest);
}
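+ // 64-bit counterparts of the pointer helpers defined for the 32-bit targets above.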
+ void andPtr(TrustedImmPtr imm, RegisterID srcDest)
+ {
+ and64(imm, srcDest);
+ }
+
+ void lshiftPtr(Imm32 imm, RegisterID srcDest)
+ {
+ lshift64(trustedImm32ForShift(imm), srcDest);
+ }
+
void negPtr(RegisterID dest)
{
neg64(dest);
return branchSub64(cond, src1, src2, dest);
}
-#if ENABLE(JIT_CONSTANT_BLINDING)
using MacroAssemblerBase::and64;
using MacroAssemblerBase::convertInt32ToDouble;
using MacroAssemblerBase::store64;
if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
return shouldConsiderBlinding();
- value = abs(value);
+ value = fabs(value);
// Only allow a limited set of fractional components
double scaledValue = value * 8;
if (scaledValue / 8 != value)
return value > 0xff;
}
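+ // Picks the 32- or 64-bit blinding heuristic based on the target's pointer size.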
+ bool shouldBlindPointerForSpecificArch(uintptr_t value)
+ {
+ if (sizeof(void*) == 4)
+ return shouldBlindForSpecificArch(static_cast<uint32_t>(value));
+ return shouldBlindForSpecificArch(static_cast<uint64_t>(value));
+ }
+
bool shouldBlind(ImmPtr imm)
- {
+ {
+ if (!canBlind())
+ return false;
+
#if ENABLE(FORCED_JIT_BLINDING)
UNUSED_PARAM(imm);
// Debug always blind all constants, if only so we know
// if we've broken blinding during patch development.
- return true;
+ return true;
#endif
// First off we'll special case common, "safe" values to avoid hurting
if (!shouldConsiderBlinding())
return false;
- return shouldBlindForSpecificArch(value);
+ return shouldBlindPointerForSpecificArch(value);
}
struct RotatedImmPtr {
void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
{
- if (shouldBlind(imm)) {
+ if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
RegisterID scratchRegister = scratchRegisterForBlinding();
loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
convertInt32ToDouble(scratchRegister, dest);
Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
{
- if (shouldBlind(right)) {
+ if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
RegisterID scratchRegister = scratchRegisterForBlinding();
loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
return branchPtr(cond, left, scratchRegister);
void storePtr(ImmPtr imm, Address dest)
{
- if (shouldBlind(imm)) {
+ if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
RegisterID scratchRegister = scratchRegisterForBlinding();
loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
storePtr(scratchRegister, dest);
void store64(Imm64 imm, Address dest)
{
- if (shouldBlind(imm)) {
+ if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
RegisterID scratchRegister = scratchRegisterForBlinding();
loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
store64(scratchRegister, dest);
store64(imm.asTrustedImm64(), dest);
}
-#endif
-
#endif // !CPU(X86_64)
-#if ENABLE(JIT_CONSTANT_BLINDING)
bool shouldBlind(Imm32 imm)
{
#if ENABLE(FORCED_JIT_BLINDING)
// Debug always blind all constants, if only so we know
// if we've broken blinding during patch development.
return true;
-#else
+#else // ENABLE(FORCED_JIT_BLINDING)
// First off we'll special case common, "safe" values to avoid hurting
// performance too much
return false;
return shouldBlindForSpecificArch(value);
-#endif
+#endif // ENABLE(FORCED_JIT_BLINDING)
}
struct BlindedImm32 {
{
store64(value, addressForPoke(index));
}
-#endif
+#endif // CPU(X86_64)
void store32(Imm32 imm, Address dest)
{
BlindedImm32 blind = xorBlindConstant(imm);
store32(blind.value1, dest);
xor32(blind.value2, dest);
-#else
- if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
- loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
- store32(scratchRegister, dest);
+#else // CPU(X86) || CPU(X86_64)
+ if (haveScratchRegisterForBlinding()) {
+ loadXorBlindedConstant(xorBlindConstant(imm), scratchRegisterForBlinding());
+ store32(scratchRegisterForBlinding(), dest);
} else {
// If we don't have a scratch register available for use, we'll just
// place a random number of nops.
nop();
store32(imm.asTrustedImm32(), dest);
}
-#endif
+#endif // CPU(X86) || CPU(X86_64)
} else
store32(imm.asTrustedImm32(), dest);
}
Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
{
if (shouldBlind(right)) {
- if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
- loadXorBlindedConstant(xorBlindConstant(right), scratchRegister);
- return branch32(cond, left, scratchRegister);
+ if (haveScratchRegisterForBlinding()) {
+ loadXorBlindedConstant(xorBlindConstant(right), scratchRegisterForBlinding());
+ return branch32(cond, left, scratchRegisterForBlinding());
}
// If we don't have a scratch register available for use, we'll just
// place a random number of nops.
Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
{
if (src == dest)
- ASSERT(scratchRegisterForBlinding());
+ ASSERT(haveScratchRegisterForBlinding());
if (shouldBlind(imm)) {
if (src == dest) {
- if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
- move(src, scratchRegister);
- src = scratchRegister;
- }
+ move(src, scratchRegisterForBlinding());
+ src = scratchRegisterForBlinding();
}
loadXorBlindedConstant(xorBlindConstant(imm), dest);
return branchAdd32(cond, src, dest);
Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
{
if (src == dest)
- ASSERT(scratchRegisterForBlinding());
+ ASSERT(haveScratchRegisterForBlinding());
if (shouldBlind(imm)) {
if (src == dest) {
- if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
- move(src, scratchRegister);
- src = scratchRegister;
- }
+ move(src, scratchRegisterForBlinding());
+ src = scratchRegisterForBlinding();
}
loadXorBlindedConstant(xorBlindConstant(imm), dest);
return branchMul32(cond, src, dest);
return branchSub32(cond, src, imm.asTrustedImm32(), dest);
}
- // Immediate shifts only have 5 controllable bits
- // so we'll consider them safe for now.
- TrustedImm32 trustedImm32ForShift(Imm32 imm)
- {
- return TrustedImm32(imm.asTrustedImm32().m_value & 31);
- }
-
void lshift32(Imm32 imm, RegisterID dest)
{
lshift32(trustedImm32ForShift(imm), dest);
{
urshift32(src, trustedImm32ForShift(amount), dest);
}
-#endif
};
} // namespace JSC