X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/93a3786624b2768d89bfa27e46598dc64e2fb70a..ed1e77d3adeb83d26fd1dfb16dd84cabdcefd250:/assembler/MacroAssembler.h?ds=inline

diff --git a/assembler/MacroAssembler.h b/assembler/MacroAssembler.h
index a99aefd..fd4c5bb 100644
--- a/assembler/MacroAssembler.h
+++ b/assembler/MacroAssembler.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,8 +26,6 @@
 #ifndef MacroAssembler_h
 #define MacroAssembler_h
 
-#include 
-
 #if ENABLE(ASSEMBLER)
 
 #if CPU(ARM_THUMB2)
@@ -71,16 +69,57 @@ namespace JSC {
 class MacroAssembler : public MacroAssemblerBase {
 public:
 
+    static RegisterID nextRegister(RegisterID reg)
+    {
+        return static_cast<RegisterID>(reg + 1);
+    }
+
+    static FPRegisterID nextFPRegister(FPRegisterID reg)
+    {
+        return static_cast<FPRegisterID>(reg + 1);
+    }
+
+    static unsigned numberOfRegisters()
+    {
+        return lastRegister() - firstRegister() + 1;
+    }
+
+    static unsigned registerIndex(RegisterID reg)
+    {
+        return reg - firstRegister();
+    }
+
+    static unsigned numberOfFPRegisters()
+    {
+        return lastFPRegister() - firstFPRegister() + 1;
+    }
+
+    static unsigned fpRegisterIndex(FPRegisterID reg)
+    {
+        return reg - firstFPRegister();
+    }
+
+    static unsigned registerIndex(FPRegisterID reg)
+    {
+        return fpRegisterIndex(reg) + numberOfRegisters();
+    }
+
+    static unsigned totalNumberOfRegisters()
+    {
+        return numberOfRegisters() + numberOfFPRegisters();
+    }
+
     using MacroAssemblerBase::pop;
     using MacroAssemblerBase::jump;
     using MacroAssemblerBase::branch32;
     using MacroAssemblerBase::move;
-
-#if ENABLE(JIT_CONSTANT_BLINDING)
     using MacroAssemblerBase::add32;
     using MacroAssemblerBase::and32;
     using MacroAssemblerBase::branchAdd32;
     using MacroAssemblerBase::branchMul32;
+#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(X86_64)
+    using MacroAssemblerBase::branchPtr;
+#endif
     using MacroAssemblerBase::branchSub32;
     using MacroAssemblerBase::lshift32;
     using MacroAssemblerBase::or32;
@@ -89,11 +128,10 @@ public:
     using MacroAssemblerBase::sub32;
     using MacroAssemblerBase::urshift32;
     using MacroAssemblerBase::xor32;
-#endif
-
+
     static bool isPtrAlignedAddressOffset(ptrdiff_t value)
     {
-        return value == (int32_t)value;
+        return value == static_cast<int32_t>(value);
     }
 
     static const double twoToThe32; // This is super useful for some double code.
@@ -197,6 +235,10 @@
     {
         push(src);
     }
+    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
+    {
+        push(imm);
+    }
     void popToRestore(RegisterID dest)
     {
         pop(dest);
@@ -211,6 +253,8 @@
         loadDouble(stackPointerRegister, dest);
         addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
     }
+
+    static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
 #endif // !CPU(ARM64)
 
 #if CPU(X86_64) || CPU(ARM64)
@@ -238,6 +282,13 @@
     }
 #endif
 
+    // Immediate shifts only have 5 controllable bits
+    // so we'll consider them safe for now.
+    TrustedImm32 trustedImm32ForShift(Imm32 imm)
+    {
+        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
+    }
+
     // Backwards banches, these are currently all implemented using existing forwards branch mechanisms.
     void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
     {
@@ -294,6 +345,12 @@ public:
         return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
     }
 
+    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
+    }
+
+#if !CPU(ARM_TRADITIONAL)
     PatchableJump patchableJump()
     {
         return PatchableJump(jump());
@@ -308,6 +365,12 @@
     {
         return PatchableJump(branch32(cond, reg, imm));
     }
+
+    PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
+    {
+        return PatchableJump(branch32(cond, address, imm));
+    }
+#endif
 #endif
 
     void jump(Label target)
@@ -398,7 +461,27 @@
     {
         and32(imm, srcDest);
     }
+
+    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
+    {
+        and32(TrustedImm32(imm), srcDest);
+    }
+
+    void lshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        lshift32(trustedImm32ForShift(imm), srcDest);
+    }
+
+    void rshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        rshift32(trustedImm32ForShift(imm), srcDest);
+    }
+
+    void urshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        urshift32(trustedImm32ForShift(imm), srcDest);
+    }
+
     void negPtr(RegisterID dest)
     {
         neg32(dest);
@@ -485,6 +568,11 @@
         compare32(cond, left, right, dest);
     }
 
+    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        compare32(cond, left, right, dest);
+    }
+
     void storePtr(RegisterID src, ImplicitAddress address)
     {
         store32(src, address);
@@ -515,6 +603,16 @@
         store32(TrustedImm32(imm), address);
     }
 
+    void storePtr(TrustedImm32 imm, ImplicitAddress address)
+    {
+        store32(imm, address);
+    }
+
+    void storePtr(TrustedImmPtr imm, BaseIndex address)
+    {
+        store32(TrustedImm32(imm), address);
+    }
+
     DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
     {
         return store32WithAddressOffsetPatch(src, address);
@@ -599,7 +697,9 @@
     {
         return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
     }
-#else
+
+#else // !CPU(X86_64)
+
     void addPtr(RegisterID src, RegisterID dest)
     {
         add64(src, dest);
@@ -650,6 +750,26 @@
         and64(imm, srcDest);
     }
 
+    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
+    {
+        and64(imm, srcDest);
+    }
+
+    void lshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        lshift64(trustedImm32ForShift(imm), srcDest);
+    }
+
+    void rshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        rshift64(trustedImm32ForShift(imm), srcDest);
+    }
+
+    void urshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        urshift64(trustedImm32ForShift(imm), srcDest);
+    }
+
     void negPtr(RegisterID dest)
     {
         neg64(dest);
@@ -875,7 +995,6 @@
         return branchSub64(cond, src1, src2, dest);
     }
 
-#if ENABLE(JIT_CONSTANT_BLINDING)
     using MacroAssemblerBase::and64;
     using MacroAssemblerBase::convertInt32ToDouble;
     using MacroAssemblerBase::store64;
@@ -890,7 +1009,7 @@
         if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
             return shouldConsiderBlinding();
 
-        value = abs(value);
+        value = fabs(value);
         // Only allow a limited set of fractional components
         double scaledValue = value * 8;
         if (scaledValue / 8 != value)
@@ -902,13 +1021,23 @@
         return value > 0xff;
     }
 
+    bool shouldBlindPointerForSpecificArch(uintptr_t value)
+    {
+        if (sizeof(void*) == 4)
+            return shouldBlindForSpecificArch(static_cast<uint32_t>(value));
+        return shouldBlindForSpecificArch(static_cast<uint64_t>(value));
+    }
+
     bool shouldBlind(ImmPtr imm)
-    {
+    {
+        if (!canBlind())
+            return false;
+
 #if ENABLE(FORCED_JIT_BLINDING)
         UNUSED_PARAM(imm);
         // Debug always blind all constants, if only so we know
         // if we've broken blinding during patch development.
-        return true;
+        return true;
 #endif
 
         // First off we'll special case common, "safe" values to avoid hurting
@@ -934,7 +1063,7 @@
         if (!shouldConsiderBlinding())
             return false;
 
-        return shouldBlindForSpecificArch(value);
+        return shouldBlindPointerForSpecificArch(value);
     }
 
     struct RotatedImmPtr {
@@ -1031,7 +1160,7 @@
 
     void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
     {
-        if (shouldBlind(imm)) {
+        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
            convertInt32ToDouble(scratchRegister, dest);
@@ -1067,7 +1196,7 @@
 
     Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
     {
-        if (shouldBlind(right)) {
+        if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
            return branchPtr(cond, left, scratchRegister);
@@ -1077,7 +1206,7 @@
 
     void storePtr(ImmPtr imm, Address dest)
     {
-        if (shouldBlind(imm)) {
+        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            storePtr(scratchRegister, dest);
@@ -1087,7 +1216,7 @@
 
     void store64(Imm64 imm, Address dest)
     {
-        if (shouldBlind(imm)) {
+        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            store64(scratchRegister, dest);
@@ -1095,11 +1224,8 @@
         store64(imm.asTrustedImm64(), dest);
     }
 
-#endif
-
 #endif // !CPU(X86_64)
 
-#if ENABLE(JIT_CONSTANT_BLINDING)
     bool shouldBlind(Imm32 imm)
     {
 #if ENABLE(FORCED_JIT_BLINDING)
@@ -1107,7 +1233,7 @@
         // Debug always blind all constants, if only so we know
         // if we've broken blinding during patch development.
         return true;
-#else
+#else // ENABLE(FORCED_JIT_BLINDING)
 
         // First off we'll special case common, "safe" values to avoid hurting
         // performance too much
@@ -1128,7 +1254,7 @@
             return false;
 
         return shouldBlindForSpecificArch(value);
-#endif
+#endif // ENABLE(FORCED_JIT_BLINDING)
     }
 
     struct BlindedImm32 {
@@ -1299,7 +1425,7 @@
     {
         store64(value, addressForPoke(index));
     }
-#endif
+#endif // CPU(X86_64)
 
     void store32(Imm32 imm, Address dest)
     {
@@ -1308,10 +1434,10 @@
             BlindedImm32 blind = xorBlindConstant(imm);
             store32(blind.value1, dest);
             xor32(blind.value2, dest);
-#else
-            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
-                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
-                store32(scratchRegister, dest);
+#else // CPU(X86) || CPU(X86_64)
+            if (haveScratchRegisterForBlinding()) {
+                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegisterForBlinding());
+                store32(scratchRegisterForBlinding(), dest);
             } else {
                 // If we don't have a scratch register available for use, we'll just
                 // place a random number of nops.
@@ -1320,7 +1446,7 @@ public:
                     nop();
                 store32(imm.asTrustedImm32(), dest);
             }
-#endif
+#endif // CPU(X86) || CPU(X86_64)
         } else
             store32(imm.asTrustedImm32(), dest);
     }
@@ -1368,9 +1494,9 @@
     Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
     {
         if (shouldBlind(right)) {
-            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
-                loadXorBlindedConstant(xorBlindConstant(right), scratchRegister);
-                return branch32(cond, left, scratchRegister);
+            if (haveScratchRegisterForBlinding()) {
+                loadXorBlindedConstant(xorBlindConstant(right), scratchRegisterForBlinding());
+                return branch32(cond, left, scratchRegisterForBlinding());
             }
             // If we don't have a scratch register available for use, we'll just
             // place a random number of nops.
@@ -1386,14 +1512,12 @@
     Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
     {
         if (src == dest)
-            ASSERT(scratchRegisterForBlinding());
+            ASSERT(haveScratchRegisterForBlinding());
 
         if (shouldBlind(imm)) {
             if (src == dest) {
-                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
-                    move(src, scratchRegister);
-                    src = scratchRegister;
-                }
+                move(src, scratchRegisterForBlinding());
+                src = scratchRegisterForBlinding();
             }
             loadXorBlindedConstant(xorBlindConstant(imm), dest);
             return branchAdd32(cond, src, dest);
@@ -1404,14 +1528,12 @@
     Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
     {
         if (src == dest)
-            ASSERT(scratchRegisterForBlinding());
+            ASSERT(haveScratchRegisterForBlinding());
 
         if (shouldBlind(imm)) {
             if (src == dest) {
-                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
-                    move(src, scratchRegister);
-                    src = scratchRegister;
-                }
+                move(src, scratchRegisterForBlinding());
+                src = scratchRegisterForBlinding();
             }
             loadXorBlindedConstant(xorBlindConstant(imm), dest);
             return branchMul32(cond, src, dest);
@@ -1432,13 +1554,6 @@
         return branchSub32(cond, src, imm.asTrustedImm32(), dest);
     }
 
-    // Immediate shifts only have 5 controllable bits
-    // so we'll consider them safe for now.
-    TrustedImm32 trustedImm32ForShift(Imm32 imm)
-    {
-        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
-    }
-
     void lshift32(Imm32 imm, RegisterID dest)
     {
         lshift32(trustedImm32ForShift(imm), dest);
@@ -1468,7 +1583,6 @@
     {
         urshift32(src, trustedImm32ForShift(amount), dest);
     }
-#endif
 };
 
 } // namespace JSC
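
Note on the blinding logic retained above: the point of shouldBlind() / xorBlindConstant() / loadXorBlindedConstant() is that an untrusted immediate is never emitted verbatim. It is split into a random key and key ^ value, and the JIT loads one and XORs in the other (falling back to emitting a random number of nops when no scratch register is available). What follows is a minimal standalone sketch of that XOR transformation, not JavaScriptCore code: BlindedImm32 mirrors the struct in the header, while blindConstant() and emitBlinded32() are hypothetical names used only for illustration.

// Standalone sketch of XOR constant blinding (illustrative only, not JSC API).
#include <cstdint>
#include <cstdio>
#include <random>

struct BlindedImm32 {
    uint32_t value1; // random key, emitted as the first immediate
    uint32_t value2; // key ^ original, emitted as the second immediate
};

// Split an untrusted 32-bit immediate so its raw bit pattern never
// appears in the instruction stream: value1 ^ value2 == imm.
static BlindedImm32 blindConstant(uint32_t imm)
{
    static std::mt19937 rng { std::random_device{}() };
    uint32_t key = rng();
    return { key, key ^ imm };
}

// Stand-in for the two instructions the JIT would emit:
// "mov reg, value1" followed by "xor reg, value2".
static uint32_t emitBlinded32(BlindedImm32 blind)
{
    uint32_t reg = blind.value1;
    reg ^= blind.value2;
    return reg;
}

int main()
{
    uint32_t imm = 0xdeadbeefu;
    BlindedImm32 blind = blindConstant(imm);
    std::printf("reconstructed %#x (expected %#x)\n", emitBlinded32(blind), imm);
    return 0;
}

The same reasoning is behind trustedImm32ForShift() in the header: a 32-bit shift amount only has 5 meaningful bits, so masking the immediate with & 31 leaves nothing attacker-controllable worth blinding.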