diff --git a/assembler/MacroAssemblerX86Common.h b/assembler/MacroAssemblerX86Common.h
index 933b36b..f171dc4 100644
--- a/assembler/MacroAssemblerX86Common.h
+++ b/assembler/MacroAssemblerX86Common.h
@@ -34,14 +34,23 @@ namespace JSC {
 
 class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
+protected:
+#if CPU(X86_64)
+    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
+#endif
+
     static const int DoubleConditionBitInvert = 0x10;
     static const int DoubleConditionBitSpecial = 0x20;
     static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
 
 public:
     typedef X86Assembler::FPRegisterID FPRegisterID;
+    typedef X86Assembler::XMMRegisterID XMMRegisterID;
 
-    static const int MaximumCompactPtrAlignedAddressOffset = 127;
+    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+    {
+        return value >= -128 && value <= 127;
+    }
 
     enum RelationalCondition {
         Equal = X86Assembler::ConditionE,
@@ -59,6 +68,7 @@ public:
     enum ResultCondition {
         Overflow = X86Assembler::ConditionO,
         Signed = X86Assembler::ConditionS,
+        PositiveOrZero = X86Assembler::ConditionNS,
         Zero = X86Assembler::ConditionE,
         NonZero = X86Assembler::ConditionNE
     };
@@ -85,6 +95,16 @@ public:
 
     static const RegisterID stackPointerRegister = X86Registers::esp;
 
+#if ENABLE(JIT_CONSTANT_BLINDING)
+    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
+#if CPU(X86_64)
+    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
+#if OS(DARWIN) // On 64-bit systems other than Darwin, uint64_t and uintptr_t are the same type, so the overload is prohibited.
+    static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
+#endif
+#endif
+#endif
+
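
A note on the blinding predicates just added: on x86 a 32-bit immediate is emitted
verbatim into the instruction stream, so a constant of 0x00ffffff or larger hands an
attacker several contiguous bytes of chosen data to aim a misaligned jump at, while
smaller constants are left unblinded to avoid the extra instructions. The sketch below
illustrates the xor-recombination idea; blindConstant is a hypothetical helper for
illustration, not part of this header.

    #include <cstdint>

    struct BlindedImm32 {
        uint32_t blinded; // value ^ key -- this is what lands in the code stream
        uint32_t key;     // per-site random key, emitted separately
    };

    // Hypothetical: split an attacker-visible constant into two harmless halves.
    inline BlindedImm32 blindConstant(uint32_t value, uint32_t key)
    {
        return { value ^ key, key };
    }

    // The JIT would then emit "mov reg, blinded; xor reg, key", leaving
    // reg == value without the raw constant ever appearing in the code.
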
     // Integer arithmetic operations:
     //
     // Operations are typically two operand - operation(source, srcDst)
@@ -116,6 +136,11 @@ public:
     {
         m_assembler.addl_rm(src, dest.offset, dest.base);
     }
+
+    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        m_assembler.leal_mr(imm.m_value, src, dest);
+    }
 
     void and32(RegisterID src, RegisterID dest)
     {
@@ -222,16 +247,6 @@ public:
         m_assembler.negl_m(srcDest.offset, srcDest.base);
     }
 
-    void not32(RegisterID srcDest)
-    {
-        m_assembler.notl_r(srcDest);
-    }
-
-    void not32(Address srcDest)
-    {
-        m_assembler.notl_m(srcDest.offset, srcDest.base);
-    }
-
     void or32(RegisterID src, RegisterID dest)
     {
         m_assembler.orl_rr(src, dest);
@@ -374,7 +389,6 @@ public:
         m_assembler.subl_rm(src, dest.offset, dest.base);
     }
 
-
     void xor32(RegisterID src, RegisterID dest)
     {
         m_assembler.xorl_rr(src, dest);
@@ -382,11 +396,17 @@ public:
     }
 
     void xor32(TrustedImm32 imm, Address dest)
     {
-        m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
+        if (imm.m_value == -1)
+            m_assembler.notl_m(dest.offset, dest.base);
+        else
+            m_assembler.xorl_im(imm.m_value, dest.offset, dest.base);
     }
 
     void xor32(TrustedImm32 imm, RegisterID dest)
     {
+        if (imm.m_value == -1)
+            m_assembler.notl_r(dest);
+        else
         m_assembler.xorl_ir(imm.m_value, dest);
     }
@@ -423,6 +443,23 @@ public:
         m_assembler.sqrtsd_rr(src, dst);
     }
 
+    void absDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        ASSERT(src != dst);
+        static const double negativeZeroConstant = -0.0;
+        loadDouble(&negativeZeroConstant, dst);
+        m_assembler.andnpd_rr(src, dst);
+    }
+
+    void negateDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        ASSERT(src != dst);
+        static const double negativeZeroConstant = -0.0;
+        loadDouble(&negativeZeroConstant, dst);
+        m_assembler.xorpd_rr(src, dst);
+    }
+
+
     // Memory access operations:
     //
     // Loads are of the form load(address, destination) and stores of the form
@@ -445,31 +482,58 @@ public:
         load32(address, dest);
     }
 
+    void load16Unaligned(BaseIndex address, RegisterID dest)
+    {
+        load16(address, dest);
+    }
+
     DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
     {
+        padBeforePatch();
         m_assembler.movl_mr_disp32(address.offset, address.base, dest);
         return DataLabel32(this);
     }
 
     DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
     {
+        padBeforePatch();
         m_assembler.movl_mr_disp8(address.offset, address.base, dest);
         return DataLabelCompact(this);
    }
 
     static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
     {
-        ASSERT(value >= 0);
-        ASSERT(value < MaximumCompactPtrAlignedAddressOffset);
+        ASSERT(isCompactPtrAlignedAddressOffset(value));
         AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
     }
 
     DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
     {
+        padBeforePatch();
         m_assembler.movl_mr_disp8(address.offset, address.base, dest);
         return DataLabelCompact(this);
     }
 
+    void load8(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.movzbl_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
+
+    void load8(ImplicitAddress address, RegisterID dest)
+    {
+        m_assembler.movzbl_mr(address.offset, address.base, dest);
+    }
+
+    void load8Signed(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
+
+    void load8Signed(ImplicitAddress address, RegisterID dest)
+    {
+        m_assembler.movsbl_mr(address.offset, address.base, dest);
+    }
 
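
The absDouble/negateDouble additions above lean on the IEEE 754 bit layout rather than
arithmetic: -0.0 is the pattern with only the sign bit set, so andnpd (which computes
dst = ~dst & src, with the mask sitting in dst -- hence the ASSERT(src != dst)) clears
the sign bit, and xorpd flips it. A scalar sketch of the same trick, assuming the usual
64-bit IEEE layout:

    #include <cstdint>
    #include <cstring>

    inline double absViaSignMask(double x)
    {
        uint64_t bits, mask;
        const double negativeZero = -0.0; // only the sign bit set
        std::memcpy(&bits, &x, sizeof bits);
        std::memcpy(&mask, &negativeZero, sizeof mask);
        bits &= ~mask; // andnpd: clear the sign bit -> |x|
        std::memcpy(&x, &bits, sizeof x);
        return x;
    }

    inline double negateViaSignMask(double x)
    {
        uint64_t bits, mask;
        const double negativeZero = -0.0;
        std::memcpy(&bits, &x, sizeof bits);
        std::memcpy(&mask, &negativeZero, sizeof mask);
        bits ^= mask; // xorpd: flip the sign bit -> -x
        std::memcpy(&x, &bits, sizeof x);
        return x;
    }
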
     void load16(BaseIndex address, RegisterID dest)
     {
         m_assembler.movzwl_mr(address.offset, address.base, address.index, address.scale, dest);
@@ -480,8 +544,19 @@ public:
         m_assembler.movzwl_mr(address.offset, address.base, dest);
     }
 
+    void load16Signed(BaseIndex address, RegisterID dest)
+    {
+        m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
+
+    void load16Signed(Address address, RegisterID dest)
+    {
+        m_assembler.movswl_mr(address.offset, address.base, dest);
+    }
+
     DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
     {
+        padBeforePatch();
         m_assembler.movl_rm_disp32(src, address.offset, address.base);
         return DataLabel32(this);
     }
@@ -500,6 +575,77 @@ public:
     {
         m_assembler.movl_i32m(imm.m_value, address.offset, address.base);
     }
+
+    void store32(TrustedImm32 imm, BaseIndex address)
+    {
+        m_assembler.movl_i32m(imm.m_value, address.offset, address.base, address.index, address.scale);
+    }
+
+    void store8(TrustedImm32 imm, Address address)
+    {
+        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
+        m_assembler.movb_i8m(imm.m_value, address.offset, address.base);
+    }
+
+    void store8(TrustedImm32 imm, BaseIndex address)
+    {
+        ASSERT(-128 <= imm.m_value && imm.m_value < 128);
+        m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
+    }
+
+    void store8(RegisterID src, BaseIndex address)
+    {
+#if CPU(X86)
+        // On 32-bit x86 we can only store from the first 4 registers;
+        // esp..edi are mapped to the 'h' registers!
+        if (src >= 4) {
+            // Pick a temporary register.
+            RegisterID temp;
+            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
+                temp = X86Registers::eax;
+            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
+                temp = X86Registers::ebx;
+            else {
+                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
+                temp = X86Registers::ecx;
+            }
+
+            // Swap to the temporary register to perform the store.
+            swap(src, temp);
+            m_assembler.movb_rm(temp, address.offset, address.base, address.index, address.scale);
+            swap(src, temp);
+            return;
+        }
+#endif
+        m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
+    }
+
+    void store16(RegisterID src, BaseIndex address)
+    {
+#if CPU(X86)
+        // On 32-bit x86 we can only store from the first 4 registers;
+        // esp..edi are mapped to the 'h' registers!
+        if (src >= 4) {
+            // Pick a temporary register.
+            RegisterID temp;
+            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
+                temp = X86Registers::eax;
+            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
+                temp = X86Registers::ebx;
+            else {
+                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
+                temp = X86Registers::ecx;
+            }
+
+            // Swap to the temporary register to perform the store.
+            swap(src, temp);
+            m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
+            swap(src, temp);
+            return;
+        }
+#endif
+        m_assembler.movw_rm(src, address.offset, address.base, address.index, address.scale);
+    }
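
The src >= 4 guards in store8 and store16 above come from x86 instruction encoding:
without a REX prefix, a byte-register operand with code 4..7 selects the high-byte
registers AH/CH/DH/BH rather than the low byte of esp/ebp/esi/edi, so on 32-bit x86
only eax..ebx can be byte-stored directly and the code xchg-swaps the value into one
of those first (store16 applies the same conservative workaround). A sketch of the
numbering this relies on, mirroring the X86Registers enum the file assumes:

    // Assumed numbering, matching X86Registers::RegisterID on 32-bit x86.
    enum RegCode { eax = 0, ecx, edx, ebx, esp, ebp, esi, edi };

    // Legacy (no-REX) byte encodings: codes 4..7 mean AH/CH/DH/BH, not
    // SPL/BPL/SIL/DIL, so only codes 0..3 expose a directly storable low byte.
    inline bool hasDirectByteStore(RegCode r) { return r < esp; }

This is exactly the test the "if (src >= 4)" branch performs.
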
 
     // Floating-point operation:
     //
@@ -513,17 +659,63 @@ public:
         m_assembler.movsd_rr(src, dest);
     }
 
+    void loadDouble(const void* address, FPRegisterID dest)
+    {
+#if CPU(X86)
+        ASSERT(isSSE2Present());
+        m_assembler.movsd_mr(address, dest);
+#else
+        move(TrustedImmPtr(address), scratchRegister);
+        loadDouble(scratchRegister, dest);
+#endif
+    }
+
     void loadDouble(ImplicitAddress address, FPRegisterID dest)
     {
         ASSERT(isSSE2Present());
         m_assembler.movsd_mr(address.offset, address.base, dest);
     }
+
+    void loadDouble(BaseIndex address, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movsd_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
+    void loadFloat(BaseIndex address, FPRegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movss_mr(address.offset, address.base, address.index, address.scale, dest);
+    }
 
     void storeDouble(FPRegisterID src, ImplicitAddress address)
     {
         ASSERT(isSSE2Present());
         m_assembler.movsd_rm(src, address.offset, address.base);
     }
+
+    void storeDouble(FPRegisterID src, BaseIndex address)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movsd_rm(src, address.offset, address.base, address.index, address.scale);
+    }
+
+    void storeFloat(FPRegisterID src, BaseIndex address)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movss_rm(src, address.offset, address.base, address.index, address.scale);
+    }
+
+    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvtsd2ss_rr(src, dst);
+    }
+
+    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvtss2sd_rr(src, dst);
+    }
 
     void addDouble(FPRegisterID src, FPRegisterID dest)
     {
@@ -635,11 +827,15 @@ public:
         m_assembler.ucomisd_rr(right, left);
 
         if (cond == DoubleEqual) {
+            if (left == right)
+                return Jump(m_assembler.jnp());
             Jump isUnordered(m_assembler.jp());
             Jump result = Jump(m_assembler.je());
             isUnordered.link(this);
             return result;
         } else if (cond == DoubleNotEqualOrUnordered) {
+            if (left == right)
+                return Jump(m_assembler.jp());
             Jump isUnordered(m_assembler.jp());
             Jump isEqual(m_assembler.je());
             isUnordered.link(this);
@@ -664,17 +860,39 @@ public:
         return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
     }
 
+    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvttsd2si_rr(src, dest);
+        return branch32(branchType ? GreaterThanOrEqual : LessThan, dest, TrustedImm32(0));
+    }
+
+    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvttsd2si_rr(src, dest);
+    }
+
+#if CPU(X86_64)
+    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.cvttsd2siq_rr(src, dest);
+    }
+#endif
+
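
The extra jp/jnp tests added to branchDouble above exist because ucomisd reports an
unordered comparison (either operand NaN) through the parity flag: a bare je would
treat NaN == NaN as equal. The left == right shortcut works because a register always
compares equal to itself unless it holds NaN, so "jump if parity clear" is the entire
DoubleEqual test. A scalar model of the flag outcomes, per the Intel manual:

    // ucomisd a, b sets ZF/PF/CF as follows (Intel SDM):
    //   unordered (a or b NaN): 1/1/1   a == b: 1/0/0
    //   a < b:                  0/0/1   a > b:  0/0/0
    struct Flags { bool zf, pf, cf; };

    inline Flags ucomisdFlags(double a, double b)
    {
        if (a != a || b != b)
            return { true, true, true };   // unordered: ZF set, so je alone would fire
        if (a == b)
            return { true, false, false };
        if (a < b)
            return { false, false, true };
        return { false, false, false };
    }
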
     // Convert 'src' to an integer, and place the result in 'dest'.
     // If the result is not representable as a 32 bit value, branch.
     // May also branch for some values that are representable in 32 bits
     // (specifically, in this case, 0).
-    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
+    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
     {
         ASSERT(isSSE2Present());
         m_assembler.cvttsd2si_rr(src, dest);
 
         // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
-        failureCases.append(branchTest32(Zero, dest));
+        if (negZeroCheck)
+            failureCases.append(branchTest32(Zero, dest));
 
         // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
         convertInt32ToDouble(dest, fpTemp);
@@ -697,6 +915,36 @@ public:
         return branchDouble(DoubleEqualOrUnordered, reg, scratch);
     }
 
+    void lshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.psllq_i8r(imm.m_value, reg);
+    }
+
+    void rshiftPacked(TrustedImm32 imm, XMMRegisterID reg)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.psrlq_i8r(imm.m_value, reg);
+    }
+
+    void orPacked(XMMRegisterID src, XMMRegisterID dst)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.por_rr(src, dst);
+    }
+
+    void moveInt32ToPacked(RegisterID src, XMMRegisterID dst)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movd_rr(src, dst);
+    }
+
+    void movePackedToInt32(XMMRegisterID src, RegisterID dst)
+    {
+        ASSERT(isSSE2Present());
+        m_assembler.movd_rr(src, dst);
+    }
+
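
The packed helpers just added (movd, psllq, psrlq, por) are just enough to assemble or
pick apart a 64-bit value in an XMM register from 32-bit general-purpose halves -- the
kind of boxing a 32-bit JIT needs for doubles. A hypothetical composition built only
from the primitives above (not a function in this header):

    // Materialize the 64-bit pattern (hi << 32) | lo in an XMM register.
    void boxHalvesToXMM(RegisterID hi, RegisterID lo, XMMRegisterID dest, XMMRegisterID scratch)
    {
        moveInt32ToPacked(hi, dest);          // dest    = zero-extended hi (movd)
        lshiftPacked(TrustedImm32(32), dest); // dest    = hi << 32         (psllq)
        moveInt32ToPacked(lo, scratch);       // scratch = zero-extended lo (movd)
        orPacked(scratch, dest);              // dest    = (hi << 32) | lo  (por)
    }
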
     // Stack manipulation operations:
     //
     // The ABI is assumed to provide a stack abstraction to memory,
@@ -754,6 +1002,11 @@ public:
         m_assembler.movq_i64r(imm.asIntptr(), dest);
     }
 
+    void move(TrustedImm64 imm, RegisterID dest)
+    {
+        m_assembler.movq_i64r(imm.m_value, dest);
+    }
+
     void swap(RegisterID reg1, RegisterID reg2)
     {
         if (reg1 != reg2)
@@ -839,15 +1092,6 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
-    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
-    {
-        if (((cond == Equal) || (cond == NotEqual)) && !left.m_value)
-            m_assembler.testl_rr(right, right);
-        else
-            m_assembler.cmpl_ir(left.m_value, right);
-        return Jump(m_assembler.jCC(x86Condition(commute(cond))));
-    }
-
     Jump branch32(RelationalCondition cond, RegisterID left, Address right)
     {
         m_assembler.cmpl_mr(right.offset, right.base, left);
@@ -877,20 +1121,6 @@ public:
         return branch32(cond, left, right);
     }
 
-    Jump branch16(RelationalCondition cond, BaseIndex left, RegisterID right)
-    {
-        m_assembler.cmpw_rm(right, left.offset, left.base, left.index, left.scale);
-        return Jump(m_assembler.jCC(x86Condition(cond)));
-    }
-
-    Jump branch16(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
-    {
-        ASSERT(!(right.m_value & 0xFFFF0000));
-
-        m_assembler.cmpw_im(right.m_value, left.offset, left.base, left.index, left.scale);
-        return Jump(m_assembler.jCC(x86Condition(cond)));
-    }
-
     Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
     {
         m_assembler.testl_rr(reg, mask);
@@ -902,8 +1132,6 @@ public:
         // if we are only interested in the low seven bits, this can be tested with a testb
         if (mask.m_value == -1)
             m_assembler.testl_rr(reg, reg);
-        else if ((mask.m_value & ~0x7f) == 0)
-            m_assembler.testb_i8r(mask.m_value, reg);
         else
             m_assembler.testl_i32r(mask.m_value, reg);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
@@ -927,17 +1155,6 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
-    Jump branchTest8(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
-    {
-        // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
-        ASSERT(mask.m_value >= -128 && mask.m_value <= 255);
-        if (mask.m_value == -1)
-            m_assembler.testb_rr(reg, reg);
-        else
-            m_assembler.testb_i8r(mask.m_value, reg);
-        return Jump(m_assembler.jCC(x86Condition(cond)));
-    }
-
     Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
     {
         // Byte in TrustedImm32 is not well defined, so be a little permissive here, but don't accept nonsense values.
@@ -960,6 +1177,14 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
+    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
+    {
+        ASSERT(!(right.m_value & 0xFFFFFF00));
+
+        m_assembler.cmpb_im(right.m_value, left.offset, left.base, left.index, left.scale);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
     Jump jump()
     {
         return Jump(m_assembler.jmp());
@@ -1148,11 +1373,16 @@ public:
         m_assembler.ret();
     }
 
+    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
+    {
+        m_assembler.cmpb_im(right.m_value, left.offset, left.base);
+        set32(x86Condition(cond), dest);
+    }
+
     void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
     {
         m_assembler.cmpl_rr(right, left);
-        m_assembler.setCC_r(x86Condition(cond), dest);
-        m_assembler.movzbl_rr(dest, dest);
+        set32(x86Condition(cond), dest);
     }
 
     void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
@@ -1161,12 +1391,11 @@ public:
             m_assembler.testl_rr(left, left);
         else
             m_assembler.cmpl_ir(right.m_value, left);
-        m_assembler.setCC_r(x86Condition(cond), dest);
-        m_assembler.movzbl_rr(dest, dest);
+        set32(x86Condition(cond), dest);
     }
 
     // FIXME:
-    // The mask should be optional... paerhaps the argument order should be
+    // The mask should be optional... perhaps the argument order should be
     // dest-src, operations always have a dest? ... possibly not true, considering
     // asm ops like test, or pseudo ops like pop().
 
@@ -1176,8 +1405,7 @@ public:
             m_assembler.cmpb_im(0, address.offset, address.base);
         else
             m_assembler.testb_im(mask.m_value, address.offset, address.base);
-        m_assembler.setCC_r(x86Condition(cond), dest);
-        m_assembler.movzbl_rr(dest, dest);
+        set32(x86Condition(cond), dest);
     }
 
     void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
@@ -1186,8 +1414,7 @@ public:
             m_assembler.cmpl_im(0, address.offset, address.base);
         else
             m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
-        m_assembler.setCC_r(x86Condition(cond), dest);
-        m_assembler.movzbl_rr(dest, dest);
+        set32(x86Condition(cond), dest);
     }
 
     // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
@@ -1196,28 +1423,19 @@ public:
         return static_cast<RelationalCondition>(cond ^ 1);
     }
 
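
invert's "cond ^ 1" trick works because the x86 condition-code encodings come in
even/odd pairs whose odd member is the logical negation of the even one; the commute
helper being removed below exploited the companion symmetry that L/G and B/A sit at
mirrored positions. A sketch of the encoding this assumes (the low nibble of the
two-byte 0F 8x jcc opcodes):

    // Each even/odd pair is a predicate and its negation, so xor-ing the low
    // bit inverts: E(4)^1 == NE(5), L(12)^1 == GE(13), B(2)^1 == AE(3), etc.
    enum Condition {
        O, NO, B, AE, E, NE, BE, A,
        S, NS, P, NP, L, GE, LE, G
    };

    inline Condition invertCondition(Condition c)
    {
        return static_cast<Condition>(c ^ 1);
    }
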
-    // Commute a relational condition, returns a new condition that will produce
-    // the same results given the same inputs but with their positions exchanged.
-    static RelationalCondition commute(RelationalCondition cond)
+    void nop()
     {
-        // Equality is commutative!
-        if (cond == Equal || cond == NotEqual)
-            return cond;
-
-        // Based on the values of x86 condition codes, remap > with < and >= with <=
-        if (cond >= LessThan) {
-            ASSERT(cond == LessThan || cond == LessThanOrEqual || cond == GreaterThan || cond == GreaterThanOrEqual);
-            return static_cast<RelationalCondition>(X86Assembler::ConditionL + X86Assembler::ConditionG - cond);
-        }
+        m_assembler.nop();
+    }
 
-        // As above, for unsigned conditions.
-        ASSERT(cond == Below || cond == BelowOrEqual || cond == Above || cond == AboveOrEqual);
-        return static_cast<RelationalCondition>(X86Assembler::ConditionB + X86Assembler::ConditionA - cond);
+    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+    {
+        X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
     }
 
-    void nop()
+    static ptrdiff_t maxJumpReplacementSize()
     {
-        m_assembler.nop();
+        return X86Assembler::maxJumpReplacementSize();
     }
 
 protected:
@@ -1231,6 +1449,23 @@ protected:
         return static_cast<X86Assembler::Condition>(cond);
     }
 
+    void set32(X86Assembler::Condition cond, RegisterID dest)
+    {
+#if CPU(X86)
+        // On 32-bit x86 we can only set the first 4 registers;
+        // esp..edi are mapped to the 'h' registers!
+        if (dest >= 4) {
+            m_assembler.xchgl_rr(dest, X86Registers::eax);
+            m_assembler.setCC_r(cond, X86Registers::eax);
+            m_assembler.movzbl_rr(X86Registers::eax, X86Registers::eax);
+            m_assembler.xchgl_rr(dest, X86Registers::eax);
+            return;
+        }
+#endif
+        m_assembler.setCC_r(cond, dest);
+        m_assembler.movzbl_rr(dest, dest);
+    }
+
 private:
     // Only MacroAssemblerX86 should be using the following method; SSE2 is always available on
     // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
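
set32, the new shared tail for the compare/test helpers, pairs setCC -- which writes
only an 8-bit register -- with movzbl to zero-extend the 0-or-1 result to 32 bits. On
32-bit x86 the same AH/CH/DH/BH encoding limitation discussed earlier forces the
xchg-through-eax detour for esp..edi; because xchg is its own inverse, the second swap
restores eax. In scalar terms the sequence computes no more than this:

    #include <cstdint>

    // What set32 leaves in dest, expressed as plain C++.
    inline uint32_t set32Result(bool conditionHolds)
    {
        uint8_t low = conditionHolds ? 1 : 0; // setCC_r writes the low byte only
        return static_cast<uint32_t>(low);    // movzbl zero-extends to 32 bits
    }
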