X-Git-Url: https://git.saurik.com/apple/javascriptcore.git/blobdiff_plain/f9bf01c6616d5ddcf65b13b33cedf9e387ff7a63..2d39b0e377c0896910ee49ae70082ba665faf986:/assembler/MacroAssemblerX86_64.h

diff --git a/assembler/MacroAssemblerX86_64.h b/assembler/MacroAssemblerX86_64.h
index ec93f8c..5f92261 100644
--- a/assembler/MacroAssemblerX86_64.h
+++ b/assembler/MacroAssemblerX86_64.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2014 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,270 +26,490 @@
 #ifndef MacroAssemblerX86_64_h
 #define MacroAssemblerX86_64_h
 
-#include <wtf/Platform.h>
-
 #if ENABLE(ASSEMBLER) && CPU(X86_64)
 
 #include "MacroAssemblerX86Common.h"
 
+#if USE(MASM_PROBE)
+#include <wtf/StdLibExtras.h>
+#endif
+
 #define REPTACH_OFFSET_CALL_R11 3
 
+inline bool CAN_SIGN_EXTEND_32_64(int64_t value) { return value == (int64_t)(int32_t)value; }
+
 namespace JSC {
 
 class MacroAssemblerX86_64 : public MacroAssemblerX86Common {
-protected:
-    static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
-
 public:
     static const Scale ScalePtr = TimesEight;
 
     using MacroAssemblerX86Common::add32;
     using MacroAssemblerX86Common::and32;
+    using MacroAssemblerX86Common::branchAdd32;
     using MacroAssemblerX86Common::or32;
     using MacroAssemblerX86Common::sub32;
+    using MacroAssemblerX86Common::load8;
     using MacroAssemblerX86Common::load32;
     using MacroAssemblerX86Common::store32;
+    using MacroAssemblerX86Common::store8;
     using MacroAssemblerX86Common::call;
+    using MacroAssemblerX86Common::jump;
+
+    using MacroAssemblerX86Common::addDouble;
     using MacroAssemblerX86Common::loadDouble;
     using MacroAssemblerX86Common::convertInt32ToDouble;
 
-    void add32(Imm32 imm, AbsoluteAddress address)
+    void add32(TrustedImm32 imm, AbsoluteAddress address)
     {
-        move(ImmPtr(address.m_ptr), scratchRegister);
+        move(TrustedImmPtr(address.m_ptr), scratchRegister);
         add32(imm, Address(scratchRegister));
     }
 
-    void and32(Imm32 imm, AbsoluteAddress address)
+    void and32(TrustedImm32 imm, AbsoluteAddress address)
     {
-        move(ImmPtr(address.m_ptr), scratchRegister);
+        move(TrustedImmPtr(address.m_ptr), scratchRegister);
         and32(imm, Address(scratchRegister));
     }
 
-    void or32(Imm32 imm, AbsoluteAddress address)
+    void add32(AbsoluteAddress address, RegisterID dest)
     {
-        move(ImmPtr(address.m_ptr), scratchRegister);
+        move(TrustedImmPtr(address.m_ptr), scratchRegister);
+        add32(Address(scratchRegister), dest);
+    }
+
+    void or32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        move(TrustedImmPtr(address.m_ptr), scratchRegister);
         or32(imm, Address(scratchRegister));
     }
 
-    void sub32(Imm32 imm, AbsoluteAddress address)
+    void or32(RegisterID reg, AbsoluteAddress address)
     {
-        move(ImmPtr(address.m_ptr), scratchRegister);
+        move(TrustedImmPtr(address.m_ptr), scratchRegister);
+        or32(reg, Address(scratchRegister));
+    }
+
+    void sub32(TrustedImm32 imm, AbsoluteAddress address)
+    {
+        move(TrustedImmPtr(address.m_ptr), scratchRegister);
         sub32(imm, Address(scratchRegister));
     }
+
+    void load8(const void* address, RegisterID dest)
+    {
+        move(TrustedImmPtr(address), dest);
+        load8(dest, dest);
+    }
 
-    void load32(void* address, RegisterID dest)
+    void load32(const void* address, RegisterID dest)
     {
         if (dest == X86Registers::eax)
             m_assembler.movl_mEAX(address);
         else {
-            move(X86Registers::eax, dest);
-            m_assembler.movl_mEAX(address);
-            swap(X86Registers::eax, dest);
+            move(TrustedImmPtr(address), dest);
+            load32(dest, dest);
         }
     }
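// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original diff): every AbsoluteAddress
// overload above shares one idiom. x86-64 ALU instructions cannot encode a
// 64-bit absolute address as a memory operand, so the helper first
// materializes the address in the scratch register (r11) and then applies the
// 32-bit operation through it. "s_counter" and "emitBumpCounter" are
// hypothetical names used only for this example.

#include "MacroAssemblerX86_64.h"

static int32_t s_counter;

static void emitBumpCounter(JSC::MacroAssemblerX86_64& jit)
{
    // Emits roughly: movabsq $&s_counter, %r11 ; addl $1, (%r11)
    jit.add32(JSC::MacroAssemblerX86_64::TrustedImm32(1),
        JSC::MacroAssemblerX86_64::AbsoluteAddress(&s_counter));
}
// ---------------------------------------------------------------------------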
 
-    void loadDouble(void* address, FPRegisterID dest)
+    void addDouble(AbsoluteAddress address, FPRegisterID dest)
     {
-        move(ImmPtr(address), scratchRegister);
-        loadDouble(scratchRegister, dest);
+        move(TrustedImmPtr(address.m_ptr), scratchRegister);
+        m_assembler.addsd_mr(0, scratchRegister, dest);
     }
 
-    void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest)
+    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
     {
-        move(Imm32(*static_cast<int32_t*>(src.m_ptr)), scratchRegister);
+        move(imm, scratchRegister);
         m_assembler.cvtsi2sd_rr(scratchRegister, dest);
     }
 
-    void store32(Imm32 imm, void* address)
+    void store32(TrustedImm32 imm, void* address)
     {
-        move(X86Registers::eax, scratchRegister);
-        move(imm, X86Registers::eax);
-        m_assembler.movl_EAXm(address);
-        move(scratchRegister, X86Registers::eax);
+        move(TrustedImmPtr(address), scratchRegister);
+        store32(imm, scratchRegister);
     }
 
+    void store32(RegisterID source, void* address)
+    {
+        if (source == X86Registers::eax)
+            m_assembler.movl_EAXm(address);
+        else {
+            move(TrustedImmPtr(address), scratchRegister);
+            store32(source, scratchRegister);
+        }
+    }
+
+    void store8(TrustedImm32 imm, void* address)
+    {
+        move(TrustedImmPtr(address), scratchRegister);
+        store8(imm, Address(scratchRegister));
+    }
+
+    void store8(RegisterID reg, void* address)
+    {
+        move(TrustedImmPtr(address), scratchRegister);
+        store8(reg, Address(scratchRegister));
+    }
+
+#if OS(WINDOWS)
+    Call callWithSlowPathReturnType()
+    {
+        // On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value.
+        // On entry, rcx should contain a pointer to this stack space. The other parameters are shifted to the right:
+        // rdx should contain the first argument, r8 should contain the second argument, and r9 should contain the third argument.
+        // On return, rax contains a pointer to this stack value. See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx.
+        // We then need to copy the 16 byte return value into rax and rdx, since the JIT expects the return value to be split between the two.
+        // It is assumed that the parameters are already shifted to the right when entering this method.
+        // Note: this implementation supports up to 3 parameters.
+
+        // The JIT relies on the CallerFrame (frame pointer) being put on the stack;
+        // on Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit.
+        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx where it's stated that rbp MAY be used as a frame pointer.
+        store64(X86Registers::ebp, Address(X86Registers::esp, -16));
+
+        // We also need to allocate the shadow space on the stack for the 4 parameter registers.
+        // In addition, we need to allocate 16 bytes for the return value.
+        // Also, we should allocate 16 bytes for the frame pointer and return address (not populated).
+        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
+
+        // The first parameter register should contain a pointer to the stack-allocated space for the return value.
+        move(X86Registers::esp, X86Registers::ecx);
+        add64(TrustedImm32(4 * sizeof(int64_t)), X86Registers::ecx);
+
+        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
+        Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
+
+        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
+
+        // Copy the return value into rax and rdx.
+        load64(Address(X86Registers::eax, sizeof(int64_t)), X86Registers::edx);
+        load64(Address(X86Registers::eax), X86Registers::eax);
+
+        ASSERT_UNUSED(label, differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
+        return result;
+    }
+#endif
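// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original diff): the stack layout that
// callWithSlowPathReturnType() establishes, per the comments above. After the
// sub64 of 8 * sizeof(int64_t) == 64 bytes, relative to the new rsp:
//
//   [rsp +  0 .. 31]  shadow space for the four parameter registers
//   [rsp + 32 .. 47]  16-byte slot for the aggregate return value
//                     (rcx = rsp + 32 points here on entry to the callee)
//   [rsp + 48 .. 63]  saved frame pointer and (unpopulated) return address
//
// On return, rax points at the 16-byte slot; the two load64() calls then
// split it so the low half ends up in rax and the high half in rdx.
// ---------------------------------------------------------------------------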
 
     Call call()
     {
-        DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister);
+#if OS(WINDOWS)
+        // The JIT relies on the CallerFrame (frame pointer) being put on the stack;
+        // on Win64 we need to manually copy the frame pointer to the stack, since MSVC may not maintain a frame pointer on 64-bit.
+        // See http://msdn.microsoft.com/en-us/library/9z1stfyw.aspx where it's stated that rbp MAY be used as a frame pointer.
+        store64(X86Registers::ebp, Address(X86Registers::esp, -16));
+
+        // On Windows we need to copy the arguments that don't fit in registers to the stack location where the callee expects to find them.
+        // We don't know the number of arguments at this point, so arguments (5, 6, ...) should always be copied.
+
+        // Copy argument 5
+        load64(Address(X86Registers::esp, 4 * sizeof(int64_t)), scratchRegister);
+        store64(scratchRegister, Address(X86Registers::esp, -4 * sizeof(int64_t)));
+
+        // Copy argument 6
+        load64(Address(X86Registers::esp, 5 * sizeof(int64_t)), scratchRegister);
+        store64(scratchRegister, Address(X86Registers::esp, -3 * sizeof(int64_t)));
+
+        // We also need to allocate the shadow space on the stack for the 4 parameter registers.
+        // Also, we should allocate 16 bytes for the frame pointer and return address (not populated).
+        // In addition, we need to allocate 16 bytes for two more parameters, since the call can have up to 6 parameters.
+        sub64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
+#endif
+        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
         Call result = Call(m_assembler.call(scratchRegister), Call::Linkable);
-        ASSERT(differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
+#if OS(WINDOWS)
+        add64(TrustedImm32(8 * sizeof(int64_t)), X86Registers::esp);
+#endif
+        ASSERT_UNUSED(label, differenceBetween(label, result) == REPTACH_OFFSET_CALL_R11);
         return result;
     }
 
+    // Address is a memory location containing the address to jump to
+    void jump(AbsoluteAddress address)
+    {
+        move(TrustedImmPtr(address.m_ptr), scratchRegister);
+        jump(Address(scratchRegister));
+    }
+
     Call tailRecursiveCall()
     {
-        DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister);
+        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
         Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
-        ASSERT(differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
+        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
         return Call::fromTailJump(newJump);
     }
 
     Call makeTailRecursiveCall(Jump oldJump)
     {
         oldJump.link(this);
-        DataLabelPtr label = moveWithPatch(ImmPtr(0), scratchRegister);
+        DataLabelPtr label = moveWithPatch(TrustedImmPtr(0), scratchRegister);
         Jump newJump = Jump(m_assembler.jmp_r(scratchRegister));
-        ASSERT(differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
+        ASSERT_UNUSED(label, differenceBetween(label, newJump) == REPTACH_OFFSET_CALL_R11);
         return Call::fromTailJump(newJump);
     }
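// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original diff): the byte sequence that
// call()/tailRecursiveCall() emit, and why REPTACH_OFFSET_CALL_R11 is 3:
//
//   49 BB <8-byte pointer>   movabsq $target, %r11
//                            (the DataLabelPtr records the position just past
//                             this instruction, i.e. the end of the immediate)
//   41 FF D3                 callq *%r11   (or 41 FF E3, jmpq *%r11)
//                            (the Call/Jump records the position just past it)
//
// The two labels are exactly 3 bytes apart -- the length of the indirect
// call/jump -- so linkCall() and repatchCall() below can step back
// REPTACH_OFFSET_CALL_R11 bytes from the call's label to find where the
// 8-byte target pointer ends, and rewrite it in place.
// ---------------------------------------------------------------------------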
 
+    Jump branchAdd32(ResultCondition cond, TrustedImm32 src, AbsoluteAddress dest)
+    {
+        move(TrustedImmPtr(dest.m_ptr), scratchRegister);
+        add32(src, Address(scratchRegister));
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
 
-    void addPtr(RegisterID src, RegisterID dest)
+    void add64(RegisterID src, RegisterID dest)
     {
         m_assembler.addq_rr(src, dest);
     }
+
+    void add64(Address src, RegisterID dest)
+    {
+        m_assembler.addq_mr(src.offset, src.base, dest);
+    }
 
-    void addPtr(Imm32 imm, RegisterID srcDest)
+    void add64(AbsoluteAddress src, RegisterID dest)
     {
-        m_assembler.addq_ir(imm.m_value, srcDest);
+        move(TrustedImmPtr(src.m_ptr), scratchRegister);
+        add64(Address(scratchRegister), dest);
     }
 
-    void addPtr(ImmPtr imm, RegisterID dest)
+    void add64(TrustedImm32 imm, RegisterID srcDest)
     {
-        move(imm, scratchRegister);
-        m_assembler.addq_rr(scratchRegister, dest);
+        if (imm.m_value == 1)
+            m_assembler.incq_r(srcDest);
+        else
+            m_assembler.addq_ir(imm.m_value, srcDest);
+    }
+
+    void add64(TrustedImm64 imm, RegisterID dest)
+    {
+        if (imm.m_value == 1)
+            m_assembler.incq_r(dest);
+        else {
+            move(imm, scratchRegister);
+            add64(scratchRegister, dest);
+        }
     }
 
-    void addPtr(Imm32 imm, RegisterID src, RegisterID dest)
+    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
     {
         m_assembler.leaq_mr(imm.m_value, src, dest);
     }
 
-    void addPtr(Imm32 imm, Address address)
+    void add64(TrustedImm32 imm, Address address)
     {
         m_assembler.addq_im(imm.m_value, address.offset, address.base);
     }
 
-    void addPtr(Imm32 imm, AbsoluteAddress address)
+    void add64(TrustedImm32 imm, AbsoluteAddress address)
     {
-        move(ImmPtr(address.m_ptr), scratchRegister);
-        addPtr(imm, Address(scratchRegister));
+        move(TrustedImmPtr(address.m_ptr), scratchRegister);
+        add64(imm, Address(scratchRegister));
     }
-
-    void andPtr(RegisterID src, RegisterID dest)
+
+    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
+    {
+        m_assembler.leaq_mr(imm.m_value, srcDest, srcDest);
+    }
+
+    void and64(RegisterID src, RegisterID dest)
     {
         m_assembler.andq_rr(src, dest);
     }
 
-    void andPtr(Imm32 imm, RegisterID srcDest)
+    void and64(TrustedImm32 imm, RegisterID srcDest)
     {
         m_assembler.andq_ir(imm.m_value, srcDest);
     }
 
-    void orPtr(RegisterID src, RegisterID dest)
+    void and64(TrustedImmPtr imm, RegisterID srcDest)
+    {
+        move(imm, scratchRegister);
+        and64(scratchRegister, srcDest);
+    }
+
+    void lshift64(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.shlq_i8r(imm.m_value, dest);
+    }
+
+    void rshift64(TrustedImm32 imm, RegisterID dest)
+    {
+        m_assembler.sarq_i8r(imm.m_value, dest);
+    }
+
+    void mul64(RegisterID src, RegisterID dest)
+    {
+        m_assembler.imulq_rr(src, dest);
+    }
+
+    void neg64(RegisterID dest)
+    {
+        m_assembler.negq_r(dest);
+    }
+
+    void or64(RegisterID src, RegisterID dest)
     {
         m_assembler.orq_rr(src, dest);
     }
 
-    void orPtr(ImmPtr imm, RegisterID dest)
+    void or64(TrustedImm64 imm, RegisterID dest)
     {
         move(imm, scratchRegister);
-        m_assembler.orq_rr(scratchRegister, dest);
+        or64(scratchRegister, dest);
     }
 
-    void orPtr(Imm32 imm, RegisterID dest)
+    void or64(TrustedImm32 imm, RegisterID dest)
     {
         m_assembler.orq_ir(imm.m_value, dest);
     }
 
-    void subPtr(RegisterID src, RegisterID dest)
+    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
+    {
+        if (op1 == op2)
+            move(op1, dest);
+        else if (op1 == dest)
+            or64(op2, dest);
+        else {
+            move(op2, dest);
+            or64(op1, dest);
+        }
+    }
+
+    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
+    {
+        move(src, dest);
+        or64(imm, dest);
+    }
+
+    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
+    {
+        m_assembler.rorq_i8r(imm.m_value, srcDst);
+    }
+
+    void sub64(RegisterID src, RegisterID dest)
     {
         m_assembler.subq_rr(src, dest);
     }
 
-    void subPtr(Imm32 imm, RegisterID dest)
+    void sub64(TrustedImm32 imm, RegisterID dest)
     {
-        m_assembler.subq_ir(imm.m_value, dest);
+        if (imm.m_value == 1)
+            m_assembler.decq_r(dest);
+        else
+            m_assembler.subq_ir(imm.m_value, dest);
     }
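// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original diff): add64/sub64 above
// peephole the +/-1 immediates into the shorter incq/decq encodings
// (REX.W FF /0, 3 bytes) instead of the generic add/sub with an 8-bit
// immediate (REX.W 83 /0 ib, 4 bytes). "emitAdjustBy" is a hypothetical
// helper exercising both paths (INT32_MIN ignored for brevity):

static void emitAdjustBy(JSC::MacroAssemblerX86_64& jit,
    JSC::MacroAssemblerX86_64::RegisterID reg, int32_t delta)
{
    typedef JSC::MacroAssemblerX86_64::TrustedImm32 TrustedImm32;
    if (delta >= 0)
        jit.add64(TrustedImm32(delta), reg);  // delta == 1 emits incq %reg
    else
        jit.sub64(TrustedImm32(-delta), reg); // delta == -1 emits decq %reg
}
// ---------------------------------------------------------------------------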
 
-    void subPtr(ImmPtr imm, RegisterID dest)
+    void sub64(TrustedImm64 imm, RegisterID dest)
     {
-        move(imm, scratchRegister);
-        m_assembler.subq_rr(scratchRegister, dest);
+        if (imm.m_value == 1)
+            m_assembler.decq_r(dest);
+        else {
+            move(imm, scratchRegister);
+            sub64(scratchRegister, dest);
+        }
     }
 
-    void xorPtr(RegisterID src, RegisterID dest)
+    void xor64(RegisterID src, RegisterID dest)
     {
         m_assembler.xorq_rr(src, dest);
     }
+
+    void xor64(RegisterID src, Address dest)
+    {
+        m_assembler.xorq_rm(src, dest.offset, dest.base);
+    }
 
-    void xorPtr(Imm32 imm, RegisterID srcDest)
+    void xor64(TrustedImm32 imm, RegisterID srcDest)
     {
         m_assembler.xorq_ir(imm.m_value, srcDest);
     }
-
-    void loadPtr(ImplicitAddress address, RegisterID dest)
+
+    void load64(ImplicitAddress address, RegisterID dest)
     {
         m_assembler.movq_mr(address.offset, address.base, dest);
     }
 
-    void loadPtr(BaseIndex address, RegisterID dest)
+    void load64(BaseIndex address, RegisterID dest)
     {
         m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest);
     }
 
-    void loadPtr(void* address, RegisterID dest)
+    void load64(const void* address, RegisterID dest)
     {
         if (dest == X86Registers::eax)
             m_assembler.movq_mEAX(address);
         else {
-            move(X86Registers::eax, dest);
-            m_assembler.movq_mEAX(address);
-            swap(X86Registers::eax, dest);
+            move(TrustedImmPtr(address), dest);
+            load64(dest, dest);
         }
     }
 
-    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
+    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
     {
+        padBeforePatch();
         m_assembler.movq_mr_disp32(address.offset, address.base, dest);
         return DataLabel32(this);
     }
+
+    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
+    {
+        padBeforePatch();
+        m_assembler.movq_mr_disp8(address.offset, address.base, dest);
+        return DataLabelCompact(this);
+    }
 
-    void storePtr(RegisterID src, ImplicitAddress address)
+    void store64(RegisterID src, ImplicitAddress address)
     {
         m_assembler.movq_rm(src, address.offset, address.base);
     }
 
-    void storePtr(RegisterID src, BaseIndex address)
+    void store64(RegisterID src, BaseIndex address)
     {
         m_assembler.movq_rm(src, address.offset, address.base, address.index, address.scale);
     }
 
-    void storePtr(RegisterID src, void* address)
+    void store64(RegisterID src, void* address)
     {
         if (src == X86Registers::eax)
             m_assembler.movq_EAXm(address);
         else {
-            swap(X86Registers::eax, src);
-            m_assembler.movq_EAXm(address);
-            swap(X86Registers::eax, src);
+            move(TrustedImmPtr(address), scratchRegister);
+            store64(src, scratchRegister);
         }
     }
 
-    void storePtr(ImmPtr imm, ImplicitAddress address)
+    void store64(TrustedImm64 imm, ImplicitAddress address)
     {
-        move(imm, scratchRegister);
-        storePtr(scratchRegister, address);
+        if (CAN_SIGN_EXTEND_32_64(imm.m_value))
+            m_assembler.movq_i32m(static_cast<int32_t>(imm.m_value), address.offset, address.base);
+        else {
+            move(imm, scratchRegister);
+            store64(scratchRegister, address);
+        }
     }
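// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original diff): why store64 tests
// CAN_SIGN_EXTEND_32_64. x86-64 has no "movq $imm64, mem" form; a 64-bit
// store of an immediate either sign-extends a 32-bit immediate in a single
// instruction or has to bounce the value through the scratch register:
//
//   store64(TrustedImm64(-1), Address(base, 8));
//       -> movq $-1, 8(%base)                           // fits in int32_t
//   store64(TrustedImm64(0x123456789LL), Address(base, 8));
//       -> movabsq $0x123456789, %r11 ; movq %r11, 8(%base)
// ---------------------------------------------------------------------------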
 
-    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
+    void store64(TrustedImm64 imm, BaseIndex address)
     {
+        move(imm, scratchRegister);
+        m_assembler.movq_rm(scratchRegister, address.offset, address.base, address.index, address.scale);
+    }
+
+    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
+    {
+        padBeforePatch();
         m_assembler.movq_rm_disp32(src, address.offset, address.base);
         return DataLabel32(this);
     }
 
-    void movePtrToDouble(RegisterID src, FPRegisterID dest)
+    void move64ToDouble(RegisterID src, FPRegisterID dest)
     {
         m_assembler.movq_rr(src, dest);
     }
 
-    void moveDoubleToPtr(FPRegisterID src, RegisterID dest)
+    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
     {
         m_assembler.movq_rr(src, dest);
     }
 
-    void setPtr(Condition cond, RegisterID left, Imm32 right, RegisterID dest)
+    void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
     {
         if (((cond == Equal) || (cond == NotEqual)) && !right.m_value)
             m_assembler.testq_rr(left, left);
@@ -298,50 +518,78 @@ public:
         m_assembler.setCC_r(x86Condition(cond), dest);
         m_assembler.movzbl_rr(dest, dest);
     }
-
-    Jump branchPtr(Condition cond, RegisterID left, RegisterID right)
+
+    void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        m_assembler.cmpq_rr(right, left);
+        m_assembler.setCC_r(x86Condition(cond), dest);
+        m_assembler.movzbl_rr(dest, dest);
+    }
+
+    Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
     {
         m_assembler.cmpq_rr(right, left);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
-    Jump branchPtr(Condition cond, RegisterID left, ImmPtr right)
+    Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
     {
+        if (((cond == Equal) || (cond == NotEqual)) && !right.m_value) {
+            m_assembler.testq_rr(left, left);
+            return Jump(m_assembler.jCC(x86Condition(cond)));
+        }
         move(right, scratchRegister);
-        return branchPtr(cond, left, scratchRegister);
+        return branch64(cond, left, scratchRegister);
    }
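// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original diff): the zero-compare
// peephole used by compare64/branch64 above. For Equal/NotEqual against 0,
// "testq %reg, %reg" sets ZF exactly like "cmpq $0, %reg" but needs no
// immediate, so e.g.
//
//   branch64(Equal, valueReg, TrustedImm64(0));
//
// emits "testq %valueReg, %valueReg ; je ..." instead of materializing the
// zero in the scratch register and comparing against it.
// ---------------------------------------------------------------------------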
 
-    Jump branchPtr(Condition cond, RegisterID left, Address right)
+    Jump branch64(RelationalCondition cond, RegisterID left, Address right)
     {
         m_assembler.cmpq_mr(right.offset, right.base, left);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
-    Jump branchPtr(Condition cond, AbsoluteAddress left, RegisterID right)
+    Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
     {
-        move(ImmPtr(left.m_ptr), scratchRegister);
-        return branchPtr(cond, Address(scratchRegister), right);
+        move(TrustedImmPtr(left.m_ptr), scratchRegister);
+        return branch64(cond, Address(scratchRegister), right);
     }
 
-    Jump branchPtr(Condition cond, Address left, RegisterID right)
+    Jump branch64(RelationalCondition cond, Address left, RegisterID right)
     {
         m_assembler.cmpq_rm(right, left.offset, left.base);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
-    Jump branchPtr(Condition cond, Address left, ImmPtr right)
+    Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
+    {
+        move(right, scratchRegister);
+        return branch64(cond, left, scratchRegister);
+    }
+
+    Jump branch64(RelationalCondition cond, BaseIndex address, RegisterID right)
+    {
+        m_assembler.cmpq_rm(right, address.offset, address.base, address.index, address.scale);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
+    {
+        return branch64(cond, left, right);
+    }
+
+    Jump branchPtr(RelationalCondition cond, BaseIndex left, TrustedImmPtr right)
     {
         move(right, scratchRegister);
         return branchPtr(cond, left, scratchRegister);
     }
 
-    Jump branchTestPtr(Condition cond, RegisterID reg, RegisterID mask)
+    Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
     {
         m_assembler.testq_rr(reg, mask);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
-
-    Jump branchTestPtr(Condition cond, RegisterID reg, Imm32 mask = Imm32(-1))
+
+    Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
     {
         // if we are only interested in the low seven bits, this can be tested with a testb
         if (mask.m_value == -1)
@@ -353,7 +601,30 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
-    Jump branchTestPtr(Condition cond, Address address, Imm32 mask = Imm32(-1))
+    void test64(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
+    {
+        if (mask.m_value == -1)
+            m_assembler.testq_rr(reg, reg);
+        else if ((mask.m_value & ~0x7f) == 0)
+            m_assembler.testb_i8r(mask.m_value, reg);
+        else
+            m_assembler.testq_i32r(mask.m_value, reg);
+        set32(x86Condition(cond), dest);
+    }
+
+    void test64(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
+    {
+        m_assembler.testq_rr(reg, mask);
+        set32(x86Condition(cond), dest);
+    }
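// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original diff): how test64 (and the
// branchTest64 variant above it) pick an encoding for the mask:
//
//   test64(NonZero, reg, TrustedImm32(-1), dest);    // testq %reg, %reg
//   test64(NonZero, reg, TrustedImm32(0x40), dest);  // testb $0x40, %reg
//   test64(NonZero, reg, TrustedImm32(0x100), dest); // testq $0x100, %reg
//
// A mask of -1 degenerates to testing the register against itself; a mask
// that fits in the low seven bits can use the short byte-immediate testb;
// anything else falls back to the 32-bit-immediate form.
// ---------------------------------------------------------------------------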
+
+    Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        load64(address.m_ptr, scratchRegister);
+        return branchTest64(cond, scratchRegister, mask);
+    }
+
+    Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
     {
         if (mask.m_value == -1)
             m_assembler.cmpq_im(0, address.offset, address.base);
@@ -362,7 +633,13 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
-    Jump branchTestPtr(Condition cond, BaseIndex address, Imm32 mask = Imm32(-1))
+    Jump branchTest64(ResultCondition cond, Address address, RegisterID reg)
+    {
+        m_assembler.testq_rm(reg, address.offset, address.base);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
     {
         if (mask.m_value == -1)
             m_assembler.cmpq_im(0, address.offset, address.base, address.index, address.scale);
@@ -372,55 +649,217 @@ public:
     }
 
-    Jump branchAddPtr(Condition cond, RegisterID src, RegisterID dest)
+    Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        add64(imm, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
     {
-        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
-        addPtr(src, dest);
+        add64(src, dest);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
-    Jump branchSubPtr(Condition cond, Imm32 imm, RegisterID dest)
+    Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
     {
-        ASSERT((cond == Overflow) || (cond == Zero) || (cond == NonZero));
-        subPtr(imm, dest);
+        mul64(src, dest);
+        if (cond != Overflow)
+            m_assembler.testq_rr(dest, dest);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
-    DataLabelPtr moveWithPatch(ImmPtr initialValue, RegisterID dest)
+    Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
+    {
+        sub64(imm, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
+    {
+        sub64(src, dest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    Jump branchSub64(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
+    {
+        move(src1, dest);
+        return branchSub64(cond, src2, dest);
+    }
+
+    Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
+    {
+        neg64(srcDest);
+        return Jump(m_assembler.jCC(x86Condition(cond)));
+    }
+
+    void abortWithReason(AbortReason reason)
+    {
+        move(TrustedImm32(reason), X86Registers::r11);
+        breakpoint();
+    }
+
+    void abortWithReason(AbortReason reason, intptr_t misc)
+    {
+        move(TrustedImm64(misc), X86Registers::r10);
+        abortWithReason(reason);
+    }
+
+    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
+    {
+        ConvertibleLoadLabel result = ConvertibleLoadLabel(this);
+        m_assembler.movq_mr(address.offset, address.base, dest);
+        return result;
+    }
+
+    DataLabelPtr moveWithPatch(TrustedImmPtr initialValue, RegisterID dest)
     {
+        padBeforePatch();
         m_assembler.movq_i64r(initialValue.asIntptr(), dest);
         return DataLabelPtr(this);
     }
 
-    Jump branchPtrWithPatch(Condition cond, RegisterID left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+    DataLabelPtr moveWithPatch(TrustedImm32 initialValue, RegisterID dest)
+    {
+        padBeforePatch();
+        m_assembler.movq_i64r(initialValue.m_value, dest);
+        return DataLabelPtr(this);
+    }
+
+    Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
     {
         dataLabel = moveWithPatch(initialRightValue, scratchRegister);
-        return branchPtr(cond, left, scratchRegister);
+        return branch64(cond, left, scratchRegister);
     }
 
-    Jump branchPtrWithPatch(Condition cond, Address left, DataLabelPtr& dataLabel, ImmPtr initialRightValue = ImmPtr(0))
+    Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
     {
         dataLabel = moveWithPatch(initialRightValue, scratchRegister);
-        return branchPtr(cond, left, scratchRegister);
+        return branch64(cond, left, scratchRegister);
+    }
+
+    Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        padBeforePatch();
+        m_assembler.movl_i32r(initialRightValue.m_value, scratchRegister);
+        dataLabel = DataLabel32(this);
+        return branch32(cond, left, scratchRegister);
     }
 
-    DataLabelPtr storePtrWithPatch(ImmPtr initialValue, ImplicitAddress address)
+    DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
     {
         DataLabelPtr label = moveWithPatch(initialValue, scratchRegister);
-        storePtr(scratchRegister, address);
+        store64(scratchRegister, address);
         return label;
     }
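// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original diff): the patching workflow
// that moveWithPatch/branchPtrWithPatch support, e.g. for inline caches. A
// branch is first emitted against a placeholder; after linking, the 8-byte
// immediate identified by the DataLabelPtr can be rewritten to the real value
// without regenerating the code. Names here are hypothetical:
//
//   JSC::MacroAssemblerX86_64::DataLabelPtr expectedStructure;
//   JSC::MacroAssemblerX86_64::Jump miss = jit.branchPtrWithPatch(
//       JSC::MacroAssemblerX86_64::NotEqual, structureReg, expectedStructure);
//   // ... fast path ...
//   // Later, via the LinkBuffer/repatching machinery, the immediate that
//   // expectedStructure identifies is overwritten with the observed pointer.
// ---------------------------------------------------------------------------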
+
+    using MacroAssemblerX86Common::branch8;
+    Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
+    {
+        MacroAssemblerX86Common::move(TrustedImmPtr(left.m_ptr), scratchRegister);
+        return MacroAssemblerX86Common::branch8(cond, Address(scratchRegister), right);
+    }
+
+    using MacroAssemblerX86Common::branchTest8;
+    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        TrustedImmPtr addr(reinterpret_cast<void*>(address.offset));
+        MacroAssemblerX86Common::move(addr, scratchRegister);
+        return MacroAssemblerX86Common::branchTest8(cond, BaseIndex(scratchRegister, address.base, TimesOne), mask);
+    }
+
+    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        MacroAssemblerX86Common::move(TrustedImmPtr(address.m_ptr), scratchRegister);
+        return MacroAssemblerX86Common::branchTest8(cond, Address(scratchRegister), mask);
+    }
 
-    Label loadPtrWithPatchToLEA(Address address, RegisterID dest)
+    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
     {
-        Label label(this);
-        loadPtr(address, dest);
-        return label;
+        m_assembler.cvtsi2sdq_rr(src, dest);
     }
 
-    bool supportsFloatingPoint() const { return true; }
+    static bool supportsFloatingPoint() { return true; }
     // See comment on MacroAssemblerARMv7::supportsFloatingPointTruncate()
-    bool supportsFloatingPointTruncate() const { return true; }
+    static bool supportsFloatingPointTruncate() { return true; }
+    static bool supportsFloatingPointSqrt() { return true; }
+    static bool supportsFloatingPointAbs() { return true; }
+
+    static FunctionPtr readCallTarget(CodeLocationCall call)
+    {
+        return FunctionPtr(X86Assembler::readPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation()));
+    }
+
+    static bool haveScratchRegisterForBlinding() { return true; }
+    static RegisterID scratchRegisterForBlinding() { return scratchRegister; }
+
+    static bool canJumpReplacePatchableBranchPtrWithPatch() { return true; }
+    static bool canJumpReplacePatchableBranch32WithPatch() { return true; }
+
+    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
+    {
+        const int rexBytes = 1;
+        const int opcodeBytes = 1;
+        const int immediateBytes = 8;
+        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
+        ASSERT(totalBytes >= maxJumpReplacementSize());
+        return label.labelAtOffset(-totalBytes);
+    }
+
+    static CodeLocationLabel startOfBranch32WithPatchOnRegister(CodeLocationDataLabel32 label)
+    {
+        const int rexBytes = 1;
+        const int opcodeBytes = 1;
+        const int immediateBytes = 4;
+        const int totalBytes = rexBytes + opcodeBytes + immediateBytes;
+        ASSERT(totalBytes >= maxJumpReplacementSize());
+        return label.labelAtOffset(-totalBytes);
+    }
+
+    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr label)
+    {
+        return startOfBranchPtrWithPatchOnRegister(label);
+    }
+
+    static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32 label)
+    {
+        return startOfBranch32WithPatchOnRegister(label);
+    }
+
+    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel instructionStart, Address, void* initialValue)
+    {
+        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
+    }
+
+    static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel instructionStart, Address, int32_t initialValue)
+    {
+        X86Assembler::revertJumpTo_movl_i32r(instructionStart.executableAddress(), initialValue, scratchRegister);
+    }
+
+    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
+    {
+        X86Assembler::revertJumpTo_movq_i64r(instructionStart.executableAddress(), reinterpret_cast<intptr_t>(initialValue), scratchRegister);
+    }
+
+#if USE(MASM_PROBE)
+    // This function emits code to preserve the CPUState (e.g. registers),
+    // call a user supplied probe function, and restore the CPUState before
+    // continuing with other JIT generated code.
+    //
+    // The user supplied probe function will be called with a single pointer to
+    // a ProbeContext struct (defined above) which contains, among other things,
+    // the preserved CPUState. This allows the user probe function to inspect
+    // the CPUState at that point in the JIT generated code.
+    //
+    // If the user probe function alters the register values in the ProbeContext,
+    // the altered values will be loaded into the CPU registers when the probe
+    // returns.
+    //
+    // The ProbeContext is stack allocated and is only valid for the duration
+    // of the call to the user probe function.
+
+    void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
+#endif // USE(MASM_PROBE)
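// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the original diff): using probe() while
// emitting code. The probe callback runs as ordinary C++ with the captured
// state in the ProbeContext (declared alongside CPUState in
// MacroAssemblerX86Common.h); arg1/arg2 are passed through unchanged.
// "dumpArgs" and the jit/tag variables are hypothetical:
//
//   static void dumpArgs(JSC::MacroAssemblerX86_64::ProbeContext* context)
//   {
//       dataLogF("probe hit: arg1=%p arg2=%p\n", context->arg1, context->arg2);
//   }
//   ...
//   jit.probe(dumpArgs, tag1, tag2); // emits the pushes + trampoline call below
// ---------------------------------------------------------------------------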
 
 private:
     friend class LinkBuffer;
@@ -429,9 +868,9 @@ private:
     static void linkCall(void* code, Call call, FunctionPtr function)
     {
         if (!call.isFlagSet(Call::Near))
-            X86Assembler::linkPointer(code, X86Assembler::labelFor(call.m_jmp, -REPTACH_OFFSET_CALL_R11), function.value());
+            X86Assembler::linkPointer(code, call.m_label.labelAtOffset(-REPTACH_OFFSET_CALL_R11), function.value());
         else
-            X86Assembler::linkCall(code, call.m_jmp, function.value());
+            X86Assembler::linkCall(code, call.m_label, function.value());
     }
 
     static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
@@ -444,8 +883,68 @@ private:
         X86Assembler::repatchPointer(call.dataLabelPtrAtOffset(-REPTACH_OFFSET_CALL_R11).dataLocation(), destination.executableAddress());
     }
 
+#if USE(MASM_PROBE)
+    inline TrustedImm64 trustedImm64FromPtr(void* ptr)
+    {
+        return TrustedImm64(TrustedImmPtr(ptr));
+    }
+
+    inline TrustedImm64 trustedImm64FromPtr(ProbeFunction function)
+    {
+        return TrustedImm64(TrustedImmPtr(reinterpret_cast<void*>(function)));
+    }
+
+    inline TrustedImm64 trustedImm64FromPtr(void (*function)())
+    {
+        return TrustedImm64(TrustedImmPtr(reinterpret_cast<void*>(function)));
+    }
+#endif
 };
 
+#if USE(MASM_PROBE)
+
+extern "C" void ctiMasmProbeTrampoline();
+
+// What code is emitted for the probe?
+// ===================================
+// We want to keep the size of the emitted probe invocation code as compact as
+// possible to minimize the perturbation to the JIT generated code. However,
+// we also need to preserve the CPU registers and set up the ProbeContext to be
+// passed to the user probe function.
+//
+// Hence, we do only the minimum here to preserve a scratch register (i.e. rax
+// in this case) and the stack pointer (i.e. rsp), and pass the probe arguments.
+// We'll let the ctiMasmProbeTrampoline handle the rest of the probe invocation
+// work, i.e. saving the CPUState (and setting up the ProbeContext), calling the
+// user probe function, and restoring the CPUState before returning to JIT
+// generated code.
+//
+// What values are in the saved registers?
+// =======================================
+// Conceptually, the saved registers should contain values as if the probe
+// is not present in the JIT generated code. Hence, they should contain values
+// that are expected at the start of the instruction immediately following the
+// probe.
+//
+// Specifically, the saved stack pointer register will point to the stack
+// position before we push the ProbeContext frame. The saved rip will point to
+// the address of the instruction immediately following the probe.
+
+inline void MacroAssemblerX86_64::probe(MacroAssemblerX86_64::ProbeFunction function, void* arg1, void* arg2)
+{
+    push(RegisterID::esp);
+    push(RegisterID::eax);
+    move(trustedImm64FromPtr(arg2), RegisterID::eax);
+    push(RegisterID::eax);
+    move(trustedImm64FromPtr(arg1), RegisterID::eax);
+    push(RegisterID::eax);
+    move(trustedImm64FromPtr(function), RegisterID::eax);
+    push(RegisterID::eax);
+    move(trustedImm64FromPtr(ctiMasmProbeTrampoline), RegisterID::eax);
+    call(RegisterID::eax);
+}
+#endif // USE(MASM_PROBE)
+
 } // namespace JSC
 
 #endif // ENABLE(ASSEMBLER)