diff --git a/assembler/MacroAssemblerX86Common.h b/assembler/MacroAssemblerX86Common.h
index e398dcdad29901b8e495262dee06015a55a6ff38..b3b50748a96c98b3c4e862e709466c5aa055d524 100644
--- a/assembler/MacroAssemblerX86Common.h
+++ b/assembler/MacroAssemblerX86Common.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2014 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
 
 namespace JSC {
 
-class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler> {
-protected:
+class MacroAssemblerX86Common : public AbstractMacroAssembler<X86Assembler, MacroAssemblerX86Common> {
+public:
 #if CPU(X86_64)
     static const X86Registers::RegisterID scratchRegister = X86Registers::r11;
 #endif
 
+protected:
     static const int DoubleConditionBitInvert = 0x10;
     static const int DoubleConditionBitSpecial = 0x20;
     static const int DoubleConditionBits = DoubleConditionBitInvert | DoubleConditionBitSpecial;
 
 public:
-    typedef X86Assembler::FPRegisterID FPRegisterID;
     typedef X86Assembler::XMMRegisterID XMMRegisterID;
     
-    static const int MaximumCompactPtrAlignedAddressOffset = 127;
+    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
+    {
+        return value >= -128 && value <= 127;
+    }
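A note on the new predicate: the compact load/store forms below emit movl_mr_disp8, whose displacement field is a sign-extended int8_t, so the encodable range is exactly [-128, 127]. The old MaximumCompactPtrAlignedAddressOffset asserts (visible in repatchCompact further down) rejected negative offsets and 127 itself, both of which the hardware can represent. An equivalent round-trip formulation, for illustration only (fitsInDisp8 is a hypothetical name, not part of the class):

    static bool fitsInDisp8(ptrdiff_t value)
    {
        return static_cast<int8_t>(value) == value; // survives 8-bit truncation on two's-complement targets
    }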
 
     enum RelationalCondition {
         Equal = X86Assembler::ConditionE,
@@ -65,6 +68,7 @@ public:
     enum ResultCondition {
         Overflow = X86Assembler::ConditionO,
         Signed = X86Assembler::ConditionS,
+        PositiveOrZero = X86Assembler::ConditionNS,
         Zero = X86Assembler::ConditionE,
         NonZero = X86Assembler::ConditionNE
     };
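PositiveOrZero maps to x86's "not sign" condition (jns): the branch is taken when the flag-setting instruction produced a result with bit 31 clear. A hypothetical use site (masm is illustrative):

    // Taken when reg, read as a signed 32-bit value, is >= 0.
    Jump nonNegative = masm.branchTest32(MacroAssemblerX86Common::PositiveOrZero, reg);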
@@ -90,13 +94,11 @@ public:
         DoubleConditionBits_should_not_interfere_with_X86Assembler_Condition_codes);
 
     static const RegisterID stackPointerRegister = X86Registers::esp;
-
-#if ENABLE(JIT_CONSTANT_BLINDING)
+    static const RegisterID framePointerRegister = X86Registers::ebp;
+    
+    static bool canBlind() { return true; }
     static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
-#if CPU(X86_64)
-    static bool shouldBlindForSpecificArch(uintptr_t value) { return value >= 0x00ffffff; }
-#endif
-#endif
+    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }
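The blinding predicates feed the JIT-spraying defense: large immediates chosen by untrusted code could otherwise embed attacker-controlled byte sequences directly in executable memory, so the macroassembler rewrites them; constants below 0x00ffffff occupy at most three meaningful bytes and are treated as too small to be a useful payload. A minimal sketch of the idea, assuming an xor-based scheme and an illustrative randomBlindingKey() helper (the real logic lives in the shared MacroAssembler layer):

    uint32_t key = randomBlindingKey();           // hypothetical per-constant entropy
    move(TrustedImm32(imm.m_value ^ key), dest);  // raw bytes never appear in JIT code
    xor32(TrustedImm32(key), dest);               // dest now holds the original imm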
 
     // Integer arithmetic operations:
     //
@@ -117,7 +119,10 @@ public:
 
     void add32(TrustedImm32 imm, RegisterID dest)
     {
-        m_assembler.addl_ir(imm.m_value, dest);
+        if (imm.m_value == 1)
+            m_assembler.inc_r(dest);
+        else
+            m_assembler.addl_ir(imm.m_value, dest);
     }
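The inc special case is a size optimization: in 32-bit mode inc reg is a single byte (0x40+r) versus three for add reg, imm8; in 64-bit mode, where the 0x40 row became REX prefixes, the FF /0 form is still one byte shorter. One semantic difference worth noting: inc leaves CF untouched while add rewrites it, but ResultCondition only exposes OF/SF/ZF-derived conditions, so branchAdd32-style callers are unaffected. The matching dec case added to sub32 below follows the same reasoning.

    // Rough encoding comparison (32-bit mode):
    //   add eax, 1 -> 83 C0 01  (3 bytes)
    //   inc eax    -> 40        (1 byte)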
     
     void add32(Address src, RegisterID dest)
@@ -178,6 +183,18 @@ public:
         and32(imm, dest);
     }
 
+    void countLeadingZeros32(RegisterID src, RegisterID dst)
+    {
+        m_assembler.bsr_rr(src, dst);
+        Jump srcIsNonZero = m_assembler.jCC(x86Condition(NonZero));
+        move(TrustedImm32(32), dst);
+
+        Jump skipNonZeroCase = jump();
+        srcIsNonZero.link(this);
+        xor32(TrustedImm32(0x1f), dst);
+        skipNonZeroCase.link(this);
+    }
+
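bsr writes the bit index (0-31) of the highest set bit and sets ZF only when the source is zero, in which case the destination is architecturally undefined; hence the explicit move of 32 on that path. The xor on the non-zero path works because for any index in [0, 31], 31 - index == index ^ 0x1f (subtracting from an all-ones pattern never borrows). A plain-C++ reference model of the sequence, for illustration only:

    static int clz32Model(uint32_t v)
    {
        if (!v)
            return 32;                         // the move(TrustedImm32(32), dst) path
        int index = 31 - __builtin_clz(v);     // what bsr leaves in dst
        return index ^ 0x1f;                   // == 31 - index, the xor32 path
    }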
     void lshift32(RegisterID shift_amount, RegisterID dest)
     {
         ASSERT(shift_amount != dest);
@@ -364,7 +381,10 @@ public:
     
     void sub32(TrustedImm32 imm, RegisterID dest)
     {
-        m_assembler.subl_ir(imm.m_value, dest);
+        if (imm.m_value == 1)
+            m_assembler.dec_r(dest);
+        else
+            m_assembler.subl_ir(imm.m_value, dest);
     }
     
     void sub32(TrustedImm32 imm, Address address)
@@ -440,7 +460,7 @@ public:
     {
         ASSERT(src != dst);
         static const double negativeZeroConstant = -0.0;
-        loadDouble(&negativeZeroConstant, dst);
+        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
         m_assembler.andnpd_rr(src, dst);
     }
 
@@ -448,7 +468,7 @@ public:
     {
         ASSERT(src != dst);
         static const double negativeZeroConstant = -0.0;
-        loadDouble(&negativeZeroConstant, dst);
+        loadDouble(TrustedImmPtr(&negativeZeroConstant), dst);
         m_assembler.xorpd_rr(src, dst);
     }
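Both helpers exploit the IEEE-754 bit layout: -0.0 is the binary64 pattern with only the sign bit set (0x8000000000000000). andnpd computes dst = ~dst & src, so with the constant in dst the sign bit is cleared (absolute value); xorpd computes dst = dst ^ src, flipping just the sign bit (negation). The constant lands in dst and the result overwrites it, which is why both functions assert src != dst.

    // Bit-level view (binary64):
    //   abs: dst = ~0x8000000000000000 & src  -> sign bit cleared
    //   neg: dst =  0x8000000000000000 ^ src  -> sign bit flipped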
 
@@ -482,25 +502,27 @@ public:
 
     DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
     {
+        padBeforePatch();
         m_assembler.movl_mr_disp32(address.offset, address.base, dest);
         return DataLabel32(this);
     }
     
     DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
     {
+        padBeforePatch();
         m_assembler.movl_mr_disp8(address.offset, address.base, dest);
         return DataLabelCompact(this);
     }
     
     static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
     {
-        ASSERT(value >= 0);
-        ASSERT(value < MaximumCompactPtrAlignedAddressOffset);
+        ASSERT(isCompactPtrAlignedAddressOffset(value));
         AssemblerType_T::repatchCompact(dataLabelCompact.dataLocation(), value);
     }
     
     DataLabelCompact loadCompactWithAddressOffsetPatch(Address address, RegisterID dest)
     {
+        padBeforePatch();
         m_assembler.movl_mr_disp8(address.offset, address.base, dest);
         return DataLabelCompact(this);
     }
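The disp32 form above can later be repatched to any 32-bit offset, while the compact disp8 forms can only be rewritten within the signed 8-bit range, which is exactly what the repatchCompact assertion now checks via isCompactPtrAlignedAddressOffset. The added padBeforePatch() calls give the assembler a chance to pad before each patchable instruction so that later in-place rewriting finds the layout it expects; on x86 this is presumably cheap or a no-op, but routing every patchable emission through it keeps the contract uniform across ports.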
@@ -515,12 +537,12 @@ public:
         m_assembler.movzbl_mr(address.offset, address.base, dest);
     }
     
-    void load8Signed(BaseIndex address, RegisterID dest)
+    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
     {
         m_assembler.movsbl_mr(address.offset, address.base, address.index, address.scale, dest);
     }
 
-    void load8Signed(ImplicitAddress address, RegisterID dest)
+    void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
     {
         m_assembler.movsbl_mr(address.offset, address.base, dest);
     }
@@ -535,18 +557,19 @@ public:
         m_assembler.movzwl_mr(address.offset, address.base, dest);
     }
 
-    void load16Signed(BaseIndex address, RegisterID dest)
+    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
     {
         m_assembler.movswl_mr(address.offset, address.base, address.index, address.scale, dest);
     }
     
-    void load16Signed(Address address, RegisterID dest)
+    void load16SignedExtendTo32(Address address, RegisterID dest)
     {
         m_assembler.movswl_mr(address.offset, address.base, dest);
     }
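The rename from load8Signed/load16Signed makes the destination width explicit: movsbl/movswl sign-extend into the full 32-bit register, in contrast to the zero-extending movzbl/movzwl forms above.

    // Loading the byte 0xFF from memory:
    //   movzbl (load8)                 -> dest = 0x000000FF
    //   movsbl (load8SignedExtendTo32) -> dest = 0xFFFFFFFF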
 
     DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
     {
+        padBeforePatch();
         m_assembler.movl_rm_disp32(src, address.offset, address.base);
         return DataLabel32(this);
     }
@@ -582,7 +605,28 @@ public:
         ASSERT(-128 <= imm.m_value && imm.m_value < 128);
         m_assembler.movb_i8m(imm.m_value, address.offset, address.base, address.index, address.scale);
     }
-    
+
+    static ALWAYS_INLINE RegisterID getUnusedRegister(BaseIndex address)
+    {
+        if (address.base != X86Registers::eax && address.index != X86Registers::eax)
+            return X86Registers::eax;
+
+        if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
+            return X86Registers::ebx;
+
+        ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
+        return X86Registers::ecx;
+    }
+
+    static ALWAYS_INLINE RegisterID getUnusedRegister(Address address)
+    {
+        if (address.base != X86Registers::eax)
+            return X86Registers::eax;
+
+        ASSERT(address.base != X86Registers::edx);
+        return X86Registers::edx;
+    }
+
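These helpers exist because of an IA-32 encoding quirk: only eax, ecx, edx and ebx have byte-addressable low halves; register encodings 4-7 in a byte-sized instruction select ah, ch, dh and bh instead (as the comment in store8 below notes). A BaseIndex address consumes at most two registers, so one of the three candidates {eax, ebx, ecx} is always free; a plain Address consumes one, so {eax, edx} suffices.

    // e.g. store8(edi, BaseIndex(eax, ebx, ...)): edi has no byte form, eax and
    // ebx are both taken by the address, so the helper falls through to ecx.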
     void store8(RegisterID src, BaseIndex address)
     {
 #if CPU(X86)
@@ -590,15 +634,7 @@ public:
         // esp..edi are mapped to the 'h' registers!
         if (src >= 4) {
             // Pick a temporary register.
-            RegisterID temp;
-            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
-                temp = X86Registers::eax;
-            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
-                temp = X86Registers::ebx;
-            else {
-                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
-                temp = X86Registers::ecx;
-            }
+            RegisterID temp = getUnusedRegister(address);
 
             // Swap to the temporary register to perform the store.
             swap(src, temp);
@@ -609,6 +645,25 @@ public:
 #endif
         m_assembler.movb_rm(src, address.offset, address.base, address.index, address.scale);
     }
+    
+    void store8(RegisterID src, Address address)
+    {
+#if CPU(X86)
+        // On 32-bit x86 we can only store from the first 4 registers;
+        // esp..edi are mapped to the 'h' registers!
+        if (src >= 4) {
+            // Pick a temporary register.
+            RegisterID temp = getUnusedRegister(address);
+
+            // Swap to the temporary register to perform the store.
+            swap(src, temp);
+            m_assembler.movb_rm(temp, address.offset, address.base);
+            swap(src, temp);
+            return;
+        }
+#endif
+        m_assembler.movb_rm(src, address.offset, address.base);
+    }
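The swap trick borrows a byte-addressable register without spilling: xchg is its own inverse, so the second swap restores both registers. A rough sketch of the emitted shape for store8(esi, Address(ebp, 8)):

    // xchg esi, eax      ; borrow a register with a byte form
    // mov  [ebp+8], al   ; store the low byte
    // xchg esi, eax      ; restore both registers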
 
     void store16(RegisterID src, BaseIndex address)
     {
@@ -617,16 +672,8 @@ public:
         // esp..edi are mapped to the 'h' registers!
         if (src >= 4) {
             // Pick a temporary register.
-            RegisterID temp;
-            if (address.base != X86Registers::eax && address.index != X86Registers::eax)
-                temp = X86Registers::eax;
-            else if (address.base != X86Registers::ebx && address.index != X86Registers::ebx)
-                temp = X86Registers::ebx;
-            else {
-                ASSERT(address.base != X86Registers::ecx && address.index != X86Registers::ecx);
-                temp = X86Registers::ecx;
-            }
-            
+            RegisterID temp = getUnusedRegister(address);
+
             // Swap to the temporary register to perform the store.
             swap(src, temp);
             m_assembler.movw_rm(temp, address.offset, address.base, address.index, address.scale);
@@ -649,13 +696,13 @@ public:
             m_assembler.movsd_rr(src, dest);
     }
 
-    void loadDouble(const void* address, FPRegisterID dest)
+    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
     {
 #if CPU(X86)
         ASSERT(isSSE2Present());
-        m_assembler.movsd_mr(address, dest);
+        m_assembler.movsd_mr(address.m_value, dest);
 #else
-        move(TrustedImmPtr(address), scratchRegister);
+        move(address, scratchRegister);
         loadDouble(scratchRegister, dest);
 #endif
     }
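The signature change from const void* to TrustedImmPtr keeps the pointer's trusted-immediate provenance explicit. The #else branch exists because x86-64 has no movsd form that takes a 64-bit absolute address, so the pointer is first materialized into scratchRegister (r11, declared at the top of the class) and the load goes through it:

    // x86-64 shape:
    //   mov   r11, imm64    ; materialize the constant's address
    //   movsd xmm, [r11]    ; indirect load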
@@ -817,11 +864,15 @@ public:
             m_assembler.ucomisd_rr(right, left);
 
         if (cond == DoubleEqual) {
+            if (left == right)
+                return Jump(m_assembler.jnp());
             Jump isUnordered(m_assembler.jp());
             Jump result = Jump(m_assembler.je());
             isUnordered.link(this);
             return result;
         } else if (cond == DoubleNotEqualOrUnordered) {
+            if (left == right)
+                return Jump(m_assembler.jp());
             Jump isUnordered(m_assembler.jp());
             Jump isEqual(m_assembler.je());
             isUnordered.link(this);
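The new left == right fast paths rest on how ucomisd reports unordered results: comparing a register with itself is unordered exactly when the value is NaN, and ucomisd signals unordered by setting PF (alongside ZF and CF). So the general two-jump dance collapses to a single parity-flag test:

    // After ucomisd x, x: PF == 1 iff x is NaN.
    //   DoubleEqual(x, x)               == !isNaN(x) -> jnp
    //   DoubleNotEqualOrUnordered(x, x) ==  isNaN(x) -> jp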
@@ -846,13 +897,6 @@ public:
         return branch32(branchType ? NotEqual : Equal, dest, TrustedImm32(0x80000000));
     }
 
-    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
-    {
-        ASSERT(isSSE2Present());
-        m_assembler.cvttsd2si_rr(src, dest);
-        return branch32(branchType ? GreaterThanOrEqual : LessThan, dest, TrustedImm32(0));
-    }
-
     void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
     {
         ASSERT(isSSE2Present());
@@ -871,13 +915,23 @@ public:
     // If the result is not representable as a 32 bit value, branch.
     // May also branch for some values that are representable in 32 bits
     // (specifically, in this case, 0).
-    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp)
+    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp, bool negZeroCheck = true)
     {
         ASSERT(isSSE2Present());
         m_assembler.cvttsd2si_rr(src, dest);
 
         // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
-        failureCases.append(branchTest32(Zero, dest));
+#if CPU(X86_64)
+        if (negZeroCheck) {
+            Jump valueIsNonZero = branchTest32(NonZero, dest);
+            m_assembler.movmskpd_rr(src, scratchRegister);
+            failureCases.append(branchTest32(NonZero, scratchRegister, TrustedImm32(1)));
+            valueIsNonZero.link(this);
+        }
+#else
+        if (negZeroCheck)
+            failureCases.append(branchTest32(Zero, dest));
+#endif
 
         // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
         convertInt32ToDouble(dest, fpTemp);
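The x86-64 branch replaces the blanket fail-on-any-zero-result with precise -0.0 detection: movmskpd copies the sign bits of the two double lanes into the low bits of a GPR, so after a truncation that produced integer 0, bit 0 of scratchRegister distinguishes -0.0 (sign set, genuine failure) from +0.0 (sign clear, conversion is fine). The 32-bit path keeps the conservative behavior since it has no reserved scratch register.

    // src == -0.0: cvttsd2si -> 0, movmskpd bit0 == 1 -> failure appended
    // src == +0.0: cvttsd2si -> 0, movmskpd bit0 == 0 -> falls through; the
    //              round-trip comparison below then succeeds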
@@ -987,6 +1041,11 @@ public:
         m_assembler.movq_i64r(imm.asIntptr(), dest);
     }
 
+    void move(TrustedImm64 imm, RegisterID dest)
+    {
+        m_assembler.movq_i64r(imm.m_value, dest);
+    }
+
     void swap(RegisterID reg1, RegisterID reg2)
     {
         if (reg1 != reg2)
@@ -1107,22 +1166,33 @@ public:
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
-    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    void test32(ResultCondition, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
     {
-        // if we are only interested in the low seven bits, this can be tested with a testb
         if (mask.m_value == -1)
             m_assembler.testl_rr(reg, reg);
-        else
+        else if (!(mask.m_value & ~0xff) && reg < X86Registers::esp) { // Using esp and greater as a byte register yields the upper half of the 16 bit registers ax, cx, dx and bx, e.g. esp, register 4, is actually ah.
+            if (mask.m_value == 0xff)
+                m_assembler.testb_rr(reg, reg);
+            else
+                m_assembler.testb_i8r(mask.m_value, reg);
+        } else
             m_assembler.testl_i32r(mask.m_value, reg);
+    }
+
+    Jump branch(ResultCondition cond)
+    {
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
+    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        test32(cond, reg, mask);
+        return branch(cond);
+    }
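Splitting branchTest32 into test32 + branch lets other emitters reuse the flag-setting half on its own. The new byte-sized form is a size optimization with a correctness guard: test cl, imm8 is three bytes versus six for test ecx, imm32, but byte-register encodings 4-7 without a REX prefix name ah/ch/dh/bh, which would test the wrong bits; hence the reg < esp restriction. The mask == 0xff case degrades further to a two-byte testb reg, reg, since a byte ANDed with itself sets SF/ZF identically to ANDing it with 0xff.

    // Rough encodings (32-bit mode):
    //   test ecx, ecx -> 85 C9                (2 bytes)
    //   test cl,  cl  -> 84 C9                (2 bytes)
    //   test cl,  7   -> F6 C1 07             (3 bytes)
    //   test ecx, 7   -> F7 C1 07 00 00 00    (6 bytes)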
+
     Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
     {
-        if (mask.m_value == -1)
-            m_assembler.cmpl_im(0, address.offset, address.base);
-        else
-            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+        generateTest32(address, mask);
         return Jump(m_assembler.jCC(x86Condition(cond)));
     }
 
@@ -1390,10 +1460,7 @@ public:
 
     void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
     {
-        if (mask.m_value == -1)
-            m_assembler.cmpl_im(0, address.offset, address.base);
-        else
-            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+        generateTest32(address, mask);
         set32(x86Condition(cond), dest);
     }
 
@@ -1407,6 +1474,30 @@ public:
     {
         m_assembler.nop();
     }
+    
+    void memoryFence()
+    {
+        m_assembler.mfence();
+    }
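mfence is the full barrier on x86: it orders all prior loads and stores against all later ones, covering the StoreLoad reordering that the hardware otherwise permits and that plain stores and loads do not forbid.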
+
+    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
+    {
+        X86Assembler::replaceWithJump(instructionStart.executableAddress(), destination.executableAddress());
+    }
+    
+    static ptrdiff_t maxJumpReplacementSize()
+    {
+        return X86Assembler::maxJumpReplacementSize();
+    }
+
+#if ENABLE(MASM_PROBE)
+    // Methods required by the MASM_PROBE mechanism as defined in
+    // AbstractMacroAssembler.h. 
+    static void printCPURegisters(CPUState&, int indentation = 0);
+    static void printRegister(CPUState&, RegisterID);
+    static void printRegister(CPUState&, FPRegisterID);
+    void probe(ProbeFunction, void* arg1 = 0, void* arg2 = 0);
+#endif // ENABLE(MASM_PROBE)
 
 protected:
     X86Assembler::Condition x86Condition(RelationalCondition cond)
@@ -1441,6 +1532,22 @@ private:
     // x86_64, and clients & subclasses of MacroAssembler should be using 'supportsFloatingPoint()'.
     friend class MacroAssemblerX86;
 
+    ALWAYS_INLINE void generateTest32(Address address, TrustedImm32 mask = TrustedImm32(-1))
+    {
+        if (mask.m_value == -1)
+            m_assembler.cmpl_im(0, address.offset, address.base);
+        else if (!(mask.m_value & ~0xff))
+            m_assembler.testb_im(mask.m_value, address.offset, address.base);
+        else if (!(mask.m_value & ~0xff00))
+            m_assembler.testb_im(mask.m_value >> 8, address.offset + 1, address.base);
+        else if (!(mask.m_value & ~0xff0000))
+            m_assembler.testb_im(mask.m_value >> 16, address.offset + 2, address.base);
+        else if (!(mask.m_value & ~0xff000000))
+            m_assembler.testb_im(mask.m_value >> 24, address.offset + 3, address.base);
+        else
+            m_assembler.testl_i32m(mask.m_value, address.offset, address.base);
+    }
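generateTest32 narrows a memory test to a single byte whenever the mask's set bits fall within one byte lane, shifting the mask down and bumping the offset to match. For Zero/NonZero this is exact, since the untested lanes contribute nothing to the AND; Signed is subtler, because SF then reflects the top bit of the tested byte rather than bit 31 of the word, so the helper is safest with equality-style conditions.

    // mask = 0x0000ff00 on the word at (base + offset) becomes:
    //   testb $0xff, 1+offset(base)      ; ~4 bytes
    // instead of:
    //   testl $0x0000ff00, offset(base)  ; ~7 bytes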
+
 #if CPU(X86)
 #if OS(MAC_OS_X)