diff --git a/assembler/MacroAssembler.h b/assembler/MacroAssembler.h
index a99aefd05ede4a0ed6d17a00a86f24b9816db6c2..c70f2b7904e47ef99cc99a1091037f44549cf825 100644
--- a/assembler/MacroAssembler.h
+++ b/assembler/MacroAssembler.h
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2013, 2014 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -26,8 +26,6 @@
 #ifndef MacroAssembler_h
 #define MacroAssembler_h
 
-#include <wtf/Platform.h>
-
 #if ENABLE(ASSEMBLER)
 
 #if CPU(ARM_THUMB2)
@@ -71,16 +69,57 @@ namespace JSC {
 class MacroAssembler : public MacroAssemblerBase {
 public:
 
+    static RegisterID nextRegister(RegisterID reg)
+    {
+        return static_cast<RegisterID>(reg + 1);
+    }
+    
+    static FPRegisterID nextFPRegister(FPRegisterID reg)
+    {
+        return static_cast<FPRegisterID>(reg + 1);
+    }
+    
+    static unsigned numberOfRegisters()
+    {
+        return lastRegister() - firstRegister() + 1;
+    }
+    
+    static unsigned registerIndex(RegisterID reg)
+    {
+        return reg - firstRegister();
+    }
+    
+    static unsigned numberOfFPRegisters()
+    {
+        return lastFPRegister() - firstFPRegister() + 1;
+    }
+    
+    static unsigned fpRegisterIndex(FPRegisterID reg)
+    {
+        return reg - firstFPRegister();
+    }
+    
+    static unsigned registerIndex(FPRegisterID reg)
+    {
+        return fpRegisterIndex(reg) + numberOfRegisters();
+    }
+    
+    static unsigned totalNumberOfRegisters()
+    {
+        return numberOfRegisters() + numberOfFPRegisters();
+    }
+
     using MacroAssemblerBase::pop;
     using MacroAssemblerBase::jump;
     using MacroAssemblerBase::branch32;
     using MacroAssemblerBase::move;
-
-#if ENABLE(JIT_CONSTANT_BLINDING)
     using MacroAssemblerBase::add32;
     using MacroAssemblerBase::and32;
     using MacroAssemblerBase::branchAdd32;
     using MacroAssemblerBase::branchMul32;
+#if CPU(ARM64) || CPU(ARM_THUMB2) || CPU(X86_64)
+    using MacroAssemblerBase::branchPtr;
+#endif
     using MacroAssemblerBase::branchSub32;
     using MacroAssemblerBase::lshift32;
     using MacroAssemblerBase::or32;
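
The static helpers added at the top of this hunk give every register a dense index: GPRs map to [0, numberOfRegisters()) and FPRs follow them, which is why registerIndex(FPRegisterID) offsets by numberOfRegisters(). A minimal usage sketch, assuming only those helpers (RegisterMap is a hypothetical name, not part of this patch), of a flat side table with one slot per machine register:

    // Hypothetical sketch: one slot per machine register, GPRs first,
    // FPRs after, sized and indexed with the helpers introduced above.
    #include <vector>

    template<typename T>
    class RegisterMap {
    public:
        RegisterMap()
            : m_entries(MacroAssembler::totalNumberOfRegisters())
        {
        }

        // GPRs occupy indices [0, numberOfRegisters()).
        T& at(MacroAssembler::RegisterID reg)
        {
            return m_entries[MacroAssembler::registerIndex(reg)];
        }

        // FPRs occupy [numberOfRegisters(), totalNumberOfRegisters()).
        T& at(MacroAssembler::FPRegisterID reg)
        {
            return m_entries[MacroAssembler::registerIndex(reg)];
        }

    private:
        std::vector<T> m_entries;
    };
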
@@ -89,11 +128,10 @@ public:
     using MacroAssemblerBase::sub32;
     using MacroAssemblerBase::urshift32;
     using MacroAssemblerBase::xor32;
-#endif
-    
+
     static bool isPtrAlignedAddressOffset(ptrdiff_t value)
     {
-        return value == (int32_t)value;
+        return value == static_cast<int32_t>(value);
     }
 
     static const double twoToThe32; // This is super useful for some double code.
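
isPtrAlignedAddressOffset() above accepts a displacement only when it survives a round-trip through int32_t, i.e. when the pointer-sized offset is still representable in a 32-bit immediate field. A standalone illustration of that check (fitsInInt32 is a hypothetical name; the real function takes ptrdiff_t):

    // Sketch: an offset is usable only if casting to int32_t loses nothing.
    #include <cassert>
    #include <cstdint>

    static bool fitsInInt32(int64_t value)
    {
        return value == static_cast<int32_t>(value);
    }

    int main()
    {
        assert(fitsInInt32(INT32_MAX));
        assert(fitsInInt32(INT32_MIN));
        assert(!fitsInInt32(int64_t(INT32_MAX) + 1)); // one past the encodable range
        return 0;
    }
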
@@ -197,6 +235,10 @@ public:
     {
         push(src);
     }
+    void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
+    {
+        push(imm);
+    }
     void popToRestore(RegisterID dest)
     {
         pop(dest);
@@ -211,6 +253,8 @@ public:
         loadDouble(stackPointerRegister, dest);
         addPtr(TrustedImm32(sizeof(double)), stackPointerRegister);
     }
+    
+    static ptrdiff_t pushToSaveByteOffset() { return sizeof(void*); }
 #endif // !CPU(ARM64)
 
 #if CPU(X86_64) || CPU(ARM64)
@@ -238,6 +282,13 @@ public:
     }
 #endif
 
+    // Immediate shifts only have 5 controllable bits
+    // so we'll consider them safe for now.
+    TrustedImm32 trustedImm32ForShift(Imm32 imm)
+    {
+        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
+    }
+
     // Backwards branches, these are currently all implemented using existing forwards branch mechanisms.
     void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
     {
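
The trustedImm32ForShift() helper added in the hunk above (its old definition is removed in a later hunk, so the new lshiftPtr() overloads can reach it) masks an untrusted shift count down to its low five bits; since a 32-bit shift instruction only honours those bits, shift immediates are treated as safe to emit without blinding. A standalone sketch of that masking (maskShiftAmount is a hypothetical name):

    // Sketch: only the low 5 bits of a shift immediate can reach the
    // generated instruction, mirroring trustedImm32ForShift().
    #include <cassert>
    #include <cstdint>

    static int32_t maskShiftAmount(int32_t untrusted)
    {
        return untrusted & 31;
    }

    int main()
    {
        assert(maskShiftAmount(0x12345678) == 0x18); // bits above bit 4 are discarded
        assert(maskShiftAmount(32) == 0);            // counts wrap at the register width
        return 0;
    }
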
@@ -294,6 +345,12 @@ public:
         return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
     }
 
+    PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
+    {
+        return PatchableJump(branch32WithPatch(cond, left, dataLabel, initialRightValue));
+    }
+
+#if !CPU(ARM_TRADITIONAL)
     PatchableJump patchableJump()
     {
         return PatchableJump(jump());
@@ -308,6 +365,12 @@ public:
     {
         return PatchableJump(branch32(cond, reg, imm));
     }
+
+    PatchableJump patchableBranch32(RelationalCondition cond, Address address, TrustedImm32 imm)
+    {
+        return PatchableJump(branch32(cond, address, imm));
+    }
+#endif
 #endif
 
     void jump(Label target)
@@ -398,7 +461,17 @@ public:
     {
         and32(imm, srcDest);
     }
-    
+
+    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
+    {
+        and32(TrustedImm32(imm), srcDest);
+    }
+
+    void lshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        lshift32(trustedImm32ForShift(imm), srcDest);
+    }
+
     void negPtr(RegisterID dest)
     {
         neg32(dest);
@@ -485,6 +558,11 @@ public:
         compare32(cond, left, right, dest);
     }
 
+    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
+    {
+        compare32(cond, left, right, dest);
+    }
+    
     void storePtr(RegisterID src, ImplicitAddress address)
     {
         store32(src, address);
@@ -515,6 +593,16 @@ public:
         store32(TrustedImm32(imm), address);
     }
 
+    void storePtr(TrustedImm32 imm, ImplicitAddress address)
+    {
+        store32(imm, address);
+    }
+
+    void storePtr(TrustedImmPtr imm, BaseIndex address)
+    {
+        store32(TrustedImm32(imm), address);
+    }
+
     DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
     {
         return store32WithAddressOffsetPatch(src, address);
@@ -599,7 +687,9 @@ public:
     {
         return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
     }
-#else
+
+#else // !CPU(X86_64)
+
     void addPtr(RegisterID src, RegisterID dest)
     {
         add64(src, dest);
@@ -650,6 +740,16 @@ public:
         and64(imm, srcDest);
     }
     
+    void andPtr(TrustedImmPtr imm, RegisterID srcDest)
+    {
+        and64(imm, srcDest);
+    }
+    
+    void lshiftPtr(Imm32 imm, RegisterID srcDest)
+    {
+        lshift64(trustedImm32ForShift(imm), srcDest);
+    }
+
     void negPtr(RegisterID dest)
     {
         neg64(dest);
@@ -875,7 +975,6 @@ public:
         return branchSub64(cond, src1, src2, dest);
     }
 
-#if ENABLE(JIT_CONSTANT_BLINDING)
     using MacroAssemblerBase::and64;
     using MacroAssemblerBase::convertInt32ToDouble;
     using MacroAssemblerBase::store64;
@@ -890,7 +989,7 @@ public:
         if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
             return shouldConsiderBlinding();
 
-        value = abs(value);
+        value = fabs(value);
         // Only allow a limited set of fractional components
         double scaledValue = value * 8;
         if (scaledValue / 8 != value)
@@ -902,13 +1001,23 @@ public:
         return value > 0xff;
     }
     
+    bool shouldBlindPointerForSpecificArch(uintptr_t value)
+    {
+        if (sizeof(void*) == 4)
+            return shouldBlindForSpecificArch(static_cast<uint32_t>(value));
+        return shouldBlindForSpecificArch(static_cast<uint64_t>(value));
+    }
+    
     bool shouldBlind(ImmPtr imm)
-    { 
+    {
+        if (!canBlind())
+            return false;
+        
 #if ENABLE(FORCED_JIT_BLINDING)
         UNUSED_PARAM(imm);
         // Debug always blind all constants, if only so we know
         // if we've broken blinding during patch development.
-        return true;        
+        return true;
 #endif
 
         // First off we'll special case common, "safe" values to avoid hurting
@@ -934,7 +1043,7 @@ public:
         if (!shouldConsiderBlinding())
             return false;
 
-        return shouldBlindForSpecificArch(value);
+        return shouldBlindPointerForSpecificArch(value);
     }
     
     struct RotatedImmPtr {
@@ -1031,7 +1140,7 @@ public:
 
     void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
     {
-        if (shouldBlind(imm)) {
+        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
             RegisterID scratchRegister = scratchRegisterForBlinding();
             loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
             convertInt32ToDouble(scratchRegister, dest);
@@ -1067,7 +1176,7 @@ public:
 
     Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
     {
-        if (shouldBlind(right)) {
+        if (shouldBlind(right) && haveScratchRegisterForBlinding()) {
             RegisterID scratchRegister = scratchRegisterForBlinding();
             loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
             return branchPtr(cond, left, scratchRegister);
@@ -1077,7 +1186,7 @@ public:
     
     void storePtr(ImmPtr imm, Address dest)
     {
-        if (shouldBlind(imm)) {
+        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
             RegisterID scratchRegister = scratchRegisterForBlinding();
             loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
             storePtr(scratchRegister, dest);
@@ -1087,7 +1196,7 @@ public:
 
     void store64(Imm64 imm, Address dest)
     {
-        if (shouldBlind(imm)) {
+        if (shouldBlind(imm) && haveScratchRegisterForBlinding()) {
             RegisterID scratchRegister = scratchRegisterForBlinding();
             loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
             store64(scratchRegister, dest);
@@ -1095,11 +1204,8 @@ public:
             store64(imm.asTrustedImm64(), dest);
     }
 
-#endif
-
 #endif // !CPU(X86_64)
 
-#if ENABLE(JIT_CONSTANT_BLINDING)
     bool shouldBlind(Imm32 imm)
     {
 #if ENABLE(FORCED_JIT_BLINDING)
@@ -1107,7 +1213,7 @@ public:
         // Debug always blind all constants, if only so we know
         // if we've broken blinding during patch development.
         return true;
-#else
+#else // ENABLE(FORCED_JIT_BLINDING)
 
         // First off we'll special case common, "safe" values to avoid hurting
         // performance too much
@@ -1128,7 +1234,7 @@ public:
             return false;
 
         return shouldBlindForSpecificArch(value);
-#endif
+#endif // ENABLE(FORCED_JIT_BLINDING)
     }
 
     struct BlindedImm32 {
@@ -1299,7 +1405,7 @@ public:
     {
         store64(value, addressForPoke(index));
     }
-#endif
+#endif // CPU(X86_64)
     
     void store32(Imm32 imm, Address dest)
     {
@@ -1308,10 +1414,10 @@ public:
             BlindedImm32 blind = xorBlindConstant(imm);
             store32(blind.value1, dest);
             xor32(blind.value2, dest);
-#else
-            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
-                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
-                store32(scratchRegister, dest);
+#else // CPU(X86) || CPU(X86_64)
+            if (haveScratchRegisterForBlinding()) {
+                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegisterForBlinding());
+                store32(scratchRegisterForBlinding(), dest);
             } else {
                 // If we don't have a scratch register available for use, we'll just 
                 // place a random number of nops.
@@ -1320,7 +1426,7 @@ public:
                     nop();
                 store32(imm.asTrustedImm32(), dest);
             }
-#endif
+#endif // CPU(X86) || CPU(X86_64)
         } else
             store32(imm.asTrustedImm32(), dest);
     }
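
Both paths above rely on the same xor-blinding identity: the instruction stream only ever contains a keyed constant and the key, never the attacker-chosen value, yet xoring the two reproduces it exactly. A simplified standalone model of that idea (xorBlind and its random key are stand-ins, not the real xorBlindConstant() helper):

    // Sketch of xor constant blinding: emit value ^ key, then re-apply key.
    #include <cassert>
    #include <cstdint>
    #include <cstdlib>

    struct BlindedImm32 {
        int32_t value1; // keyed constant that is actually emitted
        int32_t value2; // key applied afterwards to recover the original
    };

    static BlindedImm32 xorBlind(int32_t value)
    {
        int32_t key = static_cast<int32_t>(rand()); // stand-in for the JIT's RNG
        return { value ^ key, key };
    }

    int main()
    {
        BlindedImm32 blind = xorBlind(0x41414141); // attacker-influenced constant
        assert((blind.value1 ^ blind.value2) == 0x41414141);
        return 0;
    }
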
@@ -1368,9 +1474,9 @@ public:
     Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
     {
         if (shouldBlind(right)) {
-            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
-                loadXorBlindedConstant(xorBlindConstant(right), scratchRegister);
-                return branch32(cond, left, scratchRegister);
+            if (haveScratchRegisterForBlinding()) {
+                loadXorBlindedConstant(xorBlindConstant(right), scratchRegisterForBlinding());
+                return branch32(cond, left, scratchRegisterForBlinding());
             }
             // If we don't have a scratch register available for use, we'll just 
             // place a random number of nops.
@@ -1386,14 +1492,12 @@ public:
     Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
     {
         if (src == dest)
-            ASSERT(scratchRegisterForBlinding());
+            ASSERT(haveScratchRegisterForBlinding());
 
         if (shouldBlind(imm)) {
             if (src == dest) {
-                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
-                    move(src, scratchRegister);
-                    src = scratchRegister;
-                }
+                move(src, scratchRegisterForBlinding());
+                src = scratchRegisterForBlinding();
             }
             loadXorBlindedConstant(xorBlindConstant(imm), dest);
             return branchAdd32(cond, src, dest);  
@@ -1404,14 +1508,12 @@ public:
     Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
     {
         if (src == dest)
-            ASSERT(scratchRegisterForBlinding());
+            ASSERT(haveScratchRegisterForBlinding());
 
         if (shouldBlind(imm)) {
             if (src == dest) {
-                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
-                    move(src, scratchRegister);
-                    src = scratchRegister;
-                }
+                move(src, scratchRegisterForBlinding());
+                src = scratchRegisterForBlinding();
             }
             loadXorBlindedConstant(xorBlindConstant(imm), dest);
             return branchMul32(cond, src, dest);  
@@ -1432,13 +1534,6 @@ public:
         return branchSub32(cond, src, imm.asTrustedImm32(), dest);            
     }
     
-    // Immediate shifts only have 5 controllable bits
-    // so we'll consider them safe for now.
-    TrustedImm32 trustedImm32ForShift(Imm32 imm)
-    {
-        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
-    }
-
     void lshift32(Imm32 imm, RegisterID dest)
     {
         lshift32(trustedImm32ForShift(imm), dest);
@@ -1468,7 +1563,6 @@ public:
     {
         urshift32(src, trustedImm32ForShift(amount), dest);
     }
-#endif
 };
 
 } // namespace JSC
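
Pointer-sized constants in this file use rotation blinding rather than xor blinding: a RotatedImmPtr built by rotationBlindConstant() is materialised by loadRotationBlindedConstant() and then un-rotated in the register. A simplified standalone model of that scheme, assuming the blinded value is a left-rotation of the original (the helper names below are hypothetical):

    // Sketch of rotation blinding: the code stream carries only the rotated
    // constant and a rotate instruction, never the raw pointer bits.
    #include <cassert>
    #include <cstdint>

    static uint64_t rotateLeft(uint64_t value, unsigned amount)
    {
        amount &= 63;
        return amount ? (value << amount) | (value >> (64 - amount)) : value;
    }

    static uint64_t rotateRight(uint64_t value, unsigned amount)
    {
        amount &= 63;
        return amount ? (value >> amount) | (value << (64 - amount)) : value;
    }

    int main()
    {
        uint64_t pointer = 0x00007fffdeadbeefULL; // attacker-influenced constant
        unsigned rotation = 13;                   // stand-in for the JIT's random count
        uint64_t blinded = rotateLeft(pointer, rotation);  // value emitted into the code
        assert(rotateRight(blinded, rotation) == pointer); // undone when materialised
        return 0;
    }
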