diff --git a/jit/JITArithmetic.cpp b/jit/JITArithmetic.cpp
index a9390e35f2449d5028c33311689386baf3511ae6..24501478f2678116a39992eef10de76d1a9b47d2 100644
--- a/jit/JITArithmetic.cpp
+++ b/jit/JITArithmetic.cpp
 #include "JIT.h"
 
 #include "CodeBlock.h"
-#include "JITInlineMethods.h"
+#include "JITInlines.h"
 #include "JITStubCall.h"
 #include "JITStubs.h"
 #include "JSArray.h"
 #include "JSFunction.h"
 #include "Interpreter.h"
+#include "Operations.h"
 #include "ResultType.h"
 #include "SamplingTool.h"
 
@@ -209,8 +210,8 @@ void JIT::emit_op_negate(Instruction* currentInstruction)
     srcNotInt.link(this);
     emitJumpSlowCaseIfNotImmediateNumber(regT0);
 
-    move(TrustedImmPtr(reinterpret_cast<void*>(0x8000000000000000ull)), regT1);
-    xorPtr(regT1, regT0);
+    move(TrustedImm64((int64_t)0x8000000000000000ull), regT1);
+    xor64(regT1, regT0);
 
     end.link(this);
     emitPutVirtualRegister(dst);
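
Note: the fast path above negates an encoded double by flipping the IEEE-754 sign bit; the xor64 with 0x8000000000000000 does exactly that. A minimal standalone sketch of the same bit trick, in plain C++ rather than JSC macro-assembler code:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // Negate a double by flipping only its sign bit -- the bit-level
    // equivalent of the xor64(0x8000000000000000) in the fast path above.
    static double negateViaSignBit(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits); // reinterpret the double as raw bits
        bits ^= 0x8000000000000000ull;       // flip the sign bit
        std::memcpy(&d, &bits, sizeof d);
        return d;
    }

    int main()
    {
        std::printf("%g %g\n", negateViaSignBit(1.5), negateViaSignBit(-0.0)); // -1.5 0
        return 0;
    }
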
@@ -224,7 +225,7 @@ void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEnt
     linkSlowCase(iter); // double check
 
     JITStubCall stubCall(this, cti_op_negate);
-    stubCall.addArgument(regT1, regT0);
+    stubCall.addArgument(regT0);
     stubCall.call(dst);
 }
 
@@ -279,8 +280,8 @@ void JIT::emit_op_rshift(Instruction* currentInstruction)
             Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
             // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
             addSlowCase(emitJumpIfNotImmediateNumber(regT0));
-            addPtr(tagTypeNumberRegister, regT0);
-            movePtrToDouble(regT0, fpRegT0);
+            add64(tagTypeNumberRegister, regT0);
+            move64ToDouble(regT0, fpRegT0);
             addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
             lhsIsInt.link(this);
             emitJumpSlowCaseIfNotImmediateInteger(regT2);
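
Note: the add64(tagTypeNumberRegister, ...) / move64ToDouble pair that replaces addPtr/movePtrToDouble throughout this diff is the JSVALUE64 double-unboxing idiom: encoded doubles are the raw IEEE-754 bits plus 2^48, and tagTypeNumberRegister holds 0xffff000000000000, the two's-complement negation of that offset, so adding it strips the offset before the bits are moved to an FP register (the later moveDoubleTo64 / sub64 pairs re-apply it when boxing a result). A rough standalone sketch under those encoding assumptions; the helper names here are illustrative, not JSC API:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // JSVALUE64 double encoding, per the constants assumed above.
    static const uint64_t TagTypeNumber = 0xffff000000000000ull; // == -(1ull << 48)

    static double unboxDouble(uint64_t encoded)
    {
        uint64_t bits = encoded + TagTypeNumber; // same as subtracting 2^48
        double d;
        std::memcpy(&d, &bits, sizeof d);
        return d;
    }

    static uint64_t boxDouble(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        return bits - TagTypeNumber;             // same as adding 2^48
    }

    int main()
    {
        assert(unboxDouble(boxDouble(3.25)) == 3.25);
        return 0;
    }
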
@@ -377,8 +378,8 @@ void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEn
         if (supportsFloatingPointTruncate()) {
             JumpList failures;
             failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
-            addPtr(tagTypeNumberRegister, regT0);
-            movePtrToDouble(regT0, fpRegT0);
+            add64(tagTypeNumberRegister, regT0);
+            move64ToDouble(regT0, fpRegT0);
             failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
             if (shift)
                 urshift32(Imm32(shift & 0x1f), regT0);
@@ -399,8 +400,8 @@ void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEn
             if (supportsFloatingPointTruncate()) {
                 JumpList failures;
                 failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
-                addPtr(tagTypeNumberRegister, regT0);
-                movePtrToDouble(regT0, fpRegT0);
+                add64(tagTypeNumberRegister, regT0);
+                move64ToDouble(regT0, fpRegT0);
                 failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                 failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
                 emitFastArithImmToInt(regT1);
@@ -499,8 +500,8 @@ void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, D
 
         if (supportsFloatingPoint()) {
             Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
-            addPtr(tagTypeNumberRegister, regT0);
-            movePtrToDouble(regT0, fpRegT0);
+            add64(tagTypeNumberRegister, regT0);
+            move64ToDouble(regT0, fpRegT0);
 
             int32_t op2imm = getConstantOperand(op2).asInt32();
 
@@ -525,8 +526,8 @@ void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, D
 
         if (supportsFloatingPoint()) {
             Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
-            addPtr(tagTypeNumberRegister, regT1);
-            movePtrToDouble(regT1, fpRegT1);
+            add64(tagTypeNumberRegister, regT1);
+            move64ToDouble(regT1, fpRegT1);
 
             int32_t op1imm = getConstantOperand(op1).asInt32();
 
@@ -552,10 +553,10 @@ void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, D
             Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
             Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
             Jump fail3 = emitJumpIfImmediateInteger(regT1);
-            addPtr(tagTypeNumberRegister, regT0);
-            addPtr(tagTypeNumberRegister, regT1);
-            movePtrToDouble(regT0, fpRegT0);
-            movePtrToDouble(regT1, fpRegT1);
+            add64(tagTypeNumberRegister, regT0);
+            add64(tagTypeNumberRegister, regT1);
+            move64ToDouble(regT0, fpRegT0);
+            move64ToDouble(regT1, fpRegT1);
 
             emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);
 
@@ -585,19 +586,19 @@ void JIT::emit_op_bitand(Instruction* currentInstruction)
         emitGetVirtualRegister(op2, regT0);
         emitJumpSlowCaseIfNotImmediateInteger(regT0);
         int32_t imm = getConstantOperandImmediateInt(op1);
-        andPtr(Imm32(imm), regT0);
+        and64(Imm32(imm), regT0);
         if (imm >= 0)
             emitFastArithIntToImmNoCheck(regT0, regT0);
     } else if (isOperandConstantImmediateInt(op2)) {
         emitGetVirtualRegister(op1, regT0);
         emitJumpSlowCaseIfNotImmediateInteger(regT0);
         int32_t imm = getConstantOperandImmediateInt(op2);
-        andPtr(Imm32(imm), regT0);
+        and64(Imm32(imm), regT0);
         if (imm >= 0)
             emitFastArithIntToImmNoCheck(regT0, regT0);
     } else {
         emitGetVirtualRegisters(op1, regT0, op2, regT1);
-        andPtr(regT1, regT0);
+        and64(regT1, regT0);
         emitJumpSlowCaseIfNotImmediateInteger(regT0);
     }
     emitPutVirtualRegister(result);
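
Note: the and64 rewrites above lean on the JSVALUE64 integer encoding, where an int32 is boxed as 0xffff000000000000 ORed with the zero-extended payload. ANDing two boxed ints leaves the tag intact, and a negative Imm32 constant is sign-extended to 64 bits so the tag also survives; only a non-negative constant clears the tag, which is why the re-tag is guarded by imm >= 0. A small sketch under that encoding assumption (boxInt32 is illustrative, not JSC API):

    #include <cstdint>
    #include <cstdio>

    static const uint64_t TagTypeNumber = 0xffff000000000000ull;

    static uint64_t boxInt32(int32_t i) { return TagTypeNumber | (uint32_t)i; }

    int main()
    {
        uint64_t boxed = boxInt32(0x1234);

        // Negative immediate: sign extension keeps the tag bits set.
        uint64_t negMask = boxed & (uint64_t)(int64_t)-16;
        // Non-negative immediate: the tag is cleared and must be reapplied.
        uint64_t posMask = boxed & (uint64_t)(int64_t)255;

        std::printf("%d %d\n",
                    (negMask & TagTypeNumber) == TagTypeNumber,  // 1
                    (posMask & TagTypeNumber) == TagTypeNumber); // 0
        return 0;
    }
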
@@ -628,61 +629,7 @@ void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEnt
     }
 }
 
-void JIT::emit_op_post_inc(Instruction* currentInstruction)
-{
-    unsigned result = currentInstruction[1].u.operand;
-    unsigned srcDst = currentInstruction[2].u.operand;
-
-    emitGetVirtualRegister(srcDst, regT0);
-    move(regT0, regT1);
-    emitJumpSlowCaseIfNotImmediateInteger(regT0);
-    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT1));
-    emitFastArithIntToImmNoCheck(regT1, regT1);
-    emitPutVirtualRegister(srcDst, regT1);
-    emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    unsigned result = currentInstruction[1].u.operand;
-    unsigned srcDst = currentInstruction[2].u.operand;
-
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    JITStubCall stubCall(this, cti_op_post_inc);
-    stubCall.addArgument(regT0);
-    stubCall.addArgument(Imm32(srcDst));
-    stubCall.call(result);
-}
-
-void JIT::emit_op_post_dec(Instruction* currentInstruction)
-{
-    unsigned result = currentInstruction[1].u.operand;
-    unsigned srcDst = currentInstruction[2].u.operand;
-
-    emitGetVirtualRegister(srcDst, regT0);
-    move(regT0, regT1);
-    emitJumpSlowCaseIfNotImmediateInteger(regT0);
-    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT1));
-    emitFastArithIntToImmNoCheck(regT1, regT1);
-    emitPutVirtualRegister(srcDst, regT1);
-    emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    unsigned result = currentInstruction[1].u.operand;
-    unsigned srcDst = currentInstruction[2].u.operand;
-
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-    JITStubCall stubCall(this, cti_op_post_dec);
-    stubCall.addArgument(regT0);
-    stubCall.addArgument(Imm32(srcDst));
-    stubCall.call(result);
-}
-
-void JIT::emit_op_pre_inc(Instruction* currentInstruction)
+void JIT::emit_op_inc(Instruction* currentInstruction)
 {
     unsigned srcDst = currentInstruction[1].u.operand;
 
@@ -693,7 +640,7 @@ void JIT::emit_op_pre_inc(Instruction* currentInstruction)
     emitPutVirtualRegister(srcDst);
 }
 
-void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned srcDst = currentInstruction[1].u.operand;
 
@@ -701,12 +648,12 @@ void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEn
     linkSlowCase(iter);
     emitGetVirtualRegister(srcDst, regT0);
     notImm.link(this);
-    JITStubCall stubCall(this, cti_op_pre_inc);
+    JITStubCall stubCall(this, cti_op_inc);
     stubCall.addArgument(regT0);
     stubCall.call(srcDst);
 }
 
-void JIT::emit_op_pre_dec(Instruction* currentInstruction)
+void JIT::emit_op_dec(Instruction* currentInstruction)
 {
     unsigned srcDst = currentInstruction[1].u.operand;
 
@@ -717,7 +664,7 @@ void JIT::emit_op_pre_dec(Instruction* currentInstruction)
     emitPutVirtualRegister(srcDst);
 }
 
-void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     unsigned srcDst = currentInstruction[1].u.operand;
 
@@ -725,7 +672,7 @@ void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEn
     linkSlowCase(iter);
     emitGetVirtualRegister(srcDst, regT0);
     notImm.link(this);
-    JITStubCall stubCall(this, cti_op_pre_dec);
+    JITStubCall stubCall(this, cti_op_dec);
     stubCall.addArgument(regT0);
     stubCall.call(srcDst);
 }
@@ -792,9 +739,9 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
     stubCall.call(result);
 }
 
-void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
 {
-    ASSERT_NOT_REACHED();
+    UNREACHABLE_FOR_PLATFORM();
 }
 
 #endif // CPU(X86) || CPU(X86_64)
@@ -818,7 +765,7 @@ void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsign
     else {
         ASSERT(opcodeID == op_mul);
 #if ENABLE(VALUE_PROFILER)
-        if (m_canBeOptimized) {
+        if (shouldEmitProfiling()) {
             // We want to be able to measure if this is taking the slow case just
             // because of negative zero. If this produces positive zero, then we
             // don't want the slow case to be taken because that will throw off
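
Note: the negative-zero distinction the comment above is getting at: integer multiplication cannot represent -0.0, so a zero result produced from operands of opposite sign has to take the double path, and the profiling here is meant to measure whether that was the only reason the slow case fired. A quick standalone illustration:

    #include <cstdio>

    // In int32 arithmetic 0 * -5 is plain 0, but the same product computed
    // on doubles is -0.0, so an int32 fast path would silently drop the sign.
    int main()
    {
        double d = 0.0 * -5.0;
        int    i = 0 * -5;
        std::printf("%g %d %d\n", d, i, 1.0 / d < 0.0); // -0 0 1
        return 0;
    }
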
@@ -887,16 +834,16 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
             emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
         emitGetVirtualRegister(op1, regT1);
         convertInt32ToDouble(regT1, fpRegT1);
-        addPtr(tagTypeNumberRegister, regT0);
-        movePtrToDouble(regT0, fpRegT2);
+        add64(tagTypeNumberRegister, regT0);
+        move64ToDouble(regT0, fpRegT2);
     } else if (op2HasImmediateIntFastCase) {
         notImm1.link(this);
         if (!types.first().definitelyIsNumber())
             emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
         emitGetVirtualRegister(op2, regT1);
         convertInt32ToDouble(regT1, fpRegT1);
-        addPtr(tagTypeNumberRegister, regT0);
-        movePtrToDouble(regT0, fpRegT2);
+        add64(tagTypeNumberRegister, regT0);
+        move64ToDouble(regT0, fpRegT2);
     } else {
         // if we get here, eax is not an int32, edx not yet checked.
         notImm1.link(this);
@@ -904,8 +851,8 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
             emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
         if (!types.second().definitelyIsNumber())
             emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
-        addPtr(tagTypeNumberRegister, regT0);
-        movePtrToDouble(regT0, fpRegT1);
+        add64(tagTypeNumberRegister, regT0);
+        move64ToDouble(regT0, fpRegT1);
         Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
         convertInt32ToDouble(regT1, fpRegT2);
         Jump op2wasInteger = jump();
@@ -916,8 +863,8 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
             emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
         convertInt32ToDouble(regT0, fpRegT1);
         op2isDouble.link(this);
-        addPtr(tagTypeNumberRegister, regT1);
-        movePtrToDouble(regT1, fpRegT2);
+        add64(tagTypeNumberRegister, regT1);
+        move64ToDouble(regT1, fpRegT2);
         op2wasInteger.link(this);
     }
 
@@ -931,8 +878,8 @@ void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>:
         ASSERT(opcodeID == op_div);
         divDouble(fpRegT2, fpRegT1);
     }
-    moveDoubleToPtr(fpRegT1, regT0);
-    subPtr(tagTypeNumberRegister, regT0);
+    moveDoubleTo64(fpRegT1, regT0);
+    sub64(tagTypeNumberRegister, regT0);
     emitPutVirtualRegister(result, regT0);
 
     end.link(this);
@@ -1041,8 +988,8 @@ void JIT::emit_op_div(Instruction* currentInstruction)
 
     if (isOperandConstantImmediateDouble(op1)) {
         emitGetVirtualRegister(op1, regT0);
-        addPtr(tagTypeNumberRegister, regT0);
-        movePtrToDouble(regT0, fpRegT0);
+        add64(tagTypeNumberRegister, regT0);
+        move64ToDouble(regT0, fpRegT0);
     } else if (isOperandConstantImmediateInt(op1)) {
         emitLoadInt32ToDouble(op1, fpRegT0);
     } else {
@@ -1053,15 +1000,15 @@ void JIT::emit_op_div(Instruction* currentInstruction)
         convertInt32ToDouble(regT0, fpRegT0);
         Jump skipDoubleLoad = jump();
         notInt.link(this);
-        addPtr(tagTypeNumberRegister, regT0);
-        movePtrToDouble(regT0, fpRegT0);
+        add64(tagTypeNumberRegister, regT0);
+        move64ToDouble(regT0, fpRegT0);
         skipDoubleLoad.link(this);
     }
 
     if (isOperandConstantImmediateDouble(op2)) {
         emitGetVirtualRegister(op2, regT1);
-        addPtr(tagTypeNumberRegister, regT1);
-        movePtrToDouble(regT1, fpRegT1);
+        add64(tagTypeNumberRegister, regT1);
+        move64ToDouble(regT1, fpRegT1);
     } else if (isOperandConstantImmediateInt(op2)) {
         emitLoadInt32ToDouble(op2, fpRegT1);
     } else {
@@ -1072,8 +1019,8 @@ void JIT::emit_op_div(Instruction* currentInstruction)
         convertInt32ToDouble(regT1, fpRegT1);
         Jump skipDoubleLoad = jump();
         notInt.link(this);
-        addPtr(tagTypeNumberRegister, regT1);
-        movePtrToDouble(regT1, fpRegT1);
+        add64(tagTypeNumberRegister, regT1);
+        move64ToDouble(regT1, fpRegT1);
         skipDoubleLoad.link(this);
     }
     divDouble(fpRegT1, fpRegT0);
@@ -1090,23 +1037,25 @@ void JIT::emit_op_div(Instruction* currentInstruction)
     // access). So if we are DFG compiling anything in the program, we want this code to
     // ensure that it produces integers whenever possible.
     
-    // FIXME: This will fail to convert to integer if the result is zero. We should
-    // distinguish between positive zero and negative zero here.
-    
     JumpList notInteger;
     branchConvertDoubleToInt32(fpRegT0, regT0, notInteger, fpRegT1);
     // If we've got an integer, we might as well make that the result of the division.
     emitFastArithReTagImmediate(regT0, regT0);
     Jump isInteger = jump();
     notInteger.link(this);
+    moveDoubleTo64(fpRegT0, regT0);
+    Jump doubleZero = branchTest64(Zero, regT0);
     add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));
-    moveDoubleToPtr(fpRegT0, regT0);
-    subPtr(tagTypeNumberRegister, regT0);
+    sub64(tagTypeNumberRegister, regT0);
+    Jump trueDouble = jump();
+    doubleZero.link(this);
+    move(tagTypeNumberRegister, regT0);
+    trueDouble.link(this);
     isInteger.link(this);
 #else
     // Double result.
-    moveDoubleToPtr(fpRegT0, regT0);
-    subPtr(tagTypeNumberRegister, regT0);
+    moveDoubleTo64(fpRegT0, regT0);
+    sub64(tagTypeNumberRegister, regT0);
 #endif
 
     emitPutVirtualRegister(dst, regT0);
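
Note: the branchTest64(Zero, regT0) path added to emit_op_div resolves the FIXME removed above by telling +0.0 and -0.0 apart on the raw bits: +0.0 is the all-zero pattern, so it can be returned as the boxed integer 0 (move(tagTypeNumberRegister, regT0)) without bumping the special-fast-case counter, while any other non-int32 result, including -0.0 with its sign bit set, is boxed as a double and counted. A standalone sketch of the underlying bit test, assuming ordinary IEEE-754 doubles:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    // +0.0 and -0.0 compare equal as doubles, but only +0.0 has an
    // all-zero bit pattern, so a 64-bit test of the raw bits separates them.
    static bool isPositiveZero(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        return bits == 0; // -0.0 has the sign bit set and fails this test
    }

    int main()
    {
        std::printf("%d %d %d\n",
                    isPositiveZero(0.0),   // 1
                    isPositiveZero(-0.0),  // 0
                    0.0 == -0.0);          // 1: still equal as doubles
        return 0;
    }
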