#include "CodeBlock.h"
#include "JITInlines.h"
-#include "JITStubCall.h"
#include "JITStubs.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
-#include "Operations.h"
+#include "JSCInlines.h"
#include "ResultType.h"
#include "SamplingTool.h"
+#include "SlowPathCall.h"
-#ifndef NDEBUG
-#include <stdio.h>
-#endif
-
-using namespace std;
namespace JSC {
void JIT::emit_op_negate(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
emitLoad(src, regT1, regT0);
void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
-
linkSlowCase(iter); // 0x7fffffff check: src was 0 or INT_MIN, whose negation is not an int32
linkSlowCase(iter); // double check
- JITStubCall stubCall(this, cti_op_negate);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
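+ // JITSlowPathCall passes the ExecState and the current Instruction* to a
+ // common C++ slow path (see CommonSlowPaths.h), which decodes its own
+ // operands and stores its own result, so the old addArgument()/call(dst)
+ // marshalling is no longer needed.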
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_negate);
+ slowPathCall.call();
}
-void JIT::emit_compareAndJump(OpcodeID opcode, unsigned op1, unsigned op2, unsigned target, RelationalCondition condition)
+void JIT::emit_compareAndJump(OpcodeID opcode, int op1, int op2, unsigned target, RelationalCondition condition)
{
JumpList notInt32Op1;
JumpList notInt32Op2;
end.link(this);
}
-void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
linkSlowCase(iter);
linkSlowCase(iter); // double check
}
}
- JITStubCall stubCall(this, stub);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
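+ // On JSVALUE32_64, each EncodedJSValue argument is passed to the operation
+ // as a tag/payload register pair; the boolean result comes back in
+ // returnValueGPR.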
+ emitLoad(op1, regT1, regT0);
+ emitLoad(op2, regT3, regT2);
+ callOperation(operation, regT1, regT0, regT3, regT2);
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
}
// LeftShift (<<)
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
if (isOperandConstantImmediateInt(op2)) {
emitLoad(op1, regT1, regT0);
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_lshift));
+ emitStoreInt32(dst, regT0, dst == op1);
return;
}
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
lshift32(regT2, regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, dst == op1 || dst == op2, OPCODE_LENGTH(op_lshift));
+ emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
}
void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
linkSlowCase(iter); // int32 check
linkSlowCase(iter); // int32 check
- JITStubCall stubCall(this, cti_op_lshift);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_lshift);
+ slowPathCall.call();
}
// RightShift (>>) and UnsignedRightShift (>>>) helper
void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
// The slow case of rshift makes assumptions about which registers hold the
// shift arguments, so any changes here must be mirrored there as well.
urshift32(Imm32(shift), regT0);
else
rshift32(Imm32(shift), regT0);
- } else if (isUnsigned) // signed right shift by zero is simply toInt conversion
- addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
- emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
+ }
+ emitStoreInt32(dst, regT0, dst == op1);
} else {
emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
if (!isOperandConstantImmediateInt(op1))
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
- if (isUnsigned) {
+ if (isUnsigned)
urshift32(regT2, regT0);
- addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
- } else
+ else
rshift32(regT2, regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
+ emitStoreInt32(dst, regT0, dst == op1);
}
}
void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
if (isOperandConstantImmediateInt(op2)) {
int shift = getConstantOperand(op2).asInt32() & 0x1f;
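+ // ECMAScript shift counts use only the low five bits, hence the 0x1f mask.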
// op1 = regT1:regT0
urshift32(Imm32(shift), regT0);
else
rshift32(Imm32(shift), regT0);
- } else if (isUnsigned) // signed right shift by zero is simply toInt conversion
- failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
+ }
move(TrustedImm32(JSValue::Int32Tag), regT1);
emitStoreInt32(dst, regT0, false);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
failures.link(this);
}
- if (isUnsigned && !shift)
- linkSlowCase(iter); // failed to box in hot path
} else {
// op1 = regT1:regT0
// op2 = regT3:regT2
emitLoadDouble(op1, fpRegT0);
failures.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag))); // op2 is not an int
failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
- if (isUnsigned) {
+ if (isUnsigned)
urshift32(regT2, regT0);
- failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
- } else
+ else
rshift32(regT2, regT0);
move(TrustedImm32(JSValue::Int32Tag), regT1);
emitStoreInt32(dst, regT0, false);
}
linkSlowCase(iter); // int32 check - op2 is not an int
- if (isUnsigned)
- linkSlowCase(iter); // Can't represent unsigned result as an immediate
}
- JITStubCall stubCall(this, isUnsigned ? cti_op_urshift : cti_op_rshift);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, isUnsigned ? slow_path_urshift : slow_path_rshift);
+ slowPathCall.call();
}
// RightShift (>>)
emitRightShiftSlowCase(currentInstruction, iter, true);
}
+void JIT::emit_op_unsigned(Instruction* currentInstruction)
+{
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+
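+ // op_unsigned re-boxes the int32 produced by an unsigned right shift: a
+ // result with the sign bit set cannot be stored as an immediate int32, so
+ // it falls back to slow_path_unsigned, which boxes it as a double.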
+ emitLoad(op1, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
+ emitStoreInt32(result, regT0, result == op1);
+}
+
+void JIT::emitSlow_op_unsigned(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // sign bit check
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_unsigned);
+ slowPathCall.call();
+}
+
// BitAnd (&)
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
- unsigned op;
+ int op;
int32_t constant;
if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
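+ // One operand is a constant int32: 'op' is set to the non-constant operand
+ // and 'constant' to the constant's value, so only 'op' needs a type check.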
emitLoad(op, regT1, regT0);
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
and32(Imm32(constant), regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, dst == op, OPCODE_LENGTH(op_bitand));
+ emitStoreInt32(dst, regT0, dst == op);
return;
}
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
and32(regT2, regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitand));
+ emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
}
void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
linkSlowCase(iter); // int32 check
linkSlowCase(iter); // int32 check
- JITStubCall stubCall(this, cti_op_bitand);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitand);
+ slowPathCall.call();
}
// BitOr (|)
void JIT::emit_op_bitor(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
- unsigned op;
+ int op;
int32_t constant;
if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
emitLoad(op, regT1, regT0);
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
or32(Imm32(constant), regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitor));
+ emitStoreInt32(dst, regT0, op == dst);
return;
}
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
or32(regT2, regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitor));
+ emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
}
void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
linkSlowCase(iter); // int32 check
linkSlowCase(iter); // int32 check
- JITStubCall stubCall(this, cti_op_bitor);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitor);
+ slowPathCall.call();
}
// BitXor (^)
void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
- unsigned op;
+ int op;
int32_t constant;
if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
emitLoad(op, regT1, regT0);
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
xor32(Imm32(constant), regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitxor));
+ emitStoreInt32(dst, regT0, op == dst);
return;
}
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
xor32(regT2, regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitxor));
+ emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
}
void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
linkSlowCase(iter); // int32 check
linkSlowCase(iter); // int32 check
- JITStubCall stubCall(this, cti_op_bitxor);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitxor);
+ slowPathCall.call();
}
void JIT::emit_op_inc(Instruction* currentInstruction)
{
- unsigned srcDst = currentInstruction[1].u.operand;
+ int srcDst = currentInstruction[1].u.operand;
emitLoad(srcDst, regT1, regT0);
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
- emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_inc));
+ emitStoreInt32(srcDst, regT0, true);
}
void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned srcDst = currentInstruction[1].u.operand;
-
linkSlowCase(iter); // int32 check
linkSlowCase(iter); // overflow check
- JITStubCall stubCall(this, cti_op_inc);
- stubCall.addArgument(srcDst);
- stubCall.call(srcDst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_inc);
+ slowPathCall.call();
}
void JIT::emit_op_dec(Instruction* currentInstruction)
{
- unsigned srcDst = currentInstruction[1].u.operand;
+ int srcDst = currentInstruction[1].u.operand;
emitLoad(srcDst, regT1, regT0);
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
- emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_dec));
+ emitStoreInt32(srcDst, regT0, true);
}
void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned srcDst = currentInstruction[1].u.operand;
-
linkSlowCase(iter); // int32 check
linkSlowCase(iter); // overflow check
- JITStubCall stubCall(this, cti_op_dec);
- stubCall.addArgument(srcDst);
- stubCall.call(srcDst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_dec);
+ slowPathCall.call();
}
// Addition (+)
void JIT::emit_op_add(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
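+ // Profiling says at least one operand can never be a number, so skip the
+ // fast path entirely: record a slow case and call straight out to the C++
+ // slow path.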
addSlowCase();
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
+ slowPathCall.call();
return;
}
JumpList notInt32Op1;
JumpList notInt32Op2;
- unsigned op;
+ int op;
int32_t constant;
if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
end.link(this);
}
-void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
+void JIT::emitAdd32Constant(int dst, int op, int32_t constant, ResultType opType)
{
// Int32 case.
emitLoad(op, regT1, regT2);
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
return;
}
- unsigned op;
+ int op;
int32_t constant;
if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
linkSlowCase(iter); // overflow check
}
}
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
+ slowPathCall.call();
}
// Subtraction (-)
void JIT::emit_op_sub(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
JumpList notInt32Op1;
end.link(this);
}
-void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
+void JIT::emitSub32Constant(int dst, int op, int32_t constant, ResultType opType)
{
// Int32 case.
emitLoad(op, regT1, regT0);
Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
-#if ENABLE(JIT_CONSTANT_BLINDING)
- addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2, regT3));
-#else
- addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2));
-#endif
-
+ addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2, regT3));
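+ // The scratch-register form of branchSub32 supports blinding the immediate;
+ // with the ENABLE(JIT_CONSTANT_BLINDING) guard gone it is used
+ // unconditionally.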
emitStoreInt32(dst, regT2, (op == dst));
// Double case.
void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
if (isOperandConstantImmediateInt(op2)) {
}
}
- JITStubCall stubCall(this, cti_op_sub);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_sub);
+ slowPathCall.call();
}
-void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
+void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
JumpList end;
emitLoadDouble(op1, fpRegT1);
divDouble(fpRegT0, fpRegT1);
-#if ENABLE(VALUE_PROFILER)
// Is the result actually an integer? The DFG JIT would really like to know. If it's
// not an integer, we increment a count. If this, together with the slow case counter,
// is below threshold then the DFG JIT will compile this division with a speculation
// that the remaining uses of this division will be integer uses.
add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
emitStoreDouble(dst, fpRegT1);
isInteger.link(this);
-#else
- emitStoreDouble(dst, fpRegT1);
-#endif
break;
}
case op_jless:
case op_div: {
emitLoadDouble(op2, fpRegT2);
divDouble(fpRegT2, fpRegT0);
-#if ENABLE(VALUE_PROFILER)
// Is the result actually an integer? The DFG JIT would really like to know. If it's
// not an integer, we increment a count. If this, together with the slow case counter,
// is below threshold then the DFG JIT will compile this division with a speculation
// that the remaining uses of this division will be integer uses.
add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
emitStoreDouble(dst, fpRegT0);
isInteger.link(this);
-#else
- emitStoreDouble(dst, fpRegT0);
-#endif
break;
}
case op_jless:
void JIT::emit_op_mul(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-#if ENABLE(VALUE_PROFILER)
m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
-#endif
JumpList notInt32Op1;
JumpList notInt32Op2;
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
Jump overflow = getSlowCase(iter); // overflow check
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
negZero.link(this);
-#if ENABLE(VALUE_PROFILER)
// We only get here if we have a genuine negative zero. Record this,
// so that the speculative JIT knows that we failed speculation
// because of a negative zero.
add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
-#endif
overflow.link(this);
if (!supportsFloatingPoint()) {
}
}
- Label jitStubCall(this);
- JITStubCall stubCall(this, cti_op_mul);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mul);
+ slowPathCall.call();
}
// Division (/)
void JIT::emit_op_div(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-#if ENABLE(VALUE_PROFILER)
m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
-#endif
if (!supportsFloatingPoint()) {
addSlowCase(jump());
convertInt32ToDouble(regT0, fpRegT0);
convertInt32ToDouble(regT2, fpRegT1);
divDouble(fpRegT1, fpRegT0);
-#if ENABLE(VALUE_PROFILER)
// Is the result actually an integer? The DFG JIT would really like to know. If it's
// not an integer, we increment a count. If this, together with the slow case counter,
// is below threshold then the DFG JIT will compile this division with a speculation
// that the remaining uses of this division will be integer uses.
notInteger.link(this);
add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
emitStoreDouble(dst, fpRegT0);
-#else
- emitStoreDouble(dst, fpRegT0);
-#endif
end.append(jump());
// Double divide.
void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
if (!supportsFloatingPoint())
}
}
- JITStubCall stubCall(this, cti_op_div);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
+ slowPathCall.call();
}
// Mod (%)
void JIT::emit_op_mod(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
#if CPU(X86) || CPU(X86_64)
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+
// Make sure registers are correct for x86 IDIV instructions.
ASSERT(regT0 == X86Registers::eax);
ASSERT(regT1 == X86Registers::edx);
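+ // IDIV divides edx:eax and leaves the quotient in eax and the remainder in
+ // edx, which is why regT0 and regT1 must be pinned to those registers.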
numeratorPositive.link(this);
emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
#else
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
+ slowPathCall.call();
#endif
}
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if CPU(X86) || CPU(X86_64)
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(result);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
+ slowPathCall.call();
#else
UNUSED_PARAM(currentInstruction);
UNUSED_PARAM(iter);