/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)
#include "JIT.h"

#include "CodeBlock.h"
#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "Operations.h"
#include "ResultType.h"
#include "SamplingTool.h"

namespace JSC {
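
// A note on the JSVALUE32_64 encoding assumed throughout this file: a JSValue
// occupies two 32-bit words, a tag and a payload, which emitLoad places into a
// (tag, payload) register pair such as (regT1, regT0). Int32 and cell values
// carry dedicated tags (JSValue::Int32Tag, JSValue::CellTag); tags from
// JSValue::LowestTag upward are reserved for non-double values, so a tag word
// below that range is the high half of a double. That is why
// branch32(Above, tagReg, TrustedImm32(JSValue::LowestTag)) is the standard
// "definitely not a double" bail-out used below.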

void JIT::emit_op_negate(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
    neg32(regT0);
    emitStoreInt32(dst, regT0, (dst == src));

    Jump end = jump();

    srcNotInt.link(this);
    addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

    // Negate the double by flipping the sign bit in its high word (the tag register).
    xor32(TrustedImm32(1 << 31), regT1);
    store32(regT1, tagFor(dst));
    if (dst != src)
        store32(regT0, payloadFor(dst));

    end.link(this);
}
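
// The hot path above treats two payloads specially via
// branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)): a payload of 0 or
// 0x80000000 has all of its low 31 bits clear, and neither negates to an
// int32 (-0 must be represented as a double, and -INT_MIN overflows), so both
// fall through to the slow case handled here.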

void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    JITStubCall stubCall(this, cti_op_negate);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}
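
// When one operand of a comparison is a constant single-character string
// (isOperandConstantImmediateChar), the other operand's first character is
// compared directly: emitLoadCharacterString appends to 'failures' unless the
// cell is a one-character string, and the constant's character is obtained via
// asString(...)->tryGetValue()[0]. commute(condition) flips the condition when
// the constant ends up on the left-hand side of the comparison.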

void JIT::emit_compareAndJump(OpcodeID opcode, unsigned op1, unsigned op2, unsigned target, RelationalCondition condition)
{
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}
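
// Slow-path functions must call linkSlowCase exactly once, and in the same
// order, for every slow case added by the corresponding hot path; the branch
// structure below mirrors emit_compareAndJump for that reason.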

void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, stub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
}
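
// The MacroAssembler shift operations are expected to mask a register shift
// amount to the low five bits on the targets this file supports, matching
// ECMA-262's "& 0x1f" semantics, which is why only constant shift amounts are
// masked explicitly in this file.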

void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_lshift));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    lshift32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, dst == op1 || dst == op2, OPCODE_LENGTH(op_lshift));
}

void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
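
// Note that >>> by zero is effectively a toUint32 conversion: a result at or
// above 0x80000000 is a uint32 with no int32 representation in JSVALUE32_64,
// which is what the branch32(LessThan, regT0, TrustedImm32(0)) slow cases in
// the unsigned paths below guard against.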

// RightShift (>>) and UnsignedRightShift (>>>) helper

void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Slow case of rshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        if (shift) {
            if (isUnsigned)
                urshift32(Imm32(shift), regT0);
            else
                rshift32(Imm32(shift), regT0);
        } else if (isUnsigned) // signed right shift by zero is simply toInt conversion
            addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        if (!isOperandConstantImmediateInt(op1))
            addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        if (isUnsigned) {
            urshift32(regT2, regT0);
            addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        } else
            rshift32(regT2, regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
    }
}
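
// The slow case below first retries the shift when op1 is a double that
// truncates exactly to an int32 (guarded by supportsFloatingPointTruncate());
// only if that also fails does it fall back to the cti_op_rshift or
// cti_op_urshift stub.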

void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        // op1 = regT1:regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
            emitLoadDouble(op1, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift) {
                if (isUnsigned)
                    urshift32(Imm32(shift), regT0);
                else
                    rshift32(Imm32(shift), regT0);
            } else if (isUnsigned) // signed right shift by zero is simply toInt conversion
                failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
            move(TrustedImm32(JSValue::Int32Tag), regT1);
            emitStoreInt32(dst, regT0, false);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
        if (isUnsigned && !shift)
            linkSlowCase(iter); // failed to box in hot path
    } else {
        // op1 = regT1:regT0
        // op2 = regT3:regT2
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag))); // op1 is not a double
                emitLoadDouble(op1, fpRegT0);
                failures.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag))); // op2 is not an int
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                if (isUnsigned) {
                    urshift32(regT2, regT0);
                    failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
                } else
                    rshift32(regT2, regT0);
                move(TrustedImm32(JSValue::Int32Tag), regT1);
                emitStoreInt32(dst, regT0, false);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
        if (isUnsigned)
            linkSlowCase(iter); // Can't represent unsigned result as an immediate
    }

    JITStubCall stubCall(this, isUnsigned ? cti_op_urshift : cti_op_rshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, false);
}

void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, false);
}

// UnsignedRightShift (>>>)

void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, true);
}

void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, true);
}
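
// getOperandConstantImmediateInt(op1, op2, op, constant) tests whether either
// operand is an int32 constant; on success it leaves the non-constant operand
// in 'op' and the constant's value in 'constant', so the bitwise ops below can
// fold the immediate into a single instruction.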

void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        and32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op, OPCODE_LENGTH(op_bitand));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    and32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitand));
}

void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitand);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        or32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitor));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    or32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitor));
}

void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        xor32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitxor));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    xor32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitxor));
}

void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitxor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
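
// op_inc and op_dec use the branchAdd32/branchSub32(Overflow, ...) pattern:
// the arithmetic is performed and a slow case is taken if the int32 result
// overflowed, in which case the cti stub redoes the operation on the boxed
// value.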

void JIT::emit_op_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_inc));
}

void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_inc);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}

void JIT::emit_op_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_dec));
}

void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_dec);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}
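
// currentInstruction[4] packs an OperandTypes, the bytecode generator's static
// prediction for the operand types. definitelyIsNumber() lets the arithmetic
// ops below omit tag checks, while a failed mightBeNumber() routes op_add
// straight to the stub, since the operation could be a string concatenation.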

void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        addSlowCase(); // Dummy slow case, linked by emitSlow_op_add's linkDummySlowCase.
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1);
        stubCall.addArgument(op2);
        stubCall.call(dst);
        return;
    }

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchAdd32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT2);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchAdd32(Overflow, regT2, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    addDouble(fpRegT1, fpRegT0);
    emitStoreDouble(dst, fpRegT0);

    end.link(this);
}

void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        linkDummySlowCase(iter);
        return;
    }

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint())
            linkSlowCase(iter); // non-sse case
        else {
            ResultType opType = op == op1 ? types.first() : types.second();
            if (!opType.definitelyIsNumber())
                linkSlowCase(iter); // double check
        }
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_add);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    if (isOperandConstantImmediateInt(op2)) {
        emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchSub32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
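
// With JIT_CONSTANT_BLINDING enabled, the four-operand branchSub32 takes an
// extra scratch register (regT3) so the MacroAssembler can blind the immediate
// (combining it with a random value) rather than emitting attacker-chosen
// constants verbatim into executable memory.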

void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
#if ENABLE(JIT_CONSTANT_BLINDING)
    addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2, regT3));
#else
    addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2));
#endif

    emitStoreInt32(dst, regT2, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    subDouble(fpRegT0, fpRegT1);
    emitStoreDouble(dst, fpRegT1);

    end.link(this);
}

void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
            linkSlowCase(iter); // int32 or double check
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_sub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
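
// emitBinaryDoubleOp is the shared double fallback for the arithmetic and
// compare opcodes. It is entered through the notInt32Op1/notInt32Op2 jump
// lists appended by the int32 fast paths: case 1 handles "op1 not int32, op2
// unknown", case 2 handles "op1 int32, op2 not int32". For the compare opcodes
// (op_jless and friends) the 'dst' argument is the jump target, not a result
// register.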

void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
    JumpList end;

    if (!notInt32Op1.empty()) {
        // Double case 1: Op1 is not int32; Op2 is unknown.
        notInt32Op1.link(this);

        ASSERT(op1IsInRegisters);

        // Verify Op1 is double.
        if (!types.first().definitelyIsNumber())
            addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

        if (!op2IsInRegisters)
            emitLoad(op2, regT3, regT2);

        Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));

        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

        convertInt32ToDouble(regT2, fpRegT0);
        Jump doTheMath = jump();

        // Load Op2 as double into double register.
        doubleOp2.link(this);
        emitLoadDouble(op2, fpRegT0);

        // Do the math.
        doTheMath.link(this);
        switch (opcodeID) {
        case op_mul:
            emitLoadDouble(op1, fpRegT2);
            mulDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_add:
            emitLoadDouble(op1, fpRegT2);
            addDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_sub:
            emitLoadDouble(op1, fpRegT1);
            subDouble(fpRegT0, fpRegT1);
            emitStoreDouble(dst, fpRegT1);
            break;
        case op_div: {
            emitLoadDouble(op1, fpRegT1);
            divDouble(fpRegT0, fpRegT1);

#if ENABLE(VALUE_PROFILER)
            // Is the result actually an integer? The DFG JIT would really like to know. If it's
            // not an integer, we increment a count. If this together with the slow case counter
            // are below threshold then the DFG JIT will compile this division with a speculation
            // that the remainder is zero.

            // As well, there are cases where a double result here would cause an important field
            // in the heap to sometimes have doubles in it, resulting in double predictions getting
            // propagated to a use site where it might cause damage (such as the index to an array
            // access). So if we are DFG compiling anything in the program, we want this code to
            // ensure that it produces integers whenever possible.

            // FIXME: This will fail to convert to integer if the result is zero. We should
            // distinguish between positive zero and negative zero here.

            JumpList notInteger;
            branchConvertDoubleToInt32(fpRegT1, regT2, notInteger, fpRegT0);
            // If we've got an integer, we might as well make that the result of the division.
            emitStoreInt32(dst, regT2);
            Jump isInteger = jump();
            notInteger.link(this);
            add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
            emitStoreDouble(dst, fpRegT1);
            isInteger.link(this);
#else
            emitStoreDouble(dst, fpRegT1);
#endif
            break;
        }
        case op_jless:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
            break;
        case op_jlesseq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
            break;
        case op_jgreater:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThan, fpRegT2, fpRegT0), dst);
            break;
        case op_jgreatereq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT2, fpRegT0), dst);
            break;
        case op_jnless:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        case op_jnlesseq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        case op_jngreater:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        case op_jngreatereq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (!notInt32Op2.empty())
            end.append(jump());
    }

    if (!notInt32Op2.empty()) {
        // Double case 2: Op1 is int32; Op2 is not int32.
        notInt32Op2.link(this);

        ASSERT(op2IsInRegisters);

        if (!op1IsInRegisters)
            emitLoadPayload(op1, regT0);

        convertInt32ToDouble(regT0, fpRegT0);

        // Verify op2 is double.
        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));

        // Do the math.
        switch (opcodeID) {
        case op_mul:
            emitLoadDouble(op2, fpRegT2);
            mulDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_add:
            emitLoadDouble(op2, fpRegT2);
            addDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_sub:
            emitLoadDouble(op2, fpRegT2);
            subDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_div: {
            emitLoadDouble(op2, fpRegT2);
            divDouble(fpRegT2, fpRegT0);
#if ENABLE(VALUE_PROFILER)
            // Is the result actually an integer? The DFG JIT would really like to know. If it's
            // not an integer, we increment a count. If this together with the slow case counter
            // are below threshold then the DFG JIT will compile this division with a speculation
            // that the remainder is zero.

            // As well, there are cases where a double result here would cause an important field
            // in the heap to sometimes have doubles in it, resulting in double predictions getting
            // propagated to a use site where it might cause damage (such as the index to an array
            // access). So if we are DFG compiling anything in the program, we want this code to
            // ensure that it produces integers whenever possible.

            // FIXME: This will fail to convert to integer if the result is zero. We should
            // distinguish between positive zero and negative zero here.

            JumpList notInteger;
            branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
            // If we've got an integer, we might as well make that the result of the division.
            emitStoreInt32(dst, regT2);
            Jump isInteger = jump();
            notInteger.link(this);
            add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
            emitStoreDouble(dst, fpRegT0);
            isInteger.link(this);
#else
            emitStoreDouble(dst, fpRegT0);
#endif
            break;
        }
        case op_jless:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
            break;
        case op_jlesseq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
            break;
        case op_jgreater:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThan, fpRegT0, fpRegT1), dst);
            break;
        case op_jgreatereq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT0, fpRegT1), dst);
            break;
        case op_jnless:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        case op_jnlesseq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        case op_jngreater:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        case op_jngreatereq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    end.link(this);
}
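
// Note on negative zero: the multiplication fast path below treats a zero
// result as a slow case, because an int32 zero cannot distinguish 0 from -0
// (for example -1 * 0). The slow path stores +0 when neither operand was
// negative and otherwise records the failed speculation before calling the
// stub.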

// Multiplication (*)

void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

#if ENABLE(VALUE_PROFILER)
    m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case. Preserve op1's payload in regT3 for the slow path's negative-zero check.
    move(regT0, regT3);
    addSlowCase(branchMul32(Overflow, regT2, regT0));
    addSlowCase(branchTest32(Zero, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    Jump overflow = getSlowCase(iter); // overflow check
    linkSlowCase(iter); // zero result check

    Jump negZero = branchOr32(Signed, regT2, regT3);
    emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));

    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));

    negZero.link(this);
#if ENABLE(VALUE_PROFILER)
    // We only get here if we have a genuine negative zero. Record this,
    // so that the speculative JIT knows that we failed speculation
    // because of a negative zero.
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
#endif
    overflow.link(this);

    if (!supportsFloatingPoint()) {
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
    }

    if (supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    Label jitStubCall(this);
    JITStubCall stubCall(this, cti_op_mul);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
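
// Division has no int32 fast path of its own: even int32 / int32 is computed
// in double arithmetic below, and the quotient is converted back to an int32
// when the conversion is exact. The special fast-case counter bumped on the
// non-integer path feeds the DFG's speculation decisions.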

// Division (/)

void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

#if ENABLE(VALUE_PROFILER)
    m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif

    if (!supportsFloatingPoint()) {
        addSlowCase(jump());
        return;
    }

    // Int32 divide.
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    JumpList end;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);

    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    convertInt32ToDouble(regT0, fpRegT0);
    convertInt32ToDouble(regT2, fpRegT1);
    divDouble(fpRegT1, fpRegT0);
#if ENABLE(VALUE_PROFILER)
    // Is the result actually an integer? The DFG JIT would really like to know. If it's
    // not an integer, we increment a count. If this together with the slow case counter
    // are below threshold then the DFG JIT will compile this division with a speculation
    // that the remainder is zero.

    // As well, there are cases where a double result here would cause an important field
    // in the heap to sometimes have doubles in it, resulting in double predictions getting
    // propagated to a use site where it might cause damage (such as the index to an array
    // access). So if we are DFG compiling anything in the program, we want this code to
    // ensure that it produces integers whenever possible.

    // FIXME: This will fail to convert to integer if the result is zero. We should
    // distinguish between positive zero and negative zero here.

    JumpList notInteger;
    branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
    // If we've got an integer, we might as well make that the result of the division.
    emitStoreInt32(dst, regT2);
    end.append(jump());
    notInteger.link(this);
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
    emitStoreDouble(dst, fpRegT0);
#else
    emitStoreDouble(dst, fpRegT0);
#endif
    end.append(jump());

    // Double divide.
    emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint())
        linkSlowCase(iter);
    else {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
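
// On x86, idiv takes its dividend in edx:eax and leaves the quotient in eax
// and the remainder in edx; the register assertions in emit_op_mod encode
// that. Each guard before the division (zero denominator, INT_MIN % -1, and a
// zero remainder from a negative numerator, which would be negative zero)
// adds a slow case.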

/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if CPU(X86) || CPU(X86_64)
    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);
    ASSERT(regT3 == X86Registers::ebx);

    emitLoad2(op1, regT0, regT3, op2, regT1, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT0, TrustedImm32(JSValue::Int32Tag)));

    move(regT3, regT0);
    addSlowCase(branchTest32(Zero, regT2));
    Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
    denominatorNotNeg1.link(this);
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
    addSlowCase(branchTest32(Zero, regT1));
    numeratorPositive.link(this);
    emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
#else
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
#endif
}
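
// On non-x86 targets emit_op_mod calls the stub unconditionally and adds no
// slow cases, so the slow path below only has work to do for X86/X86_64.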

void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if CPU(X86) || CPU(X86_64)
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(result);
#else
    UNUSED_PARAM(currentInstruction);
    UNUSED_PARAM(iter);
    // We would have really useful assertions here if it wasn't for the compiler's
    // insistence on attribute noreturn.
    // RELEASE_ASSERT_NOT_REACHED();
#endif
}

/* ------------------------------ END: OP_MOD ------------------------------ */

} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)