/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

32 #include "CodeBlock.h"
33 #include "JITInlines.h"
36 #include "JSFunction.h"
37 #include "Interpreter.h"
38 #include "JSCInlines.h"
39 #include "ResultType.h"
40 #include "SamplingTool.h"
41 #include "SlowPathCall.h"
void JIT::emit_op_negate(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
    neg32(regT0);
    emitStoreInt32(dst, regT0, (dst == src));

    Jump end = jump();

    srcNotInt.link(this);
    addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

    xor32(TrustedImm32(1 << 31), regT1);
    store32(regT1, tagFor(dst));
    if (dst != src)
        store32(regT0, payloadFor(dst));

    end.link(this);
}

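// The fast path above bails for payloads where (payload & 0x7fffffff) == 0,
// i.e. 0 and INT32_MIN: negating 0 must produce -0 (which is a double), and
// negating INT32_MIN overflows int32. Both fall through to slow_path_negate.
// The linkSlowCase calls below must consume slow cases in exactly the order
// the fast path added them.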
void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_negate);
    slowPathCall.call();
}

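// Compare-and-jump fuses a relational compare with its branch. When one
// operand is a constant single-character string, the comparison is performed
// directly on that character's code unit; commute(condition) flips the
// condition when the constant operand ends up on the right of the emitted
// branch32 even though it was on the left in the bytecode.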
void JIT::emit_compareAndJump(OpcodeID opcode, int op1, int op2, unsigned target, RelationalCondition condition)
{
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Constant single-character string fast paths.
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Fall through to the double comparison cases.
    emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}

void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        // The CellTag check plus the failures appended by emitLoadCharacterString.
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }

    emitLoad(op1, regT1, regT0);
    emitLoad(op2, regT3, regT2);
    callOperation(operation, regT1, regT0, regT3, regT2);
    emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
}

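// ECMA-262 takes shift counts modulo 32: only the low five bits of the count
// matter. emitRightShift masks constant counts with & 0x1f explicitly below;
// elsewhere the MacroAssembler's shift32 helpers are relied on to mask
// variable counts (x86 hardware does this itself; other ports generally mask
// explicitly).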
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
        emitStoreInt32(dst, regT0, dst == op1);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    lshift32(regT2, regT0);
    emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
}

void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_lshift);
    slowPathCall.call();
}

// RightShift (>>) and UnsignedRightShift (>>>) helper

void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    // Slow case of rshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        if (shift) {
            if (isUnsigned)
                urshift32(Imm32(shift), regT0);
            else
                rshift32(Imm32(shift), regT0);
        }
        emitStoreInt32(dst, regT0, dst == op1);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        if (!isOperandConstantImmediateInt(op1))
            addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        if (isUnsigned)
            urshift32(regT2, regT0);
        else
            rshift32(regT2, regT0);
        emitStoreInt32(dst, regT0, dst == op1);
    }
}

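// The slow cases below assume the register layout left by emitRightShift:
// op1 in regT1:regT0 (tag:payload) and, in the non-constant case, op2 in
// regT3:regT2. Where floating-point truncation is available, a double op1
// whose truncation to int32 succeeds is shifted in place rather than falling
// all the way back to the C++ slow path.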
void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32() & 0x1f;

        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
            emitLoadDouble(op1, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift) {
                if (isUnsigned)
                    urshift32(Imm32(shift), regT0);
                else
                    rshift32(Imm32(shift), regT0);
            }
            move(TrustedImm32(JSValue::Int32Tag), regT1);
            emitStoreInt32(dst, regT0, false);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
    } else {
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag))); // op1 is not a double
                emitLoadDouble(op1, fpRegT0);
                failures.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag))); // op2 is not an int
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                if (isUnsigned)
                    urshift32(regT2, regT0);
                else
                    rshift32(regT2, regT0);
                move(TrustedImm32(JSValue::Int32Tag), regT1);
                emitStoreInt32(dst, regT0, false);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, isUnsigned ? slow_path_urshift : slow_path_rshift);
    slowPathCall.call();
}

void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, false);
}

void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, false);
}

// UnsignedRightShift (>>>)

void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, true);
}

void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, true);
}

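// op_unsigned re-boxes a value that was produced as a uint32. A payload with
// the sign bit clear is already a valid int32; anything with the sign bit set
// (>= 0x80000000 as unsigned) cannot be represented as an int32 JSValue and
// is punted to slow_path_unsigned to be stored as a double.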
void JIT::emit_op_unsigned(Instruction* currentInstruction)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;

    emitLoad(op1, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
    emitStoreInt32(result, regT0, result == op1);
}

void JIT::emitSlow_op_unsigned(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // negative payload check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_unsigned);
    slowPathCall.call();
}

void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    int op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        and32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, dst == op);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    and32(regT2, regT0);
    emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
}

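// getOperandConstantImmediateInt picks out whichever operand is a constant
// int32. Since bitand, bitor and bitxor are commutative, a single fast path
// covers both "x op c" and "c op x" with one emitLoad and an immediate.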
void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitand);
    slowPathCall.call();
}

void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    int op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        or32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, op == dst);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    or32(regT2, regT0);
    emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
}

void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitor);
    slowPathCall.call();
}

void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    int op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        xor32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, op == dst);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    xor32(regT2, regT0);
    emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
}

void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitxor);
    slowPathCall.call();
}

void JIT::emit_op_inc(Instruction* currentInstruction)
{
    int srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}

void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_inc);
    slowPathCall.call();
}

void JIT::emit_op_dec(Instruction* currentInstruction)
{
    int srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}

void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_dec);
    slowPathCall.call();
}

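// op_add, op_sub, op_mul and op_div carry profiled OperandTypes in their
// fourth operand. op_add goes further than the others: if profiling says an
// operand might not be a number at all, it skips the int32/double fast paths
// entirely, calls the C++ slow path inline, and records a dummy slow case so
// the slow-case iterator stays aligned with emitSlow_op_add.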
void JIT::emit_op_add(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        addSlowCase();
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
        slowPathCall.call();
        return;
    }

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    int op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchAdd32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitAdd32Constant(int dst, int op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT2);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchAdd32(Overflow, regT2, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    addDouble(fpRegT1, fpRegT0);
    emitStoreDouble(dst, fpRegT0);

    end.link(this);
}

void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        linkDummySlowCase(iter);
        return;
    }

    int op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint())
            linkSlowCase(iter); // non-sse case
        else {
            ResultType opType = op == op1 ? types.first() : types.second();
            if (!opType.definitelyIsNumber())
                linkSlowCase(iter); // double check
        }
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
    slowPathCall.call();
}

void JIT::emit_op_sub(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    if (isOperandConstantImmediateInt(op2)) {
        emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchSub32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSub32Constant(int dst, int op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2, regT3));
    emitStoreInt32(dst, regT2, (op == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    subDouble(fpRegT0, fpRegT1);
    emitStoreDouble(dst, fpRegT1);

    end.link(this);
}

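// Unlike add, subtraction is not commutative, so only a constant right-hand
// operand gets the constant fast path (emitSub32Constant); a constant on the
// left goes through the generic two-register path. The multi-operand
// branchSub32 above writes the difference into regT2 (with regT3 as a
// scratch on targets that need one), preserving the original payload in
// regT0.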
void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
            linkSlowCase(iter); // int32 or double check
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_sub);
    slowPathCall.call();
}

void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
    JumpList end;

    if (!notInt32Op1.empty()) {
        // Double case 1: Op1 is not int32; Op2 is unknown.
        notInt32Op1.link(this);

        ASSERT(op1IsInRegisters);

        // Verify Op1 is double.
        if (!types.first().definitelyIsNumber())
            addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

        if (!op2IsInRegisters)
            emitLoad(op2, regT3, regT2);

        Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));

        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

        convertInt32ToDouble(regT2, fpRegT0);
        Jump doTheMath = jump();

        // Load Op2 as double into double register.
        doubleOp2.link(this);
        emitLoadDouble(op2, fpRegT0);

        doTheMath.link(this);
        switch (opcodeID) {
        case op_mul:
            emitLoadDouble(op1, fpRegT2);
            mulDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_add:
            emitLoadDouble(op1, fpRegT2);
            addDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_sub:
            emitLoadDouble(op1, fpRegT1);
            subDouble(fpRegT0, fpRegT1);
            emitStoreDouble(dst, fpRegT1);
            break;
        case op_div: {
            emitLoadDouble(op1, fpRegT1);
            divDouble(fpRegT0, fpRegT1);

            // Is the result actually an integer? The DFG JIT would really like to know. If it's
            // not an integer, we increment a count. If this together with the slow case counter
            // are below threshold then the DFG JIT will compile this division with a speculation
            // that the remainder is zero.

            // As well, there are cases where a double result here would cause an important field
            // in the heap to sometimes have doubles in it, resulting in double predictions getting
            // propagated to a use site where it might cause damage (such as the index to an array
            // access). So if we are DFG compiling anything in the program, we want this code to
            // ensure that it produces integers whenever possible.

            // FIXME: This will fail to convert to integer if the result is zero. We should
            // distinguish between positive zero and negative zero here.

            JumpList notInteger;
            branchConvertDoubleToInt32(fpRegT1, regT2, notInteger, fpRegT0);
            // If we've got an integer, we might as well make that the result of the division.
            emitStoreInt32(dst, regT2);
            Jump isInteger = jump();
            notInteger.link(this);
            add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
            emitStoreDouble(dst, fpRegT1);
            isInteger.link(this);
            break;
        }
        case op_jless:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
            break;
        case op_jlesseq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
            break;
        case op_jgreater:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThan, fpRegT2, fpRegT0), dst);
            break;
        case op_jgreatereq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT2, fpRegT0), dst);
            break;
        case op_jnless:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        case op_jnlesseq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        case op_jngreater:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        case op_jngreatereq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (!notInt32Op2.empty())
            end.append(jump());
    }

    if (!notInt32Op2.empty()) {
        // Double case 2: Op1 is int32; Op2 is not int32.
        notInt32Op2.link(this);

        ASSERT(op2IsInRegisters);

        if (!op1IsInRegisters)
            emitLoadPayload(op1, regT0);

        convertInt32ToDouble(regT0, fpRegT0);

        // Verify op2 is double.
        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));

        // Do the math.
        switch (opcodeID) {
        case op_mul:
            emitLoadDouble(op2, fpRegT2);
            mulDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_add:
            emitLoadDouble(op2, fpRegT2);
            addDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_sub:
            emitLoadDouble(op2, fpRegT2);
            subDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_div: {
            emitLoadDouble(op2, fpRegT2);
            divDouble(fpRegT2, fpRegT0);
            // Is the result actually an integer? The DFG JIT would really like to know. If it's
            // not an integer, we increment a count. If this together with the slow case counter
            // are below threshold then the DFG JIT will compile this division with a speculation
            // that the remainder is zero.

            // As well, there are cases where a double result here would cause an important field
            // in the heap to sometimes have doubles in it, resulting in double predictions getting
            // propagated to a use site where it might cause damage (such as the index to an array
            // access). So if we are DFG compiling anything in the program, we want this code to
            // ensure that it produces integers whenever possible.

            // FIXME: This will fail to convert to integer if the result is zero. We should
            // distinguish between positive zero and negative zero here.

            JumpList notInteger;
            branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
            // If we've got an integer, we might as well make that the result of the division.
            emitStoreInt32(dst, regT2);
            Jump isInteger = jump();
            notInteger.link(this);
            add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
            emitStoreDouble(dst, fpRegT0);
            isInteger.link(this);
            break;
        }
        case op_jless:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
            break;
        case op_jlesseq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
            break;
        case op_jgreater:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThan, fpRegT0, fpRegT1), dst);
            break;
        case op_jgreatereq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT0, fpRegT1), dst);
            break;
        case op_jnless:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        case op_jnlesseq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        case op_jngreater:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        case op_jngreatereq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    end.link(this);
}

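// A subtlety of emitBinaryDoubleOp: for the compare-and-jump opcodes the
// "dst" argument is not a register index but the bytecode jump target
// (emit_compareAndJump passes "target" in that position), which is why the
// comparison cases feed it to addJump rather than storing through it.
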
// Multiplication (*)

void JIT::emit_op_mul(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case. Save op1's payload in regT3 (its tag is known Int32 by now)
    // so the slow path can inspect the operand signs.
    move(regT0, regT3);
    addSlowCase(branchMul32(Overflow, regT2, regT0));
    addSlowCase(branchTest32(Zero, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

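// Integer multiplication cannot represent -0, but JS requires it: (-1) * 0
// must evaluate to -0, while 5 * 0 is +0. The fast path therefore treats any
// zero result as a slow case; the code below only falls into the full C++
// path when one operand really was negative (the sign-bit test on the saved
// operand payloads), and otherwise simply stores +0 and returns to hot code.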
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    Jump overflow = getSlowCase(iter); // overflow check
    linkSlowCase(iter); // zero result check

    Jump negZero = branchOr32(Signed, regT2, regT3);
    emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));

    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));

    negZero.link(this);
    // We only get here if we have a genuine negative zero. Record this,
    // so that the speculative JIT knows that we failed speculation
    // because of a negative zero.
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
    overflow.link(this);

    if (!supportsFloatingPoint()) {
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
    }

    if (supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mul);
    slowPathCall.call();
}

void JIT::emit_op_div(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);

    if (!supportsFloatingPoint()) {
        addSlowCase(jump());
        return;
    }

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    JumpList end;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);

    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    convertInt32ToDouble(regT0, fpRegT0);
    convertInt32ToDouble(regT2, fpRegT1);
    divDouble(fpRegT1, fpRegT0);
    // Is the result actually an integer? The DFG JIT would really like to know. If it's
    // not an integer, we increment a count. If this together with the slow case counter
    // are below threshold then the DFG JIT will compile this division with a speculation
    // that the remainder is zero.

    // As well, there are cases where a double result here would cause an important field
    // in the heap to sometimes have doubles in it, resulting in double predictions getting
    // propagated to a use site where it might cause damage (such as the index to an array
    // access). So if we are DFG compiling anything in the program, we want this code to
    // ensure that it produces integers whenever possible.

    // FIXME: This will fail to convert to integer if the result is zero. We should
    // distinguish between positive zero and negative zero here.

    JumpList notInteger;
    branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
    // If we've got an integer, we might as well make that the result of the division.
    emitStoreInt32(dst, regT2);
    end.append(jump());
    notInteger.link(this);
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
    emitStoreDouble(dst, fpRegT0);
    end.append(jump());

    // Double divide.
    emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint())
        linkSlowCase(iter);
    else {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
    slowPathCall.call();
}

/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

void JIT::emit_op_mod(Instruction* currentInstruction)
{
#if CPU(X86) || CPU(X86_64)
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);
    ASSERT(regT3 == X86Registers::ebx);

    emitLoad2(op1, regT0, regT3, op2, regT1, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT0, TrustedImm32(JSValue::Int32Tag)));

    move(regT3, regT0);
    addSlowCase(branchTest32(Zero, regT2));
    Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
    denominatorNotNeg1.link(this);
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
    addSlowCase(branchTest32(Zero, regT1));
    numeratorPositive.link(this);
    emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
#else
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
    slowPathCall.call();
#endif
}

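// The slow cases linked below mirror emit_op_mod's x86 fast path, in the
// order it added them: two int32 tag checks, division by zero, INT_MIN / -1
// (which would fault in idiv), and a zero remainder from a negative
// dividend, which must produce -0 rather than +0.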
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if CPU(X86) || CPU(X86_64)
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
    slowPathCall.call();
#else
    UNUSED_PARAM(currentInstruction);
    UNUSED_PARAM(iter);
    // We would have really useful assertions here if it wasn't for the compiler's
    // insistence on attribute noreturn.
    // RELEASE_ASSERT_NOT_REACHED();
#endif
}

/* ------------------------------ END: OP_MOD ------------------------------ */

} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)