/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE64)
#include "JIT.h"

#include "CodeBlock.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "JSCInlines.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SlowPathCall.h"

namespace JSC {

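// A note on the pattern used throughout this file: each emit_op_* function
// below emits the inline fast path for its bytecode, registering a jump with
// addSlowCase() for every check that can fail. The matching emitSlow_op_*
// function consumes those jumps with linkSlowCase() in the same order, then
// falls back to the corresponding C++ slow path.
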
void JIT::emit_op_jless(Instruction* currentInstruction)
{
    int op1 = currentInstruction[1].u.operand;
    int op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jless, op1, op2, target, LessThan);
}

void JIT::emit_op_jlesseq(Instruction* currentInstruction)
{
    int op1 = currentInstruction[1].u.operand;
    int op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jlesseq, op1, op2, target, LessThanOrEqual);
}

void JIT::emit_op_jgreater(Instruction* currentInstruction)
{
    int op1 = currentInstruction[1].u.operand;
    int op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jgreater, op1, op2, target, GreaterThan);
}

void JIT::emit_op_jgreatereq(Instruction* currentInstruction)
{
    int op1 = currentInstruction[1].u.operand;
    int op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jgreatereq, op1, op2, target, GreaterThanOrEqual);
}

void JIT::emit_op_jnless(Instruction* currentInstruction)
{
    int op1 = currentInstruction[1].u.operand;
    int op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jnless, op1, op2, target, GreaterThanOrEqual);
}

void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
{
    int op1 = currentInstruction[1].u.operand;
    int op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jnlesseq, op1, op2, target, GreaterThan);
}

void JIT::emit_op_jngreater(Instruction* currentInstruction)
{
    int op1 = currentInstruction[1].u.operand;
    int op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jngreater, op1, op2, target, LessThanOrEqual);
}

void JIT::emit_op_jngreatereq(Instruction* currentInstruction)
{
    int op1 = currentInstruction[1].u.operand;
    int op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jngreatereq, op1, op2, target, LessThan);
}

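// The four inverted opcodes above reuse emit_compareAndJump with the
// complementary integer condition (op_jnless branches on GreaterThanOrEqual,
// and so on). That complement is only valid for int32 operands; double
// comparisons involving NaN are routed to the slow paths below, which use
// explicitly unordered conditions instead.
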
void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[1].u.operand;
    int op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleLessThan, operationCompareLess, false, iter);
}

void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[1].u.operand;
    int op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqual, operationCompareLessEq, false, iter);
}

void JIT::emitSlow_op_jgreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[1].u.operand;
    int op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThan, operationCompareGreater, false, iter);
}

void JIT::emitSlow_op_jgreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[1].u.operand;
    int op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqual, operationCompareGreaterEq, false, iter);
}

void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[1].u.operand;
    int op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqualOrUnordered, operationCompareLess, true, iter);
}

void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[1].u.operand;
    int op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrUnordered, operationCompareLessEq, true, iter);
}

void JIT::emitSlow_op_jngreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[1].u.operand;
    int op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqualOrUnordered, operationCompareGreater, true, iter);
}

void JIT::emitSlow_op_jngreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[1].u.operand;
    int op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrUnordered, operationCompareGreaterEq, true, iter);
}

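// The inverted opcodes pass an "...OrUnordered" double condition together with
// invert = true for the operation call, so a comparison involving NaN takes
// the not-less / not-greater branch. This matches ECMAScript, where every
// relational comparison against NaN evaluates to false.
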
void JIT::emit_op_negate(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src, regT0);

    Jump srcNotInt = emitJumpIfNotImmediateInteger(regT0);
    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
    neg32(regT0);
    emitFastArithReTagImmediate(regT0, regT0);

    Jump end = jump();

    srcNotInt.link(this);
    emitJumpSlowCaseIfNotImmediateNumber(regT0);

    move(TrustedImm64((int64_t)0x8000000000000000ull), regT1);
    xor64(regT1, regT0);

    end.link(this);
    emitPutVirtualRegister(dst);
}

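// The branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)) above sends the
// value to the slow case when all of its low 31 bits are clear, which is true
// for exactly two int32 payloads: 0 (whose negation is -0, not representable
// as an int) and INT_MIN (whose negation overflows int32).
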
void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_negate);
    slowPathCall.call();
}

void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
    lshift32(regT2, regT0);
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_lshift);
    slowPathCall.call();
}

void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        // isOperandConstantImmediateInt(op2) => 1 SlowCase
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (supportsFloatingPointTruncate()) {
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
            // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            add64(tagTypeNumberRegister, regT0);
            move64ToDouble(regT0, fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
            lhsIsInt.link(this);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        } else {
            // !supportsFloatingPoint() => 2 SlowCases
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        }
        emitFastArithImmToInt(regT2);
        rshift32(regT2, regT0);
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(result);
}

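// Masking the shift count with 0x1f mirrors ECMAScript semantics: only the low
// five bits of the count are used, so (x >> 37) behaves exactly like (x >> 5).
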
void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2))
        linkSlowCase(iter);
    else {
        if (supportsFloatingPointTruncate()) {
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
        }
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_rshift);
    slowPathCall.call();
}

void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        // isOperandConstantImmediateInt(op2) => 1 SlowCase
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.3 step 7.
        urshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (supportsFloatingPointTruncate()) {
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
            // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            add64(tagTypeNumberRegister, regT0);
            move64ToDouble(regT0, fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
            lhsIsInt.link(this);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        } else {
            // !supportsFloatingPoint() => 2 SlowCases
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        }
        emitFastArithImmToInt(regT2);
        urshift32(regT2, regT0);
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2))
        linkSlowCase(iter);
    else {
        if (supportsFloatingPointTruncate()) {
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
        }
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_urshift);
    slowPathCall.call();
}

void JIT::emit_op_unsigned(Instruction* currentInstruction)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;

    emitGetVirtualRegister(op1, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result, regT0);
}

void JIT::emitSlow_op_unsigned(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_unsigned);
    slowPathCall.call();
}

void JIT::emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, RelationalCondition condition)
{
    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateChar(op1)) {
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t op2imm = getConstantOperandImmediateInt(op2);
        addJump(branch32(condition, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
        int32_t op1imm = getConstantOperandImmediateInt(op1);
        addJump(branch32(commute(condition), regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(condition, regT0, regT1), target);
    }
}

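// branch32 wants the immediate as its second argument, so when the constant
// operand is on the left the condition is commuted rather than the operands:
// "c < x" is emitted as "x > c" via commute(condition).
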
void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition condition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jlesseq), OPCODE_LENGTH_op_jlesseq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnless), OPCODE_LENGTH_op_jnless_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnlesseq), OPCODE_LENGTH_op_jnlesseq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreater), OPCODE_LENGTH_op_jgreater_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreatereq), OPCODE_LENGTH_op_jgreatereq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreater), OPCODE_LENGTH_op_jngreater_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreatereq), OPCODE_LENGTH_op_jngreatereq_equals_op_jless);

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);

        emitGetVirtualRegister(op1, argumentGPR0);
        emitGetVirtualRegister(op2, argumentGPR1);
        callOperation(operation, argumentGPR0, argumentGPR1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            add64(tagTypeNumberRegister, regT0);
            move64ToDouble(regT0, fpRegT0);

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
        }

        emitGetVirtualRegister(op2, regT1);
        callOperation(operation, regT0, regT1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            add64(tagTypeNumberRegister, regT1);
            move64ToDouble(regT1, fpRegT1);

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
        }

        emitGetVirtualRegister(op1, regT2);
        callOperation(operation, regT2, regT1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
    } else {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            Jump fail3 = emitJumpIfImmediateInteger(regT1);
            add64(tagTypeNumberRegister, regT0);
            add64(tagTypeNumberRegister, regT1);
            move64ToDouble(regT0, fpRegT0);
            move64ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
        }

        linkSlowCase(iter);
        callOperation(operation, regT0, regT1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
    }
}

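// The add64(tagTypeNumberRegister, ...) before each move64ToDouble is the
// JSVALUE64 unboxing step: TagTypeNumber + DoubleEncodeOffset == 0, so adding
// TagTypeNumber is equivalent to subtracting DoubleEncodeOffset and recovers
// the raw IEEE 754 bits of a boxed double. sub64 of the same register performs
// the inverse, re-boxing a raw double.
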
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t imm = getConstantOperandImmediateInt(op1);
        and64(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t imm = getConstantOperandImmediateInt(op2);
        and64(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        and64(regT1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}

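// The "if (imm >= 0)" guards above rely on sign extension: a negative Imm32
// sign-extends to all-ones in the upper bits, so and64 leaves the int32 tag
// untouched and no retag is needed. A non-negative immediate clears the tag
// bits, so the result must be retagged before it is stored.
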
void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitand);
    slowPathCall.call();
}

void JIT::emit_op_inc(Instruction* currentInstruction)
{
    int srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(srcDst);
}

void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_inc);
    slowPathCall.call();
}

void JIT::emit_op_dec(Instruction* currentInstruction)
{
    int srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(srcDst);
}

void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_dec);
    slowPathCall.call();
}

/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

#if CPU(X86) || CPU(X86_64)

void JIT::emit_op_mod(Instruction* currentInstruction)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);

    emitGetVirtualRegisters(op1, regT3, op2, regT2);
    emitJumpSlowCaseIfNotImmediateInteger(regT3);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);

    move(regT3, regT0);
    addSlowCase(branchTest32(Zero, regT2));
    Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
    denominatorNotNeg1.link(this);
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
    addSlowCase(branchTest32(Zero, regT1));
    numeratorPositive.link(this);
    emitFastArithReTagImmediate(regT1, regT0);
    emitPutVirtualRegister(result);
}

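// The addSlowCase branches above cover the inputs x86 idiv cannot handle as a
// plain int32 modulo: a zero denominator, INT_MIN / -1 (quotient overflow),
// and a negative numerator with a zero remainder, where JavaScript requires -0
// rather than the integer 0 that idiv leaves in edx.
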
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
    slowPathCall.call();
}

#else // CPU(X86) || CPU(X86_64)

void JIT::emit_op_mod(Instruction* currentInstruction)
{
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
    slowPathCall.call();
}

void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    UNREACHABLE_FOR_PLATFORM();
}

#endif // CPU(X86) || CPU(X86_64)

/* ------------------------------ END: OP_MOD ------------------------------ */

/* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */

void JIT::compileBinaryArithOp(OpcodeID opcodeID, int, int op1, int op2, OperandTypes)
{
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    RareCaseProfile* profile = m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
    if (opcodeID == op_add)
        addSlowCase(branchAdd32(Overflow, regT1, regT0));
    else if (opcodeID == op_sub)
        addSlowCase(branchSub32(Overflow, regT1, regT0));
    else {
        ASSERT(opcodeID == op_mul);
        if (shouldEmitProfiling()) {
            // We want to be able to measure if this is taking the slow case just
            // because of negative zero. If this produces positive zero, then we
            // don't want the slow case to be taken because that will throw off
            // speculative compilation.
            move(regT0, regT2);
            addSlowCase(branchMul32(Overflow, regT1, regT2));
            JumpList done;
            done.append(branchTest32(NonZero, regT2));
            Jump negativeZero = branch32(LessThan, regT0, TrustedImm32(0));
            done.append(branch32(GreaterThanOrEqual, regT1, TrustedImm32(0)));
            negativeZero.link(this);
            // We only get here if we have a genuine negative zero. Record this,
            // so that the speculative JIT knows that we failed speculation
            // because of a negative zero.
            add32(TrustedImm32(1), AbsoluteAddress(&profile->m_counter));
            addSlowCase(jump());
            done.link(this);
            move(regT2, regT0);
        } else {
            addSlowCase(branchMul32(Overflow, regT1, regT0));
            addSlowCase(branchTest32(Zero, regT0));
        }
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
}

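// The profiling block above detects negative zero by its sign rule: a 32-bit
// product of zero combined with a negative operand (e.g. 0 * -5) means the
// true result is -0, which cannot be represented as an int32. Genuine +0
// results stay on the fast path; only the -0 case bumps the profile counter
// and falls through to the slow case.
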
void JIT::compileBinaryArithOpSlowCase(Instruction* currentInstruction, OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, int result, int op1, int op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
{
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((TagTypeNumber + DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);

    Jump notImm1;
    Jump notImm2;
    if (op1HasImmediateIntFastCase) {
        notImm2 = getSlowCase(iter);
    } else if (op2HasImmediateIntFastCase) {
        notImm1 = getSlowCase(iter);
    } else {
        notImm1 = getSlowCase(iter);
        notImm2 = getSlowCase(iter);
    }

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);

    Label stubFunctionCall(this);

    JITSlowPathCall slowPathCall(this, currentInstruction, opcodeID == op_add ? slow_path_add : opcodeID == op_sub ? slow_path_sub : slow_path_mul);
    slowPathCall.call();
    Jump end = jump();

    if (op1HasImmediateIntFastCase) {
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op1, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        add64(tagTypeNumberRegister, regT0);
        move64ToDouble(regT0, fpRegT2);
    } else if (op2HasImmediateIntFastCase) {
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op2, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        add64(tagTypeNumberRegister, regT0);
        move64ToDouble(regT0, fpRegT2);
    } else {
        // if we get here, eax is not an int32, edx not yet checked.
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        add64(tagTypeNumberRegister, regT0);
        move64ToDouble(regT0, fpRegT1);
        Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT2);
        Jump op2wasInteger = jump();

        // if we get here, eax IS an int32, edx is not.
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        convertInt32ToDouble(regT0, fpRegT1);
        op2isDouble.link(this);
        add64(tagTypeNumberRegister, regT1);
        move64ToDouble(regT1, fpRegT2);
        op2wasInteger.link(this);
    }

    if (opcodeID == op_add)
        addDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_sub)
        subDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_mul)
        mulDouble(fpRegT2, fpRegT1);
    else {
        ASSERT(opcodeID == op_div);
        divDouble(fpRegT2, fpRegT1);
    }
    moveDoubleTo64(fpRegT1, regT0);
    sub64(tagTypeNumberRegister, regT0);
    emitPutVirtualRegister(result, regT0);

    end.link(this);
}

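// getSlowCase/linkSlowCase consume the slow-case list strictly in the order
// the fast path registered entries, which is why the branch structure here
// must mirror compileBinaryArithOp exactly, including the extra op_mul entry
// for the 0 * negative-number case.
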
void JIT::emit_op_add(Instruction* currentInstruction)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        addSlowCase();
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
        slowPathCall.call();
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op1)), regT1));
        emitFastArithIntToImmNoCheck(regT1, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op2)), regT1));
        emitFastArithIntToImmNoCheck(regT1, regT0);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        linkDummySlowCase(iter);
        return;
    }

    bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
    bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
    compileBinaryArithOpSlowCase(currentInstruction, op_add, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
}

void JIT::emit_op_mul(Instruction* currentInstruction)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        // Add a special fast case profile because the DFG JIT will expect one.
        m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
        emitFastArithReTagImmediate(regT1, regT0);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        // Add a special fast case profile because the DFG JIT will expect one.
        m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
        emitFastArithReTagImmediate(regT1, regT0);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}

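// Restricting the constant fast case to value > 0 sidesteps negative zero:
// with a strictly positive constant a zero product can only arise from a zero
// operand, and the sign of that product is +0, so no -0 check is needed on
// this path.
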
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
    bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
    compileBinaryArithOpSlowCase(currentInstruction, op_mul, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
}

void JIT::emit_op_div(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateDouble(op1)) {
        emitGetVirtualRegister(op1, regT0);
        add64(tagTypeNumberRegister, regT0);
        move64ToDouble(regT0, fpRegT0);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitLoadInt32ToDouble(op1, fpRegT0);
    } else {
        emitGetVirtualRegister(op1, regT0);
        if (!types.first().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT0);
        Jump notInt = emitJumpIfNotImmediateInteger(regT0);
        convertInt32ToDouble(regT0, fpRegT0);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        add64(tagTypeNumberRegister, regT0);
        move64ToDouble(regT0, fpRegT0);
        skipDoubleLoad.link(this);
    }

    if (isOperandConstantImmediateDouble(op2)) {
        emitGetVirtualRegister(op2, regT1);
        add64(tagTypeNumberRegister, regT1);
        move64ToDouble(regT1, fpRegT1);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoadInt32ToDouble(op2, fpRegT1);
    } else {
        emitGetVirtualRegister(op2, regT1);
        if (!types.second().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT1);
        Jump notInt = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        add64(tagTypeNumberRegister, regT1);
        move64ToDouble(regT1, fpRegT1);
        skipDoubleLoad.link(this);
    }
    divDouble(fpRegT1, fpRegT0);

    // Is the result actually an integer? The DFG JIT would really like to know. If it's
    // not an integer, we increment a count. If this together with the slow case counter
    // are below threshold then the DFG JIT will compile this division with a speculation
    // that the remainder is zero.

    // As well, there are cases where a double result here would cause an important field
    // in the heap to sometimes have doubles in it, resulting in double predictions getting
    // propagated to a use site where it might cause damage (such as the index to an array
    // access). So if we are DFG compiling anything in the program, we want this code to
    // ensure that it produces integers whenever possible.

    JumpList notInteger;
    branchConvertDoubleToInt32(fpRegT0, regT0, notInteger, fpRegT1);
    // If we've got an integer, we might as well make that the result of the division.
    emitFastArithReTagImmediate(regT0, regT0);
    Jump isInteger = jump();
    notInteger.link(this);
    moveDoubleTo64(fpRegT0, regT0);
    Jump doubleZero = branchTest64(Zero, regT0);
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));
    sub64(tagTypeNumberRegister, regT0);
    Jump trueDouble = jump();
    doubleZero.link(this);
    move(tagTypeNumberRegister, regT0);
    trueDouble.link(this);
    isInteger.link(this);

    emitPutVirtualRegister(dst, regT0);
}

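// Note: branchConvertDoubleToInt32 also takes the notInteger path for a zero
// result, since it cannot cheaply distinguish +0.0 from -0.0. The
// branchTest64(Zero, ...) above then inspects the raw bits: all-zero bits mean
// +0.0, which is re-tagged as the integer zero (tagTypeNumberRegister alone is
// the boxed int32 0), while -0.0 keeps its double representation and bumps the
// special fast case counter for the DFG.
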
void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
    if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
        if (!ASSERT_DISABLED)
            abortWithReason(JITDivOperandsAreNotNumbers);
        return;
    }
    if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter);
    }
    if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
        if (!types.second().definitelyIsNumber())
            linkSlowCase(iter);
    }
    // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
    slowPathCall.call();
}

void JIT::emit_op_sub(Instruction* currentInstruction)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOp(op_sub, result, op1, op2, types);
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOpSlowCase(currentInstruction, op_sub, iter, result, op1, op2, types, false, false);
}

/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */

#endif // USE(JSVALUE64)

} // namespace JSC

#endif // ENABLE(JIT)