/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE64)
#include "JIT.h"

#include "CodeBlock.h"
#include "JITInlines.h"
#include "JITStubCall.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "Operations.h"
#include "ResultType.h"
#include "SamplingTool.h"

namespace JSC {
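// Reader's note (an informal summary; the authoritative tag definitions live
// in JSValue.h): under USE(JSVALUE64) a JSValue is a single 64-bit word.
// Immediate integers are the int32 payload with the top 16 bits set
// (TagTypeNumber = 0xFFFF000000000000), so the int 5 boxes to
// 0xFFFF000000000005. Doubles are stored as their IEEE bit pattern plus
// DoubleEncodeOffset (1 << 48). Since 0xFFFF000000000000 + 0x0001000000000000
// wraps to zero in 64 bits, TagTypeNumber + DoubleEncodeOffset == 0 -- which
// is why add64(tagTypeNumberRegister, reg) unboxes a double and
// sub64(tagTypeNumberRegister, reg) reboxes one throughout this file, and why
// compileBinaryArithOpSlowCase COMPILE_ASSERTs exactly that identity.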
void JIT::emit_op_jless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jless, op1, op2, target, LessThan);
}
void JIT::emit_op_jlesseq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jlesseq, op1, op2, target, LessThanOrEqual);
}
void JIT::emit_op_jgreater(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jgreater, op1, op2, target, GreaterThan);
}
void JIT::emit_op_jgreatereq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jgreatereq, op1, op2, target, GreaterThanOrEqual);
}
void JIT::emit_op_jnless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jnless, op1, op2, target, GreaterThanOrEqual);
}
void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jnlesseq, op1, op2, target, GreaterThan);
}
void JIT::emit_op_jngreater(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jngreater, op1, op2, target, LessThanOrEqual);
}
void JIT::emit_op_jngreatereq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJump(op_jngreatereq, op1, op2, target, LessThan);
}
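// Note the condition mapping in the eight emitters above: each inverted jump
// (op_jnless and friends) simply branches on the complementary integer
// condition, e.g. op_jnless uses GreaterThanOrEqual. That is only valid
// because the fast path has proved both operands are immediate int32s, where
// !(a < b) and (a >= b) coincide; for doubles the two differ on NaN, which is
// why the slow cases below use dedicated unordered conditions instead.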
void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleLessThan, cti_op_jless, false, iter);
}
void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqual, cti_op_jlesseq, false, iter);
}
void JIT::emitSlow_op_jgreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThan, cti_op_jgreater, false, iter);
}
void JIT::emitSlow_op_jgreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqual, cti_op_jgreatereq, false, iter);
}
void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqualOrUnordered, cti_op_jless, true, iter);
}
void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrUnordered, cti_op_jlesseq, true, iter);
}
void JIT::emitSlow_op_jngreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqualOrUnordered, cti_op_jgreater, true, iter);
}
void JIT::emitSlow_op_jngreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrUnordered, cti_op_jgreatereq, true, iter);
}
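// The "OrUnordered" double conditions above are the NaN story: in JS every
// relational comparison involving NaN is false, so an inverted jump such as
// op_jnless must be taken when the comparison is unordered (e.g. !(x < NaN)
// is true), while the non-inverted jumps use the plain ordered conditions and
// simply fall through on NaN.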
void JIT::emit_op_negate(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src, regT0);

    Jump srcNotInt = emitJumpIfNotImmediateInteger(regT0);
    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
    neg32(regT0);
    emitFastArithReTagImmediate(regT0, regT0);

    Jump end = jump();

    srcNotInt.link(this);
    emitJumpSlowCaseIfNotImmediateNumber(regT0);

    move(TrustedImm64((int64_t)0x8000000000000000ull), regT1);
    xor64(regT1, regT0);

    end.link(this);
    emitPutVirtualRegister(dst);
}
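// Why the 0x7fffffff test in emit_op_negate sends values to the slow case:
// branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)) fires when the low 31
// bits are all zero, i.e. for 0 (whose negation is -0.0, representable only
// as a double) and for INT32_MIN = 0x80000000 (whose negation overflows
// int32). Every other immediate int negates inline; the double path just
// flips the IEEE sign bit with the 0x8000000000000000 mask.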
void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    JITStubCall stubCall(this, cti_op_negate);
    stubCall.addArgument(regT0);
    stubCall.call(dst);
}
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
    lshift32(regT2, regT0);
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}
void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    UNUSED_PARAM(op1);
    UNUSED_PARAM(op2);

    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT2);
    stubCall.call(result);
}
void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        // isOperandConstantImmediateInt(op2) => 1 SlowCase
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (supportsFloatingPointTruncate()) {
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
            // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            add64(tagTypeNumberRegister, regT0);
            move64ToDouble(regT0, fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
            lhsIsInt.link(this);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        } else {
            // !supportsFloatingPoint() => 2 SlowCases
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        }
        emitFastArithImmToInt(regT2);
        rshift32(regT2, regT0);
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(result);
}
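// For reference, the ECMA-262 semantics implemented above: the shift count is
// taken modulo 32 (the "& 0x1f" mask), and >> is an arithmetic shift, so
// (-8) >> 1 === -4 and x >> 32 === x for int32 x. The
// branchTruncateDoubleToInt32 path additionally lets a double left operand
// whose value fits an int32 be truncated inline rather than always falling
// back to the stub.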
void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_rshift);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
    } else {
        if (supportsFloatingPointTruncate()) {
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
            // We're reloading op1 to regT0 as we can no longer guarantee that
            // we have not munged the operand. It may have already been shifted
            // correctly, but it still will not have been tagged.
            stubCall.addArgument(op1, regT0);
            stubCall.addArgument(regT2);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
            stubCall.addArgument(regT0);
            stubCall.addArgument(regT2);
        }
    }

    stubCall.call(result);
}
void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Slow case of urshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithImmToInt(regT0);
        int shift = getConstantOperand(op2).asInt32();
        if (shift)
            urshift32(Imm32(shift & 0x1f), regT0);
        // unsigned shift < 0 or shift = k*2^32 may result in (essentially)
        // a toUint conversion, which can result in a value we can not represent
        // as an immediate int.
        if (shift < 0 || !(shift & 31))
            addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(dst, regT0);
        return;
    }
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    if (!isOperandConstantImmediateInt(op1))
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT1);
    urshift32(regT1, regT0);
    addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(dst, regT0);
}
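// The LessThan-zero checks above exist because >>> yields an unsigned 32-bit
// result: (-1) >>> 0 === 4294967295, which exceeds INT32_MAX and cannot be
// retagged as an immediate integer, so such results bail to the slow case.
// A masked shift count in 1..31 always clears the sign bit, which is why the
// constant-shift path only plants the check when the effective count can be
// zero (a multiple of 32, with negative constants treated conservatively) or
// when the count is a runtime value.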
void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32();
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
            add64(tagTypeNumberRegister, regT0);
            move64ToDouble(regT0, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift)
                urshift32(Imm32(shift & 0x1f), regT0);
            if (shift < 0 || !(shift & 31))
                failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
            emitFastArithReTagImmediate(regT0, regT0);
            emitPutVirtualRegister(dst, regT0);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
        if (shift < 0 || !(shift & 31))
            linkSlowCase(iter); // failed to box in hot path
    } else {
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
                add64(tagTypeNumberRegister, regT0);
                move64ToDouble(regT0, fpRegT0);
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
                emitFastArithImmToInt(regT1);
                urshift32(regT1, regT0);
                failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
                emitFastArithReTagImmediate(regT0, regT0);
                emitPutVirtualRegister(dst, regT0);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
        linkSlowCase(iter); // Can't represent unsigned result as an immediate
    }

    JITStubCall stubCall(this, cti_op_urshift);
    stubCall.addArgument(op1, regT0);
    stubCall.addArgument(op2, regT1);
    stubCall.call(dst);
}
void JIT::emit_compareAndJump(OpcodeID, unsigned op1, unsigned op2, unsigned target, RelationalCondition condition)
{
    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateChar(op1)) {
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t op2imm = getConstantOperandImmediateInt(op2);
        addJump(branch32(condition, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
        int32_t op1imm = getConstantOperandImmediateInt(op1);
        addJump(branch32(commute(condition), regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(condition, regT0, regT1), target);
    }
}
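// commute() above is what lets the constant-on-the-left cases reuse branch32
// with swapped operands: the constant has to sit in the Imm32 slot, so
// "const < x" is emitted as "x > const" -- the condition is commuted
// (LessThan <-> GreaterThan and friends), not inverted.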
void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition condition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jlesseq), OPCODE_LENGTH_op_jlesseq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnless), OPCODE_LENGTH_op_jnless_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnlesseq), OPCODE_LENGTH_op_jnlesseq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreater), OPCODE_LENGTH_op_jgreater_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreatereq), OPCODE_LENGTH_op_jgreatereq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreater), OPCODE_LENGTH_op_jngreater_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreatereq), OPCODE_LENGTH_op_jngreatereq_equals_op_jless);

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, stub);
        stubCall.addArgument(op1, regT0);
        stubCall.addArgument(op2, regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            add64(tagTypeNumberRegister, regT0);
            move64ToDouble(regT0, fpRegT0);

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
        }

        JITStubCall stubCall(this, stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            add64(tagTypeNumberRegister, regT1);
            move64ToDouble(regT1, fpRegT1);

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
        }

        JITStubCall stubCall(this, stub);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
    } else {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            Jump fail3 = emitJumpIfImmediateInteger(regT1);
            add64(tagTypeNumberRegister, regT0);
            add64(tagTypeNumberRegister, regT1);
            move64ToDouble(regT0, fpRegT0);
            move64ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
        }

        linkSlowCase(iter);
        JITStubCall stubCall(this, stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
    }
}
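// The 'invert' flag is how the op_jn* opcodes share stub functions with
// their positive counterparts: op_jnless, say, calls cti_op_jless and then
// branches when the stub's boolean result (returned in regT0) is zero rather
// than non-zero -- hence the branchTest32(invert ? Zero : NonZero, regT0)
// pattern at each stub-call exit above.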
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t imm = getConstantOperandImmediateInt(op1);
        and64(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t imm = getConstantOperandImmediateInt(op2);
        and64(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        and64(regT1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}
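// Why retagging in emit_op_bitand is conditional on imm >= 0: and64 operates
// on the whole boxed word. A negative Imm32 sign-extends to a 64-bit mask
// with all high bits set, so TagTypeNumber survives the AND and the result is
// still a boxed int. A non-negative immediate clears the high bits, leaving a
// raw int32 that must be retagged before it can be stored as a JSValue.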
void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    if (isOperandConstantImmediateInt(op1)) {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT0);
        stubCall.call(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call(result);
    }
}
void JIT::emit_op_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(srcDst);
}
void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_inc);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}
void JIT::emit_op_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(srcDst);
}
void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_dec);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}
/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

#if CPU(X86) || CPU(X86_64)
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);

    emitGetVirtualRegisters(op1, regT3, op2, regT2);
    emitJumpSlowCaseIfNotImmediateInteger(regT3);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);

    move(regT3, regT0);
    addSlowCase(branchTest32(Zero, regT2));
    Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
    denominatorNotNeg1.link(this);
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
    addSlowCase(branchTest32(Zero, regT1));
    numeratorPositive.link(this);
    emitFastArithReTagImmediate(regT1, regT0);
    emitPutVirtualRegister(result);
}
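// The five slow cases planted above, in order: either operand is not an
// immediate int; the divisor is zero; the numerator is INT32_MIN with a
// divisor of -1 (the one quotient that overflows, on which x86 IDIV faults);
// and a zero remainder with a negative numerator, e.g. (-8) % 4, which in JS
// evaluates to -0 and so cannot be returned as an immediate integer. IDIV
// leaves the remainder in edx (regT1), which is what gets retagged on
// success.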
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(regT3);
    stubCall.addArgument(regT2);
    stubCall.call(result);
}
#else // CPU(X86) || CPU(X86_64)
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
}
void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    UNREACHABLE_FOR_PLATFORM();
}
#endif // CPU(X86) || CPU(X86_64)

/* ------------------------------ END: OP_MOD ------------------------------ */

/* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if ENABLE(VALUE_PROFILER)
    RareCaseProfile* profile = m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif
    if (opcodeID == op_add)
        addSlowCase(branchAdd32(Overflow, regT1, regT0));
    else if (opcodeID == op_sub)
        addSlowCase(branchSub32(Overflow, regT1, regT0));
    else {
        ASSERT(opcodeID == op_mul);
#if ENABLE(VALUE_PROFILER)
        if (shouldEmitProfiling()) {
            // We want to be able to measure if this is taking the slow case just
            // because of negative zero. If this produces positive zero, then we
            // don't want the slow case to be taken because that will throw off
            // speculative compilation.
            move(regT0, regT2);
            addSlowCase(branchMul32(Overflow, regT1, regT2));
            JumpList done;
            done.append(branchTest32(NonZero, regT2));
            Jump negativeZero = branch32(LessThan, regT0, TrustedImm32(0));
            done.append(branch32(GreaterThanOrEqual, regT1, TrustedImm32(0)));
            negativeZero.link(this);
            // We only get here if we have a genuine negative zero. Record this,
            // so that the speculative JIT knows that we failed speculation
            // because of a negative zero.
            add32(TrustedImm32(1), AbsoluteAddress(&profile->m_counter));
            addSlowCase(jump());
            done.link(this);
            move(regT2, regT0);
        } else {
            addSlowCase(branchMul32(Overflow, regT1, regT0));
            addSlowCase(branchTest32(Zero, regT0));
        }
#else
        addSlowCase(branchMul32(Overflow, regT1, regT0));
        addSlowCase(branchTest32(Zero, regT0));
#endif
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
}
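// The negative-zero handling in the op_mul case deserves an example: in JS,
// -1 * 0 === -0 but 0 * 1 === +0, and an int32 cannot represent -0, so a
// zero product only needs the slow case when one operand was negative. When
// profiling, the code above additionally separates "bailed because of a
// genuine -0" from ordinary overflow by bumping the special fast case
// counter, so the DFG's speculation decisions are not skewed by benign zeros.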
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
{
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((TagTypeNumber + DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);

    Jump notImm1;
    Jump notImm2;
    if (op1HasImmediateIntFastCase) {
        notImm2 = getSlowCase(iter);
    } else if (op2HasImmediateIntFastCase) {
        notImm1 = getSlowCase(iter);
    } else {
        notImm1 = getSlowCase(iter);
        notImm2 = getSlowCase(iter);
    }

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);
    emitGetVirtualRegister(op1, regT0);

    Label stubFunctionCall(this);
    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
    if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
        emitGetVirtualRegister(op1, regT0);
        emitGetVirtualRegister(op2, regT1);
    }
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(result);
    Jump end = jump();

    if (op1HasImmediateIntFastCase) {
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op1, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        add64(tagTypeNumberRegister, regT0);
        move64ToDouble(regT0, fpRegT2);
    } else if (op2HasImmediateIntFastCase) {
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op2, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        add64(tagTypeNumberRegister, regT0);
        move64ToDouble(regT0, fpRegT2);
    } else {
        // if we get here, eax is not an int32, edx not yet checked.
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        add64(tagTypeNumberRegister, regT0);
        move64ToDouble(regT0, fpRegT1);
        Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT2);
        Jump op2wasInteger = jump();

        // if we get here, eax IS an int32, edx is not.
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        convertInt32ToDouble(regT0, fpRegT1);
        op2isDouble.link(this);
        add64(tagTypeNumberRegister, regT1);
        move64ToDouble(regT1, fpRegT2);
        op2wasInteger.link(this);
    }

    if (opcodeID == op_add)
        addDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_sub)
        subDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_mul)
        mulDouble(fpRegT2, fpRegT1);
    else {
        ASSERT(opcodeID == op_div);
        divDouble(fpRegT2, fpRegT1);
    }
    moveDoubleTo64(fpRegT1, regT0);
    sub64(tagTypeNumberRegister, regT0);
    emitPutVirtualRegister(result, regT0);

    end.link(this);
}
void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op1)), regT1));
        emitFastArithIntToImmNoCheck(regT1, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op2)), regT1));
        emitFastArithIntToImmNoCheck(regT1, regT0);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        linkDummySlowCase(iter);
        return;
    }

    bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
    bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
    compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
}
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
#if ENABLE(VALUE_PROFILER)
        // Add a special fast case profile because the DFG JIT will expect one.
        m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
        emitFastArithReTagImmediate(regT1, regT0);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
#if ENABLE(VALUE_PROFILER)
        // Add a special fast case profile because the DFG JIT will expect one.
        m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
        emitFastArithReTagImmediate(regT1, regT0);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
    bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
    compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
}
void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateDouble(op1)) {
        emitGetVirtualRegister(op1, regT0);
        add64(tagTypeNumberRegister, regT0);
        move64ToDouble(regT0, fpRegT0);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitLoadInt32ToDouble(op1, fpRegT0);
    } else {
        emitGetVirtualRegister(op1, regT0);
        if (!types.first().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT0);
        Jump notInt = emitJumpIfNotImmediateInteger(regT0);
        convertInt32ToDouble(regT0, fpRegT0);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        add64(tagTypeNumberRegister, regT0);
        move64ToDouble(regT0, fpRegT0);
        skipDoubleLoad.link(this);
    }

    if (isOperandConstantImmediateDouble(op2)) {
        emitGetVirtualRegister(op2, regT1);
        add64(tagTypeNumberRegister, regT1);
        move64ToDouble(regT1, fpRegT1);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoadInt32ToDouble(op2, fpRegT1);
    } else {
        emitGetVirtualRegister(op2, regT1);
        if (!types.second().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT1);
        Jump notInt = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        add64(tagTypeNumberRegister, regT1);
        move64ToDouble(regT1, fpRegT1);
        skipDoubleLoad.link(this);
    }
    divDouble(fpRegT1, fpRegT0);

#if ENABLE(VALUE_PROFILER)
    // Is the result actually an integer? The DFG JIT would really like to know. If it's
    // not an integer, we increment a count. If this together with the slow case counter
    // are below threshold then the DFG JIT will compile this division with a speculation
    // that the remainder is zero.

    // As well, there are cases where a double result here would cause an important field
    // in the heap to sometimes have doubles in it, resulting in double predictions getting
    // propagated to a use site where it might cause damage (such as the index to an array
    // access). So if we are DFG compiling anything in the program, we want this code to
    // ensure that it produces integers whenever possible.

    JumpList notInteger;
    branchConvertDoubleToInt32(fpRegT0, regT0, notInteger, fpRegT1);
    // If we've got an integer, we might as well make that the result of the division.
    emitFastArithReTagImmediate(regT0, regT0);
    Jump isInteger = jump();
    notInteger.link(this);
    moveDoubleTo64(fpRegT0, regT0);
    Jump doubleZero = branchTest64(Zero, regT0);
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));
    sub64(tagTypeNumberRegister, regT0);
    Jump trueDouble = jump();
    doubleZero.link(this);
    move(tagTypeNumberRegister, regT0);
    trueDouble.link(this);
    isInteger.link(this);
#else
    // Double result.
    moveDoubleTo64(fpRegT0, regT0);
    sub64(tagTypeNumberRegister, regT0);
#endif

    emitPutVirtualRegister(dst, regT0);
}
void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
    if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
#ifndef NDEBUG
        breakpoint();
#endif
        return;
    }
    if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter);
    }
    if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
        if (!types.second().definitelyIsNumber())
            linkSlowCase(iter);
    }
    // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
}
void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOp(op_sub, result, op1, op2, types);
    emitPutVirtualRegister(result);
}
void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types, false, false);
}
/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */

} // namespace JSC

#endif // USE(JSVALUE64)
#endif // ENABLE(JIT)