/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE64)

#include "JIT.h"

#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "ResultType.h"
#include "SamplingTool.h"

namespace JSC {

void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
    lshift32(regT2, regT0);
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}
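
// The fast paths in this file share one shape: fetch the operands into registers, bail to a
// slow case unless both are immediate integers, strip the tags, perform the 32-bit operation,
// re-tag the result, and write it back to the result virtual register. Each slow-case jump
// recorded here must be matched, in the same order, by a linkSlowCase(iter) in the matching
// emitSlow_ function below.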

void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    UNUSED_PARAM(op1);
    UNUSED_PARAM(op2);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT2);
    stubCall.call(result);
}
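
// The slow path simply falls back to the C++ helper: JITStubCall marshals the boxed operands
// to the stub (cti_op_lshift here), and call(result) stores the returned JSValue into the
// 'result' virtual register.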

void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        // isOperandConstantImmediateInt(op2) => 1 SlowCase
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (supportsFloatingPointTruncate()) {
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
            // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
            lhsIsInt.link(this);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        } else {
            // !supportsFloatingPoint() => 2 SlowCases
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        }
        emitFastArithImmToInt(regT2);
        rshift32(regT2, regT0);
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(result);
}
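
// When supportsFloatingPointTruncate() is true, the rshift fast path above also accepts a double
// left-hand side: addPtr(tagTypeNumberRegister, ...) recovers the raw IEEE-754 bits of the boxed
// value, movePtrToDouble moves them into an FP register, and branchTruncateDoubleToInt32 bails to
// the slow case when the value cannot be truncated to an in-range int32.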

void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_rshift);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
    } else {
        if (supportsFloatingPointTruncate()) {
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
            // We're reloading op1 to regT0 as we can no longer guarantee that
            // we have not munged the operand. It may have already been shifted
            // correctly, but it still will not have been tagged.
            stubCall.addArgument(op1, regT0);
            stubCall.addArgument(regT2);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
            stubCall.addArgument(regT0);
            stubCall.addArgument(regT2);
        }
    }

    stubCall.call(result);
}

void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Slow case of urshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithImmToInt(regT0);
        int shift = getConstantOperand(op2).asInt32();
        urshift32(Imm32(shift & 0x1f), regT0);
        // unsigned shift < 0 or shift = k*2^32 may result in (essentially)
        // a toUint conversion, which can result in a value we cannot represent
        // as an immediate int.
        if (shift < 0 || !(shift & 31))
            addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(dst, regT0);
        return;
    }
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    if (!isOperandConstantImmediateInt(op1))
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT1);
    urshift32(regT1, regT0);
    addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(dst, regT0);
}
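
// The branch32(LessThan, regT0, 0) checks above exist because urshift produces an unsigned
// 32-bit result: when the effective shift amount is zero, bit 31 may remain set, giving a value
// above INT32_MAX that cannot be stored as an immediate integer, so those cases are punted to
// the slow path ("Can't represent unsigned result as an immediate" below).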

void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32();
        // op1 is in regT0.
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            urshift32(Imm32(shift & 0x1f), regT0);
            if (shift < 0 || !(shift & 31))
                failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
            emitFastArithReTagImmediate(regT0, regT0);
            emitPutVirtualRegister(dst, regT0);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
        if (shift < 0 || !(shift & 31))
            linkSlowCase(iter); // failed to box in hot path
    } else {
        // op1 is in regT0, op2 is in regT1.
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
                addPtr(tagTypeNumberRegister, regT0);
                movePtrToDouble(regT0, fpRegT0);
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
                emitFastArithImmToInt(regT1);
                urshift32(regT1, regT0);
                failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
                emitFastArithReTagImmediate(regT0, regT0);
                emitPutVirtualRegister(dst, regT0);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
        linkSlowCase(iter); // Can't represent unsigned result as an immediate
    }

    JITStubCall stubCall(this, cti_op_urshift);
    stubCall.addArgument(op1, regT0);
    stubCall.addArgument(op2, regT1);
    stubCall.call(dst);
}

void JIT::emit_op_jnless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateChar(op1)) {
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t op2imm = getConstantOperandImmediateInt(op2);
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
        int32_t op1imm = getConstantOperandImmediateInt(op1);
        addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(GreaterThanOrEqual, regT0, regT1), target);
    }
}
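
// op_jnless branches to 'target' when op1 < op2 does NOT hold, which is why the inline integer
// compares above use the inverted conditions (GreaterThanOrEqual / LessThanOrEqual); falling
// through means the less-than relation held and execution continues with the next opcode.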

void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(op1, regT0);
        stubCall.addArgument(op2, regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(Zero, regT0), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

            fail1.link(this);
        }

        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(Zero, regT0), target);

    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT1, fpRegT1);

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

            fail1.link(this);
        }

        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(Zero, regT0), target);

    } else {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            Jump fail3 = emitJumpIfImmediateInteger(regT1);
            addPtr(tagTypeNumberRegister, regT0);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT0, fpRegT0);
            movePtrToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
        }

        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(Zero, regT0), target);
    }
}
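
// The double compares above use the ...OrUnordered conditions because NaN comparisons are
// unordered: op_jnless must also jump to 'target' when either operand is NaN, since
// !(op1 < op2) is true in that case. The stub fallback likewise jumps when cti_op_jless
// returns false, via branchTest32(Zero, regT0).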

void JIT::emit_op_jless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateChar(op1)) {
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(GreaterThan, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(LessThan, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t op2imm = getConstantOperandImmediateInt(op2);
        addJump(branch32(LessThan, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
        int32_t op1imm = getConstantOperandImmediateInt(op1);
        addJump(branch32(GreaterThan, regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(LessThan, regT0, regT1), target);
    }
}

void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(op1, regT0);
        stubCall.addArgument(op2, regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

            fail1.link(this);
        }

        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);

    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT1, fpRegT1);

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

            fail1.link(this);
        }

        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);

    } else {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            Jump fail3 = emitJumpIfImmediateInteger(regT1);
            addPtr(tagTypeNumberRegister, regT0);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT0, fpRegT0);
            movePtrToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
        }

        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
    }
}

void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateChar(op1)) {
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t op2imm = getConstantOperandImmediateInt(op2);
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
        int32_t op1imm = getConstantOperandImmediateInt(op1);
        addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT1), target);
    }
}

void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(op1, regT0);
        stubCall.addArgument(op2, regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));

            fail1.link(this);
        }

        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);

    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT1, fpRegT1);

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));

            fail1.link(this);
        }

        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);

    } else {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            Jump fail3 = emitJumpIfImmediateInteger(regT1);
            addPtr(tagTypeNumberRegister, regT0);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT0, fpRegT0);
            movePtrToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));

            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
        }

        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
    }
}

void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
{
    emit_op_jlesseq(currentInstruction, true);
}

void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitSlow_op_jlesseq(currentInstruction, iter, true);
}

void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t imm = getConstantOperandImmediateInt(op1);
        andPtr(Imm32(imm), regT0);
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t imm = getConstantOperandImmediateInt(op2);
        andPtr(Imm32(imm), regT0);
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        andPtr(regT1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}
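
// andPtr can work directly on two boxed int32s because both values carry the same integer tag
// in their upper bits, so AND-ing preserves the tag; with an untagged Imm32 constant the AND
// clears the tag bits, hence the emitFastArithIntToImmNoCheck re-tag in the constant cases above.
// (This relies on the JSVALUE64 encoding keeping the integer tag in the high bits.)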

void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    if (isOperandConstantImmediateInt(op1)) {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT0);
        stubCall.call(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call(result);
    }
}

void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
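
// post_inc needs two values: the original operand (the expression result) and the incremented
// operand (written back to srcDst). regT0 keeps the untouched boxed value for 'result', while
// regT1 holds the copy that is incremented, overflow-checked and re-tagged.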

void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_post_inc);
    stubCall.addArgument(regT0);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(result);
}

void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchSub32(Zero, TrustedImm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_post_dec);
    stubCall.addArgument(regT0);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(result);
}

void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(srcDst);
}

void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_inc);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}

void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchSub32(Zero, TrustedImm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(srcDst);
}

void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}

/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

#if CPU(X86) || CPU(X86_64) || CPU(MIPS)

void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if CPU(X86) || CPU(X86_64)
    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);
#endif

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);

    addSlowCase(branchPtr(Equal, regT2, TrustedImmPtr(JSValue::encode(jsNumber(0)))));
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
    emitFastArithReTagImmediate(regT1, regT0);
    emitPutVirtualRegister(result);
}
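
// x86 idivl divides the 64-bit value in edx:eax by its operand, leaving the quotient in eax and
// the remainder in edx; cdq first sign-extends eax into edx. That is why the ASSERTs above pin
// regT0/regT1/regT2 to eax/edx/ecx, and why op_mod re-tags the remainder out of regT1 (edx).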

void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT2);
    stubCall.call(result);
}

#else // CPU(X86) || CPU(X86_64) || CPU(MIPS)

void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
}

void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if ENABLE(JIT_USE_SOFT_MODULO)
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
#else
    ASSERT_NOT_REACHED();
#endif
}

#endif // CPU(X86) || CPU(X86_64)

/* ------------------------------ END: OP_MOD ------------------------------ */

/* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */

void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    if (opcodeID == op_add)
        addSlowCase(branchAdd32(Overflow, regT1, regT0));
    else if (opcodeID == op_sub)
        addSlowCase(branchSub32(Overflow, regT1, regT0));
    else {
        ASSERT(opcodeID == op_mul);
        addSlowCase(branchMul32(Overflow, regT1, regT0));
        addSlowCase(branchTest32(Zero, regT0));
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
}

void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
{
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((TagTypeNumber + DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);

    Jump notImm1;
    Jump notImm2;
    if (op1HasImmediateIntFastCase) {
        notImm2 = getSlowCase(iter);
    } else if (op2HasImmediateIntFastCase) {
        notImm1 = getSlowCase(iter);
    } else {
        notImm1 = getSlowCase(iter);
        notImm2 = getSlowCase(iter);
    }

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);
    emitGetVirtualRegister(op1, regT0);

    Label stubFunctionCall(this);
    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
    if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
        emitGetVirtualRegister(op1, regT0);
        emitGetVirtualRegister(op2, regT1);
    }
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(result);
    Jump end = jump();

    if (op1HasImmediateIntFastCase) {
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op1, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT2);
    } else if (op2HasImmediateIntFastCase) {
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op2, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT2);
    } else {
        // if we get here, eax is not an int32, edx not yet checked.
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT1);
        Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT2);
        Jump op2wasInteger = jump();

        // if we get here, eax IS an int32, edx is not.
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        convertInt32ToDouble(regT0, fpRegT1);
        op2isDouble.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT2);
        op2wasInteger.link(this);
    }

    if (opcodeID == op_add)
        addDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_sub)
        subDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_mul)
        mulDouble(fpRegT2, fpRegT1);
    else {
        ASSERT(opcodeID == op_div);
        divDouble(fpRegT2, fpRegT1);
    }
    moveDoubleToPtr(fpRegT1, regT0);
    subPtr(tagTypeNumberRegister, regT0);
    emitPutVirtualRegister(result, regT0);

    end.link(this);
}
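
// Boxing arithmetic used in the double paths: because TagTypeNumber + DoubleEncodeOffset == 0
// (see the COMPILE_ASSERT above), addPtr(tagTypeNumberRegister, reg) turns a boxed double into
// its raw IEEE-754 bit pattern for movePtrToDouble, and subPtr(tagTypeNumberRegister, reg)
// re-boxes the bits produced by moveDoubleToPtr before they are written back.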

void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
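
// If the static operand types say either operand might not be a number at all, op_add skips the
// arithmetic fast path and goes straight to the cti_op_add stub, which handles the generic cases
// (string concatenation and other ToPrimitive behaviour); emitSlow_op_add below returns early for
// the same condition because no slow cases were planted.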

void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
        return;

    bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
    bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
    compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
}

void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
    bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
    compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
}

void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateDouble(op1)) {
        emitGetVirtualRegister(op1, regT0);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitLoadInt32ToDouble(op1, fpRegT0);
    } else {
        emitGetVirtualRegister(op1, regT0);
        if (!types.first().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT0);
        Jump notInt = emitJumpIfNotImmediateInteger(regT0);
        convertInt32ToDouble(regT0, fpRegT0);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
        skipDoubleLoad.link(this);
    }

    if (isOperandConstantImmediateDouble(op2)) {
        emitGetVirtualRegister(op2, regT1);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoadInt32ToDouble(op2, fpRegT1);
    } else {
        emitGetVirtualRegister(op2, regT1);
        if (!types.second().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT1);
        Jump notInt = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
        skipDoubleLoad.link(this);
    }
    divDouble(fpRegT1, fpRegT0);

    moveDoubleToPtr(fpRegT0, regT0);
    subPtr(tagTypeNumberRegister, regT0);

    emitPutVirtualRegister(dst, regT0);
}
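
// op_div has no integer fast path: both operands are converted to doubles (boxed doubles are
// loaded directly, constant ints via convertInt32ToDouble), the division is performed in FP
// registers, and the result is always re-boxed as a double via subPtr(tagTypeNumberRegister, regT0).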

void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
    if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
        // The fast path planted no type checks in this case, so there are no slow cases to link.
        return;
    }
    if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter);
    }
    if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
        if (!types.second().definitelyIsNumber())
            linkSlowCase(iter);
    }
    // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
}

void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOp(op_sub, result, op1, op2, types);
    emitPutVirtualRegister(result);
}

void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types, false, false);
}

/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */

} // namespace JSC

#endif // USE(JSVALUE64)
#endif // ENABLE(JIT)