/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"
#include "JIT.h"

#if ENABLE(JIT)

#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JITStubs.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "ResultType.h"
#include "SamplingTool.h"

using namespace std;

namespace JSC {

#if !USE(JSVALUE32_64)
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
    lshift32(regT2, regT0);
#if !USE(JSVALUE64)
    addSlowCase(branchAdd32(Overflow, regT0, regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}
void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    UNUSED_PARAM(op1);
    UNUSED_PARAM(op2);
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    // If we are limited to 32-bit immediates there is a third slow case, which required the operands to have been reloaded.
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    notImm1.link(this);
    notImm2.link(this);
#endif
    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT2);
    stubCall.call(result);
}
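// Note on the slow paths in this file: the baseline fast path only covers tagged immediate
// integers, so every type-check or overflow branch recorded with addSlowCase() is re-linked
// here and funnelled into a JITStubCall (cti_op_lshift in this case), which is expected to
// apply the full ECMAScript semantics (ToInt32 of the left operand, ToUint32 of the shift
// count) in C++ before boxing the result.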
void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        // isOperandConstantImmediateInt(op2) => 1 SlowCase
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (supportsFloatingPointTruncate()) {
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
#if USE(JSVALUE64)
            // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
#else
            // supportsFloatingPoint() && !USE(JSVALUE64) => 5 SlowCases (of which 1 IfNotJSCell)
            emitJumpSlowCaseIfNotJSCell(regT0, op1);
            addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
            addSlowCase(branchAdd32(Overflow, regT0, regT0));
#endif
            lhsIsInt.link(this);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        } else {
            // !supportsFloatingPoint() => 2 SlowCases
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        }
        emitFastArithImmToInt(regT2);
        rshift32(regT2, regT0);
#if !USE(JSVALUE64)
        signExtend32ToPtr(regT0, regT0);
#endif
    }
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
#endif
    emitPutVirtualRegister(result);
}
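// Worked example for the 0x1f mask above: ECMA-262 only uses the low five bits of the shift
// count, so a constant shift of 33 behaves like a shift of 1 (33 & 0x1f == 1) and a shift of
// 32 leaves the value unchanged. Masking the constant at compile time lets the fast path emit
// a plain rshift32 with no range checks.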
void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_rshift);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
    } else {
        if (supportsFloatingPointTruncate()) {
#if USE(JSVALUE64)
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
#else
            linkSlowCaseIfNotJSCell(iter, op1);
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
#endif
            // We're reloading op1 to regT0 as we can no longer guarantee that
            // we have not munged the operand. It may have already been shifted
            // correctly, but it still will not have been tagged.
            stubCall.addArgument(op1, regT0);
            stubCall.addArgument(regT2);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
            stubCall.addArgument(regT0);
            stubCall.addArgument(regT2);
        }
    }

    stubCall.call(result);
}
void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Slow case of urshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithImmToInt(regT0);
        int shift = getConstantOperand(op2).asInt32();
        if (shift)
            urshift32(Imm32(shift & 0x1f), regT0);
        // unsigned shift < 0 or shift = k*2^32 may result in (essentially)
        // a toUint conversion, which can result in a value we can represent
        // as an immediate int.
        if (shift < 0 || !(shift & 31))
            addSlowCase(branch32(LessThan, regT0, Imm32(0)));
#if !USE(JSVALUE64)
        addSlowCase(branchAdd32(Overflow, regT0, regT0));
        signExtend32ToPtr(regT0, regT0);
#endif
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(dst, regT0);
        return;
    }
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    if (!isOperandConstantImmediateInt(op1))
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT1);
    urshift32(regT1, regT0);
    addSlowCase(branch32(LessThan, regT0, Imm32(0)));
#if !USE(JSVALUE64)
    addSlowCase(branchAdd32(Overflow, regT0, regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(dst, regT0);
}
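// The branch32(LessThan, regT0, Imm32(0)) slow cases above exist because op_urshift produces
// an unsigned 32-bit result: e.g. (-1) >>> 0 evaluates to 4294967295, which has the sign bit
// set and therefore cannot be re-tagged as an immediate signed int32 on the fast path; such
// results fall back to the stub.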
void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32();

        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift)
                urshift32(Imm32(shift & 0x1f), regT0);
            if (shift < 0 || !(shift & 31))
                failures.append(branch32(LessThan, regT0, Imm32(0)));
            emitFastArithReTagImmediate(regT0, regT0);
            emitPutVirtualRegister(dst, regT0);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
        if (shift < 0 || !(shift & 31))
            linkSlowCase(iter); // failed to box in hot path
#if !USE(JSVALUE64)
        linkSlowCase(iter); // Couldn't box result
#endif
    } else {
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int

            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
                addPtr(tagTypeNumberRegister, regT0);
                movePtrToDouble(regT0, fpRegT0);
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
                emitFastArithImmToInt(regT1);
                urshift32(regT1, regT0);
                failures.append(branch32(LessThan, regT0, Imm32(0)));
                emitFastArithReTagImmediate(regT0, regT0);
                emitPutVirtualRegister(dst, regT0);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
        linkSlowCase(iter); // Can't represent unsigned result as an immediate
#if !USE(JSVALUE64)
        linkSlowCase(iter); // Couldn't box result
#endif
    }

    JITStubCall stubCall(this, cti_op_urshift);
    stubCall.addArgument(op1, regT0);
    stubCall.addArgument(op2, regT1);
    stubCall.call(dst);
}
void JIT::emit_op_jnless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateChar(op1)) {
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(GreaterThanOrEqual, regT0, regT1), target);
    }
}
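// op_jnless branches when the condition (op1 < op2) is *false*, which is why the integer fast
// path above tests the inverted conditions: GreaterThanOrEqual against a constant right
// operand, LessThanOrEqual against a constant left operand.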
void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(op1, regT0);
        stubCall.addArgument(op2, regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(Zero, regT0), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1 = emitJumpIfNotJSCell(regT0);

            Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
#endif

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

#if USE(JSVALUE64)
            fail1.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1.link(this);
            fail2.link(this);
#endif
        }

        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(Zero, regT0), target);

    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT1, fpRegT1);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail1 = emitJumpIfNotJSCell(regT1);

            Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

#if USE(JSVALUE64)
            fail1.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail1.link(this);
            fail2.link(this);
#endif
        }

        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(Zero, regT0), target);

    } else {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            Jump fail3 = emitJumpIfImmediateInteger(regT1);
            addPtr(tagTypeNumberRegister, regT0);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT0, fpRegT0);
            movePtrToDouble(regT1, fpRegT1);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1 = emitJumpIfNotJSCell(regT0);

            Jump fail2;
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail2 = emitJumpIfNotJSCell(regT1);

            Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
            Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif

            emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

#if USE(JSVALUE64)
            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1.link(this);
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail2.link(this);
            fail3.link(this);
            fail4.link(this);
#endif
        }

        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(Zero, regT0), target);
    }
}
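// DoubleLessThanOrEqualOrUnordered is used above because NaN comparisons are unordered:
// (a < b) is false whenever either operand is NaN, so the inverted jump op_jnless must also
// be taken in the unordered case.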
void JIT::emit_op_jless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateChar(op1)) {
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(GreaterThan, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(LessThan, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(LessThan, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        addJump(branch32(GreaterThan, regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(LessThan, regT0, regT1), target);
    }
}
void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(op1, regT0);
        stubCall.addArgument(op2, regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1 = emitJumpIfNotJSCell(regT0);

            Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
#endif

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

#if USE(JSVALUE64)
            fail1.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1.link(this);
            fail2.link(this);
#endif
        }

        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);

    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT1, fpRegT1);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail1 = emitJumpIfNotJSCell(regT1);

            Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

#if USE(JSVALUE64)
            fail1.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail1.link(this);
            fail2.link(this);
#endif
        }

        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);

    } else {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            Jump fail3 = emitJumpIfImmediateInteger(regT1);
            addPtr(tagTypeNumberRegister, regT0);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT0, fpRegT0);
            movePtrToDouble(regT1, fpRegT1);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1 = emitJumpIfNotJSCell(regT0);

            Jump fail2;
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail2 = emitJumpIfNotJSCell(regT1);

            Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
            Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif

            emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));

#if USE(JSVALUE64)
            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1.link(this);
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail2.link(this);
            fail3.link(this);
            fail4.link(this);
#endif
        }

        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jless);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
    }
}
void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateChar(op1)) {
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT1), target);
    }
}
void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(op1, regT0);
        stubCall.addArgument(op2, regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1 = emitJumpIfNotJSCell(regT0);

            Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
#endif

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));

#if USE(JSVALUE64)
            fail1.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1.link(this);
            fail2.link(this);
#endif
        }

        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);

    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT1, fpRegT1);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail1 = emitJumpIfNotJSCell(regT1);

            Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));

#if USE(JSVALUE64)
            fail1.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail1.link(this);
            fail2.link(this);
#endif
        }

        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);

    } else {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
#if USE(JSVALUE64)
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            Jump fail3 = emitJumpIfImmediateInteger(regT1);
            addPtr(tagTypeNumberRegister, regT0);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT0, fpRegT0);
            movePtrToDouble(regT1, fpRegT1);
#else
            Jump fail1;
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1 = emitJumpIfNotJSCell(regT0);

            Jump fail2;
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail2 = emitJumpIfNotJSCell(regT1);

            Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
            Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
            loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
#endif

            emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));

#if USE(JSVALUE64)
            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
#else
            if (!m_codeBlock->isKnownNotImmediate(op1))
                fail1.link(this);
            if (!m_codeBlock->isKnownNotImmediate(op2))
                fail2.link(this);
            fail3.link(this);
            fail4.link(this);
#endif
        }

        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
    }
}
void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
{
    emit_op_jlesseq(currentInstruction, true);
}

void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitSlow_op_jlesseq(currentInstruction, iter, true);
}
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t imm = getConstantOperandImmediateInt(op1);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0);
#endif
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t imm = getConstantOperandImmediateInt(op2);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0);
#endif
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        andPtr(regT1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}
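// In the variable/variable case above, a single emitJumpSlowCaseIfNotImmediateInteger on the
// andPtr result is sufficient: the integer tag bits survive the AND only when both operands
// carried them, so a non-integer on either side is detected after the operation rather than
// needing two separate checks up front.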
void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    if (isOperandConstantImmediateInt(op1)) {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT0);
        stubCall.call(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call(result);
    }
}
void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
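// On the 32-bit immediate encoding the increment is folded into the tagged representation:
// adding (1 << JSImmediate::IntegerPayloadShift) to the tagged word bumps the integer payload
// by one while leaving the tag bits alone, so the fast path never needs an untag/retag pair.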
void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_post_inc);
    stubCall.addArgument(regT0);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(result);
}
void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    addSlowCase(branchSub32(Zero, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_post_dec);
    stubCall.addArgument(regT0);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(result);
}
void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}
void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_inc);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}
void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    addSlowCase(branchSub32(Zero, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}
void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}
/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

#if CPU(X86) || CPU(X86_64)
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, X86Registers::eax, op2, X86Registers::ecx);
    emitJumpSlowCaseIfNotImmediateInteger(X86Registers::eax);
    emitJumpSlowCaseIfNotImmediateInteger(X86Registers::ecx);
#if USE(JSVALUE64)
    addSlowCase(branchPtr(Equal, X86Registers::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
    m_assembler.cdq();
    m_assembler.idivl_r(X86Registers::ecx);
#else
    emitFastArithDeTagImmediate(X86Registers::eax);
    addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86Registers::ecx));
    m_assembler.cdq();
    m_assembler.idivl_r(X86Registers::ecx);
    signExtend32ToPtr(X86Registers::edx, X86Registers::edx);
#endif
    emitFastArithReTagImmediate(X86Registers::edx, X86Registers::eax);
    emitPutVirtualRegister(result);
}
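// This opcode pins its operands to eax/ecx because x86 idivl divides the sign-extended
// edx:eax pair by its register operand, leaving the quotient in eax and the remainder in edx;
// the remainder in edx is what gets re-tagged and stored as the result of op_mod.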
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;

#if USE(JSVALUE64)
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter);
    emitFastArithReTagImmediate(X86Registers::eax, X86Registers::eax);
    emitFastArithReTagImmediate(X86Registers::ecx, X86Registers::ecx);
    notImm1.link(this);
    notImm2.link(this);
#endif
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(X86Registers::eax);
    stubCall.addArgument(X86Registers::ecx);
    stubCall.call(result);
}
#else // CPU(X86) || CPU(X86_64)
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if ENABLE(JIT_OPTIMIZE_MOD)
    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);

    addSlowCase(branch32(Equal, regT2, Imm32(1)));

    emitNakedCall(m_globalData->jitStubs.ctiSoftModulo());

    emitPutVirtualRegister(result, regT0);
#else
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
#endif
}
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if ENABLE(JIT_OPTIMIZE_MOD)
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
#else
    ASSERT_NOT_REACHED();
#endif
}
#endif // CPU(X86) || CPU(X86_64)

/* ------------------------------ END: OP_MOD ------------------------------ */

#if USE(JSVALUE64)

/* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    if (opcodeID == op_add)
        addSlowCase(branchAdd32(Overflow, regT1, regT0));
    else if (opcodeID == op_sub)
        addSlowCase(branchSub32(Overflow, regT1, regT0));
    else {
        ASSERT(opcodeID == op_mul);
        addSlowCase(branchMul32(Overflow, regT1, regT0));
        addSlowCase(branchTest32(Zero, regT0));
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
}
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
{
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);

    Jump notImm1;
    Jump notImm2;
    if (op1HasImmediateIntFastCase) {
        notImm2 = getSlowCase(iter);
    } else if (op2HasImmediateIntFastCase) {
        notImm1 = getSlowCase(iter);
    } else {
        notImm1 = getSlowCase(iter);
        notImm2 = getSlowCase(iter);
    }

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);
    emitGetVirtualRegister(op1, regT0);

    Label stubFunctionCall(this);
    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
    if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
        emitGetVirtualRegister(op1, regT0);
        emitGetVirtualRegister(op2, regT1);
    }
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(result);
    Jump end = jump();

    if (op1HasImmediateIntFastCase) {
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op1, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT2);
    } else if (op2HasImmediateIntFastCase) {
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op2, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT2);
    } else {
        // if we get here, eax is not an int32, edx not yet checked.
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT1);
        Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT2);
        Jump op2wasInteger = jump();

        // if we get here, eax IS an int32, edx is not.
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        convertInt32ToDouble(regT0, fpRegT1);
        op2isDouble.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT2);
        op2wasInteger.link(this);
    }

    if (opcodeID == op_add)
        addDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_sub)
        subDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_mul)
        mulDouble(fpRegT2, fpRegT1);
    else {
        ASSERT(opcodeID == op_div);
        divDouble(fpRegT2, fpRegT1);
    }
    moveDoubleToPtr(fpRegT1, regT0);
    subPtr(tagTypeNumberRegister, regT0);
    emitPutVirtualRegister(result, regT0);

    end.link(this);
}
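// The COMPILE_ASSERT above is what makes the double boxing in this function so cheap: because
// TagTypeNumber + DoubleEncodeOffset == 0, adding tagTypeNumberRegister to a boxed numeric
// JSValue yields the raw IEEE 754 bit pattern (ready for movePtrToDouble), and subtracting it
// afterwards re-boxes the computed double without any branches.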
void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
        return;

    bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
    bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
    compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand), op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
}
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
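// The "> 0" restriction on the constant operand above avoids the -0 problem: a zero constant
// multiplied by a negative number must produce -0, which cannot be represented as an
// immediate int. Those shapes go through compileBinaryArithOp instead, which plants the extra
// zero-check slow case for op_mul.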
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
    bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
    compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand), op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
}
void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateDouble(op1)) {
        emitGetVirtualRegister(op1, regT0);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitLoadInt32ToDouble(op1, fpRegT0);
    } else {
        emitGetVirtualRegister(op1, regT0);
        if (!types.first().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT0);
        Jump notInt = emitJumpIfNotImmediateInteger(regT0);
        convertInt32ToDouble(regT0, fpRegT0);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
        skipDoubleLoad.link(this);
    }

    if (isOperandConstantImmediateDouble(op2)) {
        emitGetVirtualRegister(op2, regT1);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoadInt32ToDouble(op2, fpRegT1);
    } else {
        emitGetVirtualRegister(op2, regT1);
        if (!types.second().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT1);
        Jump notInt = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
        skipDoubleLoad.link(this);
    }
    divDouble(fpRegT1, fpRegT0);

    // Double result.
    moveDoubleToPtr(fpRegT0, regT0);
    subPtr(tagTypeNumberRegister, regT0);

    emitPutVirtualRegister(dst, regT0);
}
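// Unlike add/sub/mul there is no integer fast path for op_div: both operands are converted to
// doubles, the division is performed in fpRegT0, and the result is always boxed as a double
// via moveDoubleToPtr/subPtr(tagTypeNumberRegister), even when it happens to be integral.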
void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
    if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
        // No slow cases were planted on the fast path for this shape.
        return;
    }
    if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter);
    }
    if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
        if (!types.second().definitelyIsNumber())
            linkSlowCase(iter);
    }
    // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
}
void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOp(op_sub, result, op1, op2, types);
    emitPutVirtualRegister(result);
}
void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types, false, false);
}
#else // USE(JSVALUE64)

/* ------------------------------ BEGIN: !USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
    Structure* numberStructure = m_globalData->numberStructure.get();
    Jump wasJSNumberCell1;
    Jump wasJSNumberCell2;

    emitGetVirtualRegisters(src1, regT0, src2, regT1);

    if (types.second().isReusable() && supportsFloatingPoint()) {
        ASSERT(types.second().mightBeNumber());

        // Check op2 is a number
        Jump op2imm = emitJumpIfImmediateInteger(regT1);
        if (!types.second().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(regT1, src2);
            addSlowCase(checkStructure(regT1, numberStructure));
        }

        // (1) In this case src2 is a reusable number cell.
        //     Slow case if src1 is not a number type.
        Jump op1imm = emitJumpIfImmediateInteger(regT0);
        if (!types.first().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(regT0, src1);
            addSlowCase(checkStructure(regT0, numberStructure));
        }

        // (1a) if we get here, src1 is also a number cell
        loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
        Jump loadedDouble = jump();
        // (1b) if we get here, src1 is an immediate
        op1imm.link(this);
        emitFastArithImmToInt(regT0);
        convertInt32ToDouble(regT0, fpRegT0);

        loadedDouble.link(this);
        if (opcodeID == op_add)
            addDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
        else if (opcodeID == op_sub)
            subDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
        else {
            ASSERT(opcodeID == op_mul);
            mulDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
        }

        // Store the result to the JSNumberCell and jump.
        storeDouble(fpRegT0, Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)));
        move(regT1, regT0);
        emitPutVirtualRegister(dst);
        wasJSNumberCell2 = jump();

        // (2) This handles cases where src2 is an immediate number.
        //     Two slow cases - either src1 isn't an immediate, or the subtract overflows.
        op2imm.link(this);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    } else if (types.first().isReusable() && supportsFloatingPoint()) {
        ASSERT(types.first().mightBeNumber());

        // Check op1 is a number
        Jump op1imm = emitJumpIfImmediateInteger(regT0);
        if (!types.first().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(regT0, src1);
            addSlowCase(checkStructure(regT0, numberStructure));
        }

        // (1) In this case src1 is a reusable number cell.
        //     Slow case if src2 is not a number type.
        Jump op2imm = emitJumpIfImmediateInteger(regT1);
        if (!types.second().definitelyIsNumber()) {
            emitJumpSlowCaseIfNotJSCell(regT1, src2);
            addSlowCase(checkStructure(regT1, numberStructure));
        }

        // (1a) if we get here, src2 is also a number cell
        loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
        Jump loadedDouble = jump();
        // (1b) if we get here, src2 is an immediate
        op2imm.link(this);
        emitFastArithImmToInt(regT1);
        convertInt32ToDouble(regT1, fpRegT1);

        loadedDouble.link(this);
        loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
        if (opcodeID == op_add)
            addDouble(fpRegT1, fpRegT0);
        else if (opcodeID == op_sub)
            subDouble(fpRegT1, fpRegT0);
        else {
            ASSERT(opcodeID == op_mul);
            mulDouble(fpRegT1, fpRegT0);
        }

        // Store the result to the JSNumberCell and jump.
        storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
        emitPutVirtualRegister(dst);
        wasJSNumberCell1 = jump();

        // (2) This handles cases where src1 is an immediate number.
        //     Two slow cases - either src2 isn't an immediate, or the subtract overflows.
        op1imm.link(this);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
    } else
        emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);

    if (opcodeID == op_add) {
        emitFastArithDeTagImmediate(regT0);
        addSlowCase(branchAdd32(Overflow, regT1, regT0));
    } else if (opcodeID == op_sub) {
        addSlowCase(branchSub32(Overflow, regT1, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
    } else {
        ASSERT(opcodeID == op_mul);
        // convert eax & edx from JSImmediates to ints, and check if either are zero
        emitFastArithImmToInt(regT1);
        Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(regT0);
        Jump op2NonZero = branchTest32(NonZero, regT1);
        op1Zero.link(this);
        // if either input is zero, add the two together, and check if the result is < 0.
        // If it is, we have a problem (N < 0), (N * 0) == -0, not representatble as a JSImmediate.
        move(regT0, regT2);
        addSlowCase(branchAdd32(Signed, regT1, regT2));
        // Skip the above check if neither input is zero
        op2NonZero.link(this);
        addSlowCase(branchMul32(Overflow, regT1, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
    }
    emitPutVirtualRegister(dst);

    if (types.second().isReusable() && supportsFloatingPoint())
        wasJSNumberCell2.link(this);
    else if (types.first().isReusable() && supportsFloatingPoint())
        wasJSNumberCell1.link(this);
}
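// The "reusable" cases above are an optimization specific to the JSNumberCell representation:
// when one operand is a number cell that the bytecode marks as reusable, the double result is
// stored straight back into that cell (storeDouble into OBJECT_OFFSETOF(JSNumberCell, m_value))
// and the cell itself becomes the destination value, so the result does not need a freshly
// allocated number cell.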
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
    if (types.second().isReusable() && supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src1);
            linkSlowCase(iter);
        }
        if (!types.second().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src2);
            linkSlowCase(iter);
        }
    } else if (types.first().isReusable() && supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src1);
            linkSlowCase(iter);
        }
        if (!types.second().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src2);
            linkSlowCase(iter);
        }
    }
    linkSlowCase(iter);
    linkSlowCase(iter);

    // additional entry point to handle -0 cases.
    if (opcodeID == op_mul)
        linkSlowCase(iter);

    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
    stubCall.addArgument(src1, regT2);
    stubCall.addArgument(src2, regT2);
    stubCall.call(dst);
}
void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0));
        signExtend32ToPtr(regT0, regT0);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0));
        signExtend32ToPtr(regT0, regT0);
        emitPutVirtualRegister(result);
    } else {
        compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
    }
}
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
    if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
        return;

    if (isOperandConstantImmediateInt(op1)) {
        Jump notImm = getSlowCase(iter);
        linkSlowCase(iter);
        sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0);
        notImm.link(this);
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT0);
        stubCall.call(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        Jump notImm = getSlowCase(iter);
        linkSlowCase(iter);
        sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0);
        notImm.link(this);
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else {
        OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
        ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
    }
}
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithDeTagImmediate(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithDeTagImmediate(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(result);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
        JITStubCall stubCall(this, cti_op_mul);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else
        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
void JIT::emit_op_sub(Instruction* currentInstruction)
{
    compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
}

void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
#endif // USE(JSVALUE64)

/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */

#endif // !USE(JSVALUE32_64)

} // namespace JSC

#endif // ENABLE(JIT)