/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)
#include "JIT.h"

#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "ResultType.h"
#include "SamplingTool.h"

using namespace std;

namespace JSC {
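// A note on the value representation assumed throughout this file: with
// USE(JSVALUE32_64) a JSValue is a pair of 32-bit words, a tag and a payload,
// loaded by emitLoad() into a (tag, payload) register pair such as
// (regT1, regT0). Int32, cell, boolean, etc. values use dedicated tags
// (JSValue::Int32Tag, JSValue::CellTag, ...), while any tag numerically below
// JSValue::LowestTag means the two words together encode a double. That is why
// branch32(Above, tagReg, TrustedImm32(JSValue::LowestTag)) is used below to
// reject values that are not stored as doubles, and branch32(Below, ...) to
// accept them. See JSValue.h for the encoding details.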
void JIT::emit_op_negate(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
    neg32(regT0);
    emitStoreInt32(dst, regT0, (dst == src));

    Jump end = jump();

    srcNotInt.link(this);
    addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

    xor32(TrustedImm32(1 << 31), regT1);
    store32(regT1, tagFor(dst));
    if (dst != src)
        store32(regT0, payloadFor(dst));

    end.link(this);
}
void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    JITStubCall stubCall(this, cti_op_negate);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}
void JIT::emit_op_jnless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Character less.
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }

    // Int32 less.
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThanOrEqual, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less.
    emitBinaryDoubleOp(op_jnless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));

    end.link(this);
}
void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, cti_op_jless);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(Zero, regT0), target);
}
void JIT::emit_op_jless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Character less.
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(GreaterThan, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(LessThan, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }

    // Int32 less.
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(LessThan, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less.
    emitBinaryDoubleOp(op_jless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));

    end.link(this);
}
void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, cti_op_jless);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
}
void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Character less.
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }

    // Int32 less.
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less.
    emitBinaryDoubleOp(invert ? op_jnlesseq : op_jlesseq, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));

    end.link(this);
}
void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, cti_op_jlesseq);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
}
void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
{
    emit_op_jlesseq(currentInstruction, true);
}

void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitSlow_op_jlesseq(currentInstruction, iter, true);
}
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
        emitStoreInt32(dst, regT0, dst == op1);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    lshift32(regT2, regT0);
    emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
}
void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
// RightShift (>>) and UnsignedRightShift (>>>) helper

void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Slow case of rshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        int shift = getConstantOperand(op2).asInt32();
        if (isUnsigned) {
            if (shift)
                urshift32(Imm32(shift & 0x1f), regT0);
            // unsigned shift < 0 or shift = k*2^32 may result in (essentially)
            // a toUint conversion, which can result in a value we cannot represent
            // as an immediate int.
            if (shift < 0 || !(shift & 31))
                addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        } else if (shift) { // signed right shift by zero is simply toInt conversion
            rshift32(Imm32(shift & 0x1f), regT0);
        }
        emitStoreInt32(dst, regT0, dst == op1);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    if (isUnsigned) {
        urshift32(regT2, regT0);
        addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
    } else
        rshift32(regT2, regT0);
    emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
}
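// Note: the "& 0x1f" masks above follow the ECMAScript shift semantics, where
// only the low five bits of the shift count are used. For unsigned right shift
// the result is a uint32; if its sign bit ends up set it cannot be stored as an
// int32 immediate, which is what the branch32(LessThan, ..., TrustedImm32(0))
// slow cases guard against.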
void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32();
        // op1 = regT1:regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
            emitLoadDouble(op1, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (isUnsigned) {
                if (shift)
                    urshift32(Imm32(shift & 0x1f), regT0);
                if (shift < 0 || !(shift & 31))
                    failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
            } else if (shift)
                rshift32(Imm32(shift & 0x1f), regT0);
            emitStoreInt32(dst, regT0, false);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
        if (isUnsigned && (shift < 0 || !(shift & 31)))
            linkSlowCase(iter); // failed to box in hot path
    } else {
        // op1 = regT1:regT0
        // op2 = regT3:regT2
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                Jump notDouble = branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)); // op1 is not a double
                emitLoadDouble(op1, fpRegT0);
                Jump notInt = branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)); // op2 is not an int
                Jump cantTruncate = branchTruncateDoubleToInt32(fpRegT0, regT0);
                if (isUnsigned)
                    urshift32(regT2, regT0);
                else
                    rshift32(regT2, regT0);
                emitStoreInt32(dst, regT0, false);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                notDouble.link(this);
                notInt.link(this);
                cantTruncate.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
        if (isUnsigned)
            linkSlowCase(iter); // Can't represent unsigned result as an immediate
    }

    JITStubCall stubCall(this, isUnsigned ? cti_op_urshift : cti_op_rshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, false);
}

void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, false);
}

// UnsignedRightShift (>>>)

void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, true);
}

void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, true);
}
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        and32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, (op == dst));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    and32(regT2, regT0);
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}

void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitand);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        or32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, (op == dst));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    or32(regT2, regT0);
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}

void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        xor32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, (op == dst));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    xor32(regT2, regT0);
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}

void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitxor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
void JIT::emit_op_bitnot(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));

    not32(regT0);
    emitStoreInt32(dst, regT0, (dst == src));
}

void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitnot);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}
void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitLoad(srcDst, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));

    if (dst == srcDst) // x = x++ is a noop for ints.
        return;

    emitStoreInt32(dst, regT0);

    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}

void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter); // int32 check
    if (dst != srcDst)
        linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_post_inc);
    stubCall.addArgument(srcDst);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(dst);
}
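// Note on op_post_inc (and op_post_dec below): when dst == srcDst the hot path
// returns right after the int32 tag check without performing the add, so only
// one slow case is recorded; the overflow slow case is linked only when
// dst != srcDst.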
void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitLoad(srcDst, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));

    if (dst == srcDst) // x = x-- is a noop for ints.
        return;

    emitStoreInt32(dst, regT0);

    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}

void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter); // int32 check
    if (dst != srcDst)
        linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_post_dec);
    stubCall.addArgument(srcDst);
    stubCall.addArgument(TrustedImm32(srcDst));
    stubCall.call(dst);
}
void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}

void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_pre_inc);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}
void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}

void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}
void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1);
        stubCall.addArgument(op2);
        stubCall.call(dst);
        return;
    }

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchAdd32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    addDouble(fpRegT1, fpRegT0);
    emitStoreDouble(dst, fpRegT0);

    end.link(this);
}
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
        return;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint())
            linkSlowCase(iter); // non-sse case
        else {
            ResultType opType = op == op1 ? types.first() : types.second();
            if (!opType.definitelyIsNumber())
                linkSlowCase(iter); // double check
        }
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_add);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    if (isOperandConstantImmediateInt(op2)) {
        emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchSub32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    subDouble(fpRegT0, fpRegT1);
    emitStoreDouble(dst, fpRegT1);

    end.link(this);
}
void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
            linkSlowCase(iter); // int32 or double check
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_sub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
    JumpList end;

    if (!notInt32Op1.empty()) {
        // Double case 1: Op1 is not int32; Op2 is unknown.
        notInt32Op1.link(this);

        ASSERT(op1IsInRegisters);

        // Verify Op1 is double.
        if (!types.first().definitelyIsNumber())
            addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

        if (!op2IsInRegisters)
            emitLoad(op2, regT3, regT2);

        Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));

        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

        convertInt32ToDouble(regT2, fpRegT0);
        Jump doTheMath = jump();

        // Load Op2 as double into double register.
        doubleOp2.link(this);
        emitLoadDouble(op2, fpRegT0);

        // Do the math.
        doTheMath.link(this);
        switch (opcodeID) {
            case op_mul:
                emitLoadDouble(op1, fpRegT2);
                mulDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_add:
                emitLoadDouble(op1, fpRegT2);
                addDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_sub:
                emitLoadDouble(op1, fpRegT1);
                subDouble(fpRegT0, fpRegT1);
                emitStoreDouble(dst, fpRegT1);
                break;
            case op_div:
                emitLoadDouble(op1, fpRegT1);
                divDouble(fpRegT0, fpRegT1);
                emitStoreDouble(dst, fpRegT1);
                break;
            case op_jnless:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            case op_jless:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
                break;
            case op_jlesseq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
                break;
            case op_jnlesseq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            default:
                ASSERT_NOT_REACHED();
        }

        if (!notInt32Op2.empty())
            end.append(jump());
    }

    if (!notInt32Op2.empty()) {
        // Double case 2: Op1 is int32; Op2 is not int32.
        notInt32Op2.link(this);

        ASSERT(op2IsInRegisters);

        if (!op1IsInRegisters)
            emitLoadPayload(op1, regT0);

        convertInt32ToDouble(regT0, fpRegT0);

        // Verify op2 is double.
        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));

        // Do the math.
        switch (opcodeID) {
            case op_mul:
                emitLoadDouble(op2, fpRegT2);
                mulDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_add:
                emitLoadDouble(op2, fpRegT2);
                addDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_sub:
                emitLoadDouble(op2, fpRegT2);
                subDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_div:
                emitLoadDouble(op2, fpRegT2);
                divDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_jnless:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            case op_jless:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
                break;
            case op_jnlesseq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            case op_jlesseq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
                break;
            default:
                ASSERT_NOT_REACHED();
        }
    }

    end.link(this);
}
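// Note on the floating-point comparisons above: the inverted jumps (op_jnless,
// op_jnlesseq) use the ...OrUnordered double conditions so that a NaN operand
// takes the branch. In ECMAScript any relational comparison involving NaN is
// false, so "jump if not (a < b)" must also be taken when the operands are
// unordered.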
// Multiplication (*)

void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    move(regT0, regT3);
    addSlowCase(branchMul32(Overflow, regT2, regT0));
    addSlowCase(branchTest32(Zero, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    Jump overflow = getSlowCase(iter); // overflow check
    linkSlowCase(iter); // zero result check

    Jump negZero = branchOr32(Signed, regT2, regT3);
    emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));

    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));

    negZero.link(this);
    overflow.link(this);

    if (!supportsFloatingPoint()) {
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
    }

    if (supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    Label jitStubCall(this);
    JITStubCall stubCall(this, cti_op_mul);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint()) {
        addSlowCase(jump());
        return;
    }

    // Int32 divide.
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    JumpList end;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);

    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    convertInt32ToDouble(regT0, fpRegT0);
    convertInt32ToDouble(regT2, fpRegT1);
    divDouble(fpRegT1, fpRegT0);
    emitStoreDouble(dst, fpRegT0);
    end.append(jump());

    // Double divide.
    emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint())
        linkSlowCase(iter);
    else {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

#if CPU(X86) || CPU(X86_64) || CPU(MIPS)

void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if CPU(X86) || CPU(X86_64)
    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);
    ASSERT(regT3 == X86Registers::ebx);
#endif

    if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
        emitLoad(op1, regT1, regT0);
        move(Imm32(getConstantOperand(op2).asInt32()), regT2);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        if (getConstantOperand(op2).asInt32() == -1)
            addSlowCase(branch32(Equal, regT0, TrustedImm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

        addSlowCase(branch32(Equal, regT0, TrustedImm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
        addSlowCase(branch32(Equal, regT2, TrustedImm32(0))); // divide by 0
    }

    move(regT0, regT3); // Save dividend payload, in case of 0.
#if CPU(X86) || CPU(X86_64)
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
#elif CPU(MIPS)
    m_assembler.div(regT0, regT2);
    m_assembler.mfhi(regT1);
#endif

    // If the remainder is zero and the dividend is negative, the result is -0.
    Jump storeResult1 = branchTest32(NonZero, regT1);
    Jump storeResult2 = branchTest32(Zero, regT3, TrustedImm32(0x80000000)); // not negative
    emitStore(dst, jsNumber(-0.0));
    Jump end = jump();

    storeResult1.link(this);
    storeResult2.link(this);
    emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
    end.link(this);
}
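// Note on the x86 path above: idiv divides edx:eax by its operand and leaves the
// quotient in eax and the remainder in edx, which is why regT0/regT1 are asserted
// to be eax/edx and the dividend is sign-extended with cdq first. The -0 handling
// follows ECMAScript: a zero remainder with a negative dividend must produce -0,
// hence the jsNumber(-0.0) store.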
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
        linkSlowCase(iter); // int32 check
        if (getConstantOperand(op2).asInt32() == -1)
            linkSlowCase(iter); // 0x80000000 check
    } else {
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // 0 check
        linkSlowCase(iter); // 0x80000000 check
    }

    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
#else // CPU(X86) || CPU(X86_64) || CPU(MIPS)

void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if ENABLE(JIT_USE_SOFT_MODULO)
    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    addSlowCase(branch32(Equal, regT2, TrustedImm32(0)));

    emitNakedCall(m_globalData->jitStubs->ctiSoftModulo());

    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
#else
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
#endif
}
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    UNUSED_PARAM(currentInstruction);
#if ENABLE(JIT_USE_SOFT_MODULO)
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(result);
#else
    UNUSED_PARAM(currentInstruction);
    UNUSED_PARAM(iter);
    ASSERT_NOT_REACHED();
#endif
}

#endif // CPU(X86) || CPU(X86_64)

/* ------------------------------ END: OP_MOD ------------------------------ */

} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)