2 * Copyright (C) 2008 Apple Inc. All rights reserved.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 #include "CodeBlock.h"
33 #include "JITInlineMethods.h"
34 #include "JITStubCall.h"
37 #include "JSFunction.h"
38 #include "Interpreter.h"
39 #include "ResultType.h"
40 #include "SamplingTool.h"
50 void JIT::emit_op_negate(Instruction
* currentInstruction
)
52 unsigned dst
= currentInstruction
[1].u
.operand
;
53 unsigned src
= currentInstruction
[2].u
.operand
;
55 emitLoad(src
, regT1
, regT0
);
57 Jump srcNotInt
= branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
));
58 addSlowCase(branchTest32(Zero
, regT0
, TrustedImm32(0x7fffffff)));
60 emitStoreInt32(dst
, regT0
, (dst
== src
));
65 addSlowCase(branch32(Above
, regT1
, TrustedImm32(JSValue::LowestTag
)));
67 xor32(TrustedImm32(1 << 31), regT1
);
68 store32(regT1
, tagFor(dst
));
70 store32(regT0
, payloadFor(dst
));
75 void JIT::emitSlow_op_negate(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
77 unsigned dst
= currentInstruction
[1].u
.operand
;
79 linkSlowCase(iter
); // 0x7fffffff check
80 linkSlowCase(iter
); // double check
82 JITStubCall
stubCall(this, cti_op_negate
);
83 stubCall
.addArgument(regT1
, regT0
);
87 void JIT::emit_compareAndJump(OpcodeID opcode
, unsigned op1
, unsigned op2
, unsigned target
, RelationalCondition condition
)
93 if (isOperandConstantImmediateChar(op1
)) {
94 emitLoad(op2
, regT1
, regT0
);
95 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::CellTag
)));
97 emitLoadCharacterString(regT0
, regT0
, failures
);
98 addSlowCase(failures
);
99 addJump(branch32(commute(condition
), regT0
, Imm32(asString(getConstantOperand(op1
))->tryGetValue()[0])), target
);
102 if (isOperandConstantImmediateChar(op2
)) {
103 emitLoad(op1
, regT1
, regT0
);
104 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::CellTag
)));
106 emitLoadCharacterString(regT0
, regT0
, failures
);
107 addSlowCase(failures
);
108 addJump(branch32(condition
, regT0
, Imm32(asString(getConstantOperand(op2
))->tryGetValue()[0])), target
);
111 if (isOperandConstantImmediateInt(op1
)) {
112 emitLoad(op2
, regT3
, regT2
);
113 notInt32Op2
.append(branch32(NotEqual
, regT3
, TrustedImm32(JSValue::Int32Tag
)));
114 addJump(branch32(commute(condition
), regT2
, Imm32(getConstantOperand(op1
).asInt32())), target
);
115 } else if (isOperandConstantImmediateInt(op2
)) {
116 emitLoad(op1
, regT1
, regT0
);
117 notInt32Op1
.append(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
118 addJump(branch32(condition
, regT0
, Imm32(getConstantOperand(op2
).asInt32())), target
);
120 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
121 notInt32Op1
.append(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
122 notInt32Op2
.append(branch32(NotEqual
, regT3
, TrustedImm32(JSValue::Int32Tag
)));
123 addJump(branch32(condition
, regT0
, regT2
), target
);
126 if (!supportsFloatingPoint()) {
127 addSlowCase(notInt32Op1
);
128 addSlowCase(notInt32Op2
);
134 emitBinaryDoubleOp(opcode
, target
, op1
, op2
, OperandTypes(), notInt32Op1
, notInt32Op2
, !isOperandConstantImmediateInt(op1
), isOperandConstantImmediateInt(op1
) || !isOperandConstantImmediateInt(op2
));
138 void JIT::emit_compareAndJumpSlow(unsigned op1
, unsigned op2
, unsigned target
, DoubleCondition
, int (JIT_STUB
*stub
)(STUB_ARGS_DECLARATION
), bool invert
, Vector
<SlowCaseEntry
>::iterator
& iter
)
140 if (isOperandConstantImmediateChar(op1
) || isOperandConstantImmediateChar(op2
)) {
146 if (!supportsFloatingPoint()) {
147 if (!isOperandConstantImmediateInt(op1
) && !isOperandConstantImmediateInt(op2
))
148 linkSlowCase(iter
); // int32 check
149 linkSlowCase(iter
); // int32 check
151 if (!isOperandConstantImmediateInt(op1
)) {
152 linkSlowCase(iter
); // double check
153 linkSlowCase(iter
); // int32 check
155 if (isOperandConstantImmediateInt(op1
) || !isOperandConstantImmediateInt(op2
))
156 linkSlowCase(iter
); // double check
159 JITStubCall
stubCall(this, stub
);
160 stubCall
.addArgument(op1
);
161 stubCall
.addArgument(op2
);
163 emitJumpSlowToHot(branchTest32(invert
? Zero
: NonZero
, regT0
), target
);
168 void JIT::emit_op_lshift(Instruction
* currentInstruction
)
170 unsigned dst
= currentInstruction
[1].u
.operand
;
171 unsigned op1
= currentInstruction
[2].u
.operand
;
172 unsigned op2
= currentInstruction
[3].u
.operand
;
174 if (isOperandConstantImmediateInt(op2
)) {
175 emitLoad(op1
, regT1
, regT0
);
176 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
177 lshift32(Imm32(getConstantOperand(op2
).asInt32()), regT0
);
178 emitStoreAndMapInt32(dst
, regT1
, regT0
, dst
== op1
, OPCODE_LENGTH(op_lshift
));
182 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
183 if (!isOperandConstantImmediateInt(op1
))
184 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
185 addSlowCase(branch32(NotEqual
, regT3
, TrustedImm32(JSValue::Int32Tag
)));
186 lshift32(regT2
, regT0
);
187 emitStoreAndMapInt32(dst
, regT1
, regT0
, dst
== op1
|| dst
== op2
, OPCODE_LENGTH(op_lshift
));
190 void JIT::emitSlow_op_lshift(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
192 unsigned dst
= currentInstruction
[1].u
.operand
;
193 unsigned op1
= currentInstruction
[2].u
.operand
;
194 unsigned op2
= currentInstruction
[3].u
.operand
;
196 if (!isOperandConstantImmediateInt(op1
) && !isOperandConstantImmediateInt(op2
))
197 linkSlowCase(iter
); // int32 check
198 linkSlowCase(iter
); // int32 check
200 JITStubCall
stubCall(this, cti_op_lshift
);
201 stubCall
.addArgument(op1
);
202 stubCall
.addArgument(op2
);
206 // RightShift (>>) and UnsignedRightShift (>>>) helper
208 void JIT::emitRightShift(Instruction
* currentInstruction
, bool isUnsigned
)
210 unsigned dst
= currentInstruction
[1].u
.operand
;
211 unsigned op1
= currentInstruction
[2].u
.operand
;
212 unsigned op2
= currentInstruction
[3].u
.operand
;
214 // Slow case of rshift makes assumptions about what registers hold the
215 // shift arguments, so any changes must be updated there as well.
216 if (isOperandConstantImmediateInt(op2
)) {
217 emitLoad(op1
, regT1
, regT0
);
218 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
219 int shift
= getConstantOperand(op2
).asInt32() & 0x1f;
222 urshift32(Imm32(shift
), regT0
);
224 rshift32(Imm32(shift
), regT0
);
225 } else if (isUnsigned
) // signed right shift by zero is simply toInt conversion
226 addSlowCase(branch32(LessThan
, regT0
, TrustedImm32(0)));
227 emitStoreAndMapInt32(dst
, regT1
, regT0
, dst
== op1
, OPCODE_LENGTH(op_rshift
));
229 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
230 if (!isOperandConstantImmediateInt(op1
))
231 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
232 addSlowCase(branch32(NotEqual
, regT3
, TrustedImm32(JSValue::Int32Tag
)));
234 urshift32(regT2
, regT0
);
235 addSlowCase(branch32(LessThan
, regT0
, TrustedImm32(0)));
237 rshift32(regT2
, regT0
);
238 emitStoreAndMapInt32(dst
, regT1
, regT0
, dst
== op1
, OPCODE_LENGTH(op_rshift
));
242 void JIT::emitRightShiftSlowCase(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
, bool isUnsigned
)
244 unsigned dst
= currentInstruction
[1].u
.operand
;
245 unsigned op1
= currentInstruction
[2].u
.operand
;
246 unsigned op2
= currentInstruction
[3].u
.operand
;
247 if (isOperandConstantImmediateInt(op2
)) {
248 int shift
= getConstantOperand(op2
).asInt32() & 0x1f;
250 linkSlowCase(iter
); // int32 check
251 if (supportsFloatingPointTruncate()) {
253 failures
.append(branch32(AboveOrEqual
, regT1
, TrustedImm32(JSValue::LowestTag
)));
254 emitLoadDouble(op1
, fpRegT0
);
255 failures
.append(branchTruncateDoubleToInt32(fpRegT0
, regT0
));
258 urshift32(Imm32(shift
), regT0
);
260 rshift32(Imm32(shift
), regT0
);
261 } else if (isUnsigned
) // signed right shift by zero is simply toInt conversion
262 failures
.append(branch32(LessThan
, regT0
, TrustedImm32(0)));
263 move(TrustedImm32(JSValue::Int32Tag
), regT1
);
264 emitStoreInt32(dst
, regT0
, false);
265 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift
));
268 if (isUnsigned
&& !shift
)
269 linkSlowCase(iter
); // failed to box in hot path
273 if (!isOperandConstantImmediateInt(op1
)) {
274 linkSlowCase(iter
); // int32 check -- op1 is not an int
275 if (supportsFloatingPointTruncate()) {
277 failures
.append(branch32(Above
, regT1
, TrustedImm32(JSValue::LowestTag
))); // op1 is not a double
278 emitLoadDouble(op1
, fpRegT0
);
279 failures
.append(branch32(NotEqual
, regT3
, TrustedImm32(JSValue::Int32Tag
))); // op2 is not an int
280 failures
.append(branchTruncateDoubleToInt32(fpRegT0
, regT0
));
282 urshift32(regT2
, regT0
);
283 failures
.append(branch32(LessThan
, regT0
, TrustedImm32(0)));
285 rshift32(regT2
, regT0
);
286 move(TrustedImm32(JSValue::Int32Tag
), regT1
);
287 emitStoreInt32(dst
, regT0
, false);
288 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift
));
293 linkSlowCase(iter
); // int32 check - op2 is not an int
295 linkSlowCase(iter
); // Can't represent unsigned result as an immediate
298 JITStubCall
stubCall(this, isUnsigned
? cti_op_urshift
: cti_op_rshift
);
299 stubCall
.addArgument(op1
);
300 stubCall
.addArgument(op2
);
306 void JIT::emit_op_rshift(Instruction
* currentInstruction
)
308 emitRightShift(currentInstruction
, false);
311 void JIT::emitSlow_op_rshift(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
313 emitRightShiftSlowCase(currentInstruction
, iter
, false);
316 // UnsignedRightShift (>>>)
318 void JIT::emit_op_urshift(Instruction
* currentInstruction
)
320 emitRightShift(currentInstruction
, true);
323 void JIT::emitSlow_op_urshift(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
325 emitRightShiftSlowCase(currentInstruction
, iter
, true);
330 void JIT::emit_op_bitand(Instruction
* currentInstruction
)
332 unsigned dst
= currentInstruction
[1].u
.operand
;
333 unsigned op1
= currentInstruction
[2].u
.operand
;
334 unsigned op2
= currentInstruction
[3].u
.operand
;
338 if (getOperandConstantImmediateInt(op1
, op2
, op
, constant
)) {
339 emitLoad(op
, regT1
, regT0
);
340 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
341 and32(Imm32(constant
), regT0
);
342 emitStoreAndMapInt32(dst
, regT1
, regT0
, dst
== op
, OPCODE_LENGTH(op_bitand
));
346 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
347 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
348 addSlowCase(branch32(NotEqual
, regT3
, TrustedImm32(JSValue::Int32Tag
)));
350 emitStoreAndMapInt32(dst
, regT1
, regT0
, (op1
== dst
|| op2
== dst
), OPCODE_LENGTH(op_bitand
));
353 void JIT::emitSlow_op_bitand(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
355 unsigned dst
= currentInstruction
[1].u
.operand
;
356 unsigned op1
= currentInstruction
[2].u
.operand
;
357 unsigned op2
= currentInstruction
[3].u
.operand
;
359 if (!isOperandConstantImmediateInt(op1
) && !isOperandConstantImmediateInt(op2
))
360 linkSlowCase(iter
); // int32 check
361 linkSlowCase(iter
); // int32 check
363 JITStubCall
stubCall(this, cti_op_bitand
);
364 stubCall
.addArgument(op1
);
365 stubCall
.addArgument(op2
);
371 void JIT::emit_op_bitor(Instruction
* currentInstruction
)
373 unsigned dst
= currentInstruction
[1].u
.operand
;
374 unsigned op1
= currentInstruction
[2].u
.operand
;
375 unsigned op2
= currentInstruction
[3].u
.operand
;
379 if (getOperandConstantImmediateInt(op1
, op2
, op
, constant
)) {
380 emitLoad(op
, regT1
, regT0
);
381 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
382 or32(Imm32(constant
), regT0
);
383 emitStoreAndMapInt32(dst
, regT1
, regT0
, op
== dst
, OPCODE_LENGTH(op_bitor
));
387 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
388 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
389 addSlowCase(branch32(NotEqual
, regT3
, TrustedImm32(JSValue::Int32Tag
)));
391 emitStoreAndMapInt32(dst
, regT1
, regT0
, (op1
== dst
|| op2
== dst
), OPCODE_LENGTH(op_bitor
));
394 void JIT::emitSlow_op_bitor(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
396 unsigned dst
= currentInstruction
[1].u
.operand
;
397 unsigned op1
= currentInstruction
[2].u
.operand
;
398 unsigned op2
= currentInstruction
[3].u
.operand
;
400 if (!isOperandConstantImmediateInt(op1
) && !isOperandConstantImmediateInt(op2
))
401 linkSlowCase(iter
); // int32 check
402 linkSlowCase(iter
); // int32 check
404 JITStubCall
stubCall(this, cti_op_bitor
);
405 stubCall
.addArgument(op1
);
406 stubCall
.addArgument(op2
);
412 void JIT::emit_op_bitxor(Instruction
* currentInstruction
)
414 unsigned dst
= currentInstruction
[1].u
.operand
;
415 unsigned op1
= currentInstruction
[2].u
.operand
;
416 unsigned op2
= currentInstruction
[3].u
.operand
;
420 if (getOperandConstantImmediateInt(op1
, op2
, op
, constant
)) {
421 emitLoad(op
, regT1
, regT0
);
422 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
423 xor32(Imm32(constant
), regT0
);
424 emitStoreAndMapInt32(dst
, regT1
, regT0
, op
== dst
, OPCODE_LENGTH(op_bitxor
));
428 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
429 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
430 addSlowCase(branch32(NotEqual
, regT3
, TrustedImm32(JSValue::Int32Tag
)));
432 emitStoreAndMapInt32(dst
, regT1
, regT0
, (op1
== dst
|| op2
== dst
), OPCODE_LENGTH(op_bitxor
));
435 void JIT::emitSlow_op_bitxor(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
437 unsigned dst
= currentInstruction
[1].u
.operand
;
438 unsigned op1
= currentInstruction
[2].u
.operand
;
439 unsigned op2
= currentInstruction
[3].u
.operand
;
441 if (!isOperandConstantImmediateInt(op1
) && !isOperandConstantImmediateInt(op2
))
442 linkSlowCase(iter
); // int32 check
443 linkSlowCase(iter
); // int32 check
445 JITStubCall
stubCall(this, cti_op_bitxor
);
446 stubCall
.addArgument(op1
);
447 stubCall
.addArgument(op2
);
453 void JIT::emit_op_post_inc(Instruction
* currentInstruction
)
455 unsigned dst
= currentInstruction
[1].u
.operand
;
456 unsigned srcDst
= currentInstruction
[2].u
.operand
;
458 emitLoad(srcDst
, regT1
, regT0
);
459 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
461 if (dst
== srcDst
) // x = x++ is a noop for ints.
465 addSlowCase(branchAdd32(Overflow
, TrustedImm32(1), regT2
));
466 emitStoreInt32(srcDst
, regT2
, true);
468 emitStoreAndMapInt32(dst
, regT1
, regT0
, false, OPCODE_LENGTH(op_post_inc
));
471 void JIT::emitSlow_op_post_inc(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
473 unsigned dst
= currentInstruction
[1].u
.operand
;
474 unsigned srcDst
= currentInstruction
[2].u
.operand
;
476 linkSlowCase(iter
); // int32 check
478 linkSlowCase(iter
); // overflow check
480 JITStubCall
stubCall(this, cti_op_post_inc
);
481 stubCall
.addArgument(srcDst
);
482 stubCall
.addArgument(TrustedImm32(srcDst
));
488 void JIT::emit_op_post_dec(Instruction
* currentInstruction
)
490 unsigned dst
= currentInstruction
[1].u
.operand
;
491 unsigned srcDst
= currentInstruction
[2].u
.operand
;
493 emitLoad(srcDst
, regT1
, regT0
);
494 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
496 if (dst
== srcDst
) // x = x-- is a noop for ints.
500 addSlowCase(branchSub32(Overflow
, TrustedImm32(1), regT2
));
501 emitStoreInt32(srcDst
, regT2
, true);
503 emitStoreAndMapInt32(dst
, regT1
, regT0
, false, OPCODE_LENGTH(op_post_dec
));
506 void JIT::emitSlow_op_post_dec(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
508 unsigned dst
= currentInstruction
[1].u
.operand
;
509 unsigned srcDst
= currentInstruction
[2].u
.operand
;
511 linkSlowCase(iter
); // int32 check
513 linkSlowCase(iter
); // overflow check
515 JITStubCall
stubCall(this, cti_op_post_dec
);
516 stubCall
.addArgument(srcDst
);
517 stubCall
.addArgument(TrustedImm32(srcDst
));
523 void JIT::emit_op_pre_inc(Instruction
* currentInstruction
)
525 unsigned srcDst
= currentInstruction
[1].u
.operand
;
527 emitLoad(srcDst
, regT1
, regT0
);
529 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
530 addSlowCase(branchAdd32(Overflow
, TrustedImm32(1), regT0
));
531 emitStoreAndMapInt32(srcDst
, regT1
, regT0
, true, OPCODE_LENGTH(op_pre_inc
));
534 void JIT::emitSlow_op_pre_inc(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
536 unsigned srcDst
= currentInstruction
[1].u
.operand
;
538 linkSlowCase(iter
); // int32 check
539 linkSlowCase(iter
); // overflow check
541 JITStubCall
stubCall(this, cti_op_pre_inc
);
542 stubCall
.addArgument(srcDst
);
543 stubCall
.call(srcDst
);
548 void JIT::emit_op_pre_dec(Instruction
* currentInstruction
)
550 unsigned srcDst
= currentInstruction
[1].u
.operand
;
552 emitLoad(srcDst
, regT1
, regT0
);
554 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
555 addSlowCase(branchSub32(Overflow
, TrustedImm32(1), regT0
));
556 emitStoreAndMapInt32(srcDst
, regT1
, regT0
, true, OPCODE_LENGTH(op_pre_dec
));
559 void JIT::emitSlow_op_pre_dec(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
561 unsigned srcDst
= currentInstruction
[1].u
.operand
;
563 linkSlowCase(iter
); // int32 check
564 linkSlowCase(iter
); // overflow check
566 JITStubCall
stubCall(this, cti_op_pre_dec
);
567 stubCall
.addArgument(srcDst
);
568 stubCall
.call(srcDst
);
573 void JIT::emit_op_add(Instruction
* currentInstruction
)
575 unsigned dst
= currentInstruction
[1].u
.operand
;
576 unsigned op1
= currentInstruction
[2].u
.operand
;
577 unsigned op2
= currentInstruction
[3].u
.operand
;
578 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
580 if (!types
.first().mightBeNumber() || !types
.second().mightBeNumber()) {
582 JITStubCall
stubCall(this, cti_op_add
);
583 stubCall
.addArgument(op1
);
584 stubCall
.addArgument(op2
);
589 JumpList notInt32Op1
;
590 JumpList notInt32Op2
;
594 if (getOperandConstantImmediateInt(op1
, op2
, op
, constant
)) {
595 emitAdd32Constant(dst
, op
, constant
, op
== op1
? types
.first() : types
.second());
599 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
600 notInt32Op1
.append(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
601 notInt32Op2
.append(branch32(NotEqual
, regT3
, TrustedImm32(JSValue::Int32Tag
)));
604 addSlowCase(branchAdd32(Overflow
, regT2
, regT0
));
605 emitStoreInt32(dst
, regT0
, (op1
== dst
|| op2
== dst
));
607 if (!supportsFloatingPoint()) {
608 addSlowCase(notInt32Op1
);
609 addSlowCase(notInt32Op2
);
615 emitBinaryDoubleOp(op_add
, dst
, op1
, op2
, types
, notInt32Op1
, notInt32Op2
);
619 void JIT::emitAdd32Constant(unsigned dst
, unsigned op
, int32_t constant
, ResultType opType
)
622 emitLoad(op
, regT1
, regT2
);
623 Jump notInt32
= branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
));
624 addSlowCase(branchAdd32(Overflow
, regT2
, Imm32(constant
), regT0
));
625 emitStoreInt32(dst
, regT0
, (op
== dst
));
628 if (!supportsFloatingPoint()) {
629 addSlowCase(notInt32
);
635 if (!opType
.definitelyIsNumber())
636 addSlowCase(branch32(Above
, regT1
, TrustedImm32(JSValue::LowestTag
)));
637 move(Imm32(constant
), regT2
);
638 convertInt32ToDouble(regT2
, fpRegT0
);
639 emitLoadDouble(op
, fpRegT1
);
640 addDouble(fpRegT1
, fpRegT0
);
641 emitStoreDouble(dst
, fpRegT0
);
646 void JIT::emitSlow_op_add(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
648 unsigned dst
= currentInstruction
[1].u
.operand
;
649 unsigned op1
= currentInstruction
[2].u
.operand
;
650 unsigned op2
= currentInstruction
[3].u
.operand
;
651 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
653 if (!types
.first().mightBeNumber() || !types
.second().mightBeNumber()) {
654 linkDummySlowCase(iter
);
660 if (getOperandConstantImmediateInt(op1
, op2
, op
, constant
)) {
661 linkSlowCase(iter
); // overflow check
663 if (!supportsFloatingPoint())
664 linkSlowCase(iter
); // non-sse case
666 ResultType opType
= op
== op1
? types
.first() : types
.second();
667 if (!opType
.definitelyIsNumber())
668 linkSlowCase(iter
); // double check
671 linkSlowCase(iter
); // overflow check
673 if (!supportsFloatingPoint()) {
674 linkSlowCase(iter
); // int32 check
675 linkSlowCase(iter
); // int32 check
677 if (!types
.first().definitelyIsNumber())
678 linkSlowCase(iter
); // double check
680 if (!types
.second().definitelyIsNumber()) {
681 linkSlowCase(iter
); // int32 check
682 linkSlowCase(iter
); // double check
687 JITStubCall
stubCall(this, cti_op_add
);
688 stubCall
.addArgument(op1
);
689 stubCall
.addArgument(op2
);
695 void JIT::emit_op_sub(Instruction
* currentInstruction
)
697 unsigned dst
= currentInstruction
[1].u
.operand
;
698 unsigned op1
= currentInstruction
[2].u
.operand
;
699 unsigned op2
= currentInstruction
[3].u
.operand
;
700 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
702 JumpList notInt32Op1
;
703 JumpList notInt32Op2
;
705 if (isOperandConstantImmediateInt(op2
)) {
706 emitSub32Constant(dst
, op1
, getConstantOperand(op2
).asInt32(), types
.first());
710 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
711 notInt32Op1
.append(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
712 notInt32Op2
.append(branch32(NotEqual
, regT3
, TrustedImm32(JSValue::Int32Tag
)));
715 addSlowCase(branchSub32(Overflow
, regT2
, regT0
));
716 emitStoreInt32(dst
, regT0
, (op1
== dst
|| op2
== dst
));
718 if (!supportsFloatingPoint()) {
719 addSlowCase(notInt32Op1
);
720 addSlowCase(notInt32Op2
);
726 emitBinaryDoubleOp(op_sub
, dst
, op1
, op2
, types
, notInt32Op1
, notInt32Op2
);
730 void JIT::emitSub32Constant(unsigned dst
, unsigned op
, int32_t constant
, ResultType opType
)
733 emitLoad(op
, regT1
, regT0
);
734 Jump notInt32
= branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
));
735 #if ENABLE(JIT_CONSTANT_BLINDING)
736 addSlowCase(branchSub32(Overflow
, regT0
, Imm32(constant
), regT2
, regT3
));
738 addSlowCase(branchSub32(Overflow
, regT0
, Imm32(constant
), regT2
));
741 emitStoreInt32(dst
, regT2
, (op
== dst
));
744 if (!supportsFloatingPoint()) {
745 addSlowCase(notInt32
);
751 if (!opType
.definitelyIsNumber())
752 addSlowCase(branch32(Above
, regT1
, TrustedImm32(JSValue::LowestTag
)));
753 move(Imm32(constant
), regT2
);
754 convertInt32ToDouble(regT2
, fpRegT0
);
755 emitLoadDouble(op
, fpRegT1
);
756 subDouble(fpRegT0
, fpRegT1
);
757 emitStoreDouble(dst
, fpRegT1
);
762 void JIT::emitSlow_op_sub(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
764 unsigned dst
= currentInstruction
[1].u
.operand
;
765 unsigned op1
= currentInstruction
[2].u
.operand
;
766 unsigned op2
= currentInstruction
[3].u
.operand
;
767 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
769 if (isOperandConstantImmediateInt(op2
)) {
770 linkSlowCase(iter
); // overflow check
772 if (!supportsFloatingPoint() || !types
.first().definitelyIsNumber())
773 linkSlowCase(iter
); // int32 or double check
775 linkSlowCase(iter
); // overflow check
777 if (!supportsFloatingPoint()) {
778 linkSlowCase(iter
); // int32 check
779 linkSlowCase(iter
); // int32 check
781 if (!types
.first().definitelyIsNumber())
782 linkSlowCase(iter
); // double check
784 if (!types
.second().definitelyIsNumber()) {
785 linkSlowCase(iter
); // int32 check
786 linkSlowCase(iter
); // double check
791 JITStubCall
stubCall(this, cti_op_sub
);
792 stubCall
.addArgument(op1
);
793 stubCall
.addArgument(op2
);
797 void JIT::emitBinaryDoubleOp(OpcodeID opcodeID
, unsigned dst
, unsigned op1
, unsigned op2
, OperandTypes types
, JumpList
& notInt32Op1
, JumpList
& notInt32Op2
, bool op1IsInRegisters
, bool op2IsInRegisters
)
801 if (!notInt32Op1
.empty()) {
802 // Double case 1: Op1 is not int32; Op2 is unknown.
803 notInt32Op1
.link(this);
805 ASSERT(op1IsInRegisters
);
807 // Verify Op1 is double.
808 if (!types
.first().definitelyIsNumber())
809 addSlowCase(branch32(Above
, regT1
, TrustedImm32(JSValue::LowestTag
)));
811 if (!op2IsInRegisters
)
812 emitLoad(op2
, regT3
, regT2
);
814 Jump doubleOp2
= branch32(Below
, regT3
, TrustedImm32(JSValue::LowestTag
));
816 if (!types
.second().definitelyIsNumber())
817 addSlowCase(branch32(NotEqual
, regT3
, TrustedImm32(JSValue::Int32Tag
)));
819 convertInt32ToDouble(regT2
, fpRegT0
);
820 Jump doTheMath
= jump();
822 // Load Op2 as double into double register.
823 doubleOp2
.link(this);
824 emitLoadDouble(op2
, fpRegT0
);
827 doTheMath
.link(this);
830 emitLoadDouble(op1
, fpRegT2
);
831 mulDouble(fpRegT2
, fpRegT0
);
832 emitStoreDouble(dst
, fpRegT0
);
835 emitLoadDouble(op1
, fpRegT2
);
836 addDouble(fpRegT2
, fpRegT0
);
837 emitStoreDouble(dst
, fpRegT0
);
840 emitLoadDouble(op1
, fpRegT1
);
841 subDouble(fpRegT0
, fpRegT1
);
842 emitStoreDouble(dst
, fpRegT1
);
845 emitLoadDouble(op1
, fpRegT1
);
846 divDouble(fpRegT0
, fpRegT1
);
848 #if ENABLE(VALUE_PROFILER)
849 // Is the result actually an integer? The DFG JIT would really like to know. If it's
850 // not an integer, we increment a count. If this together with the slow case counter
851 // are below threshold then the DFG JIT will compile this division with a specualtion
852 // that the remainder is zero.
854 // As well, there are cases where a double result here would cause an important field
855 // in the heap to sometimes have doubles in it, resulting in double predictions getting
856 // propagated to a use site where it might cause damage (such as the index to an array
857 // access). So if we are DFG compiling anything in the program, we want this code to
858 // ensure that it produces integers whenever possible.
860 // FIXME: This will fail to convert to integer if the result is zero. We should
861 // distinguish between positive zero and negative zero here.
864 branchConvertDoubleToInt32(fpRegT1
, regT2
, notInteger
, fpRegT0
);
865 // If we've got an integer, we might as well make that the result of the division.
866 emitStoreInt32(dst
, regT2
);
867 Jump isInteger
= jump();
868 notInteger
.link(this);
869 add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock
->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset
)->m_counter
));
870 emitStoreDouble(dst
, fpRegT1
);
871 isInteger
.link(this);
873 emitStoreDouble(dst
, fpRegT1
);
878 emitLoadDouble(op1
, fpRegT2
);
879 addJump(branchDouble(DoubleLessThan
, fpRegT2
, fpRegT0
), dst
);
882 emitLoadDouble(op1
, fpRegT2
);
883 addJump(branchDouble(DoubleLessThanOrEqual
, fpRegT2
, fpRegT0
), dst
);
886 emitLoadDouble(op1
, fpRegT2
);
887 addJump(branchDouble(DoubleGreaterThan
, fpRegT2
, fpRegT0
), dst
);
890 emitLoadDouble(op1
, fpRegT2
);
891 addJump(branchDouble(DoubleGreaterThanOrEqual
, fpRegT2
, fpRegT0
), dst
);
894 emitLoadDouble(op1
, fpRegT2
);
895 addJump(branchDouble(DoubleLessThanOrEqualOrUnordered
, fpRegT0
, fpRegT2
), dst
);
898 emitLoadDouble(op1
, fpRegT2
);
899 addJump(branchDouble(DoubleLessThanOrUnordered
, fpRegT0
, fpRegT2
), dst
);
902 emitLoadDouble(op1
, fpRegT2
);
903 addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered
, fpRegT0
, fpRegT2
), dst
);
906 emitLoadDouble(op1
, fpRegT2
);
907 addJump(branchDouble(DoubleGreaterThanOrUnordered
, fpRegT0
, fpRegT2
), dst
);
910 ASSERT_NOT_REACHED();
913 if (!notInt32Op2
.empty())
917 if (!notInt32Op2
.empty()) {
918 // Double case 2: Op1 is int32; Op2 is not int32.
919 notInt32Op2
.link(this);
921 ASSERT(op2IsInRegisters
);
923 if (!op1IsInRegisters
)
924 emitLoadPayload(op1
, regT0
);
926 convertInt32ToDouble(regT0
, fpRegT0
);
928 // Verify op2 is double.
929 if (!types
.second().definitelyIsNumber())
930 addSlowCase(branch32(Above
, regT3
, TrustedImm32(JSValue::LowestTag
)));
935 emitLoadDouble(op2
, fpRegT2
);
936 mulDouble(fpRegT2
, fpRegT0
);
937 emitStoreDouble(dst
, fpRegT0
);
940 emitLoadDouble(op2
, fpRegT2
);
941 addDouble(fpRegT2
, fpRegT0
);
942 emitStoreDouble(dst
, fpRegT0
);
945 emitLoadDouble(op2
, fpRegT2
);
946 subDouble(fpRegT2
, fpRegT0
);
947 emitStoreDouble(dst
, fpRegT0
);
950 emitLoadDouble(op2
, fpRegT2
);
951 divDouble(fpRegT2
, fpRegT0
);
952 #if ENABLE(VALUE_PROFILER)
953 // Is the result actually an integer? The DFG JIT would really like to know. If it's
954 // not an integer, we increment a count. If this together with the slow case counter
955 // are below threshold then the DFG JIT will compile this division with a specualtion
956 // that the remainder is zero.
958 // As well, there are cases where a double result here would cause an important field
959 // in the heap to sometimes have doubles in it, resulting in double predictions getting
960 // propagated to a use site where it might cause damage (such as the index to an array
961 // access). So if we are DFG compiling anything in the program, we want this code to
962 // ensure that it produces integers whenever possible.
964 // FIXME: This will fail to convert to integer if the result is zero. We should
965 // distinguish between positive zero and negative zero here.
968 branchConvertDoubleToInt32(fpRegT0
, regT2
, notInteger
, fpRegT1
);
969 // If we've got an integer, we might as well make that the result of the division.
970 emitStoreInt32(dst
, regT2
);
971 Jump isInteger
= jump();
972 notInteger
.link(this);
973 add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock
->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset
)->m_counter
));
974 emitStoreDouble(dst
, fpRegT0
);
975 isInteger
.link(this);
977 emitStoreDouble(dst
, fpRegT0
);
982 emitLoadDouble(op2
, fpRegT1
);
983 addJump(branchDouble(DoubleLessThan
, fpRegT0
, fpRegT1
), dst
);
986 emitLoadDouble(op2
, fpRegT1
);
987 addJump(branchDouble(DoubleLessThanOrEqual
, fpRegT0
, fpRegT1
), dst
);
990 emitLoadDouble(op2
, fpRegT1
);
991 addJump(branchDouble(DoubleGreaterThan
, fpRegT0
, fpRegT1
), dst
);
994 emitLoadDouble(op2
, fpRegT1
);
995 addJump(branchDouble(DoubleGreaterThanOrEqual
, fpRegT0
, fpRegT1
), dst
);
998 emitLoadDouble(op2
, fpRegT1
);
999 addJump(branchDouble(DoubleLessThanOrEqualOrUnordered
, fpRegT1
, fpRegT0
), dst
);
1002 emitLoadDouble(op2
, fpRegT1
);
1003 addJump(branchDouble(DoubleLessThanOrUnordered
, fpRegT1
, fpRegT0
), dst
);
1006 emitLoadDouble(op2
, fpRegT1
);
1007 addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered
, fpRegT1
, fpRegT0
), dst
);
1009 case op_jngreatereq
:
1010 emitLoadDouble(op2
, fpRegT1
);
1011 addJump(branchDouble(DoubleGreaterThanOrUnordered
, fpRegT1
, fpRegT0
), dst
);
1014 ASSERT_NOT_REACHED();
1021 // Multiplication (*)
// emit_op_mul: baseline-JIT fast path for bytecode op_mul (JSVALUE32_64 encoding).
// Int32 x Int32 is multiplied inline; overflow and zero results (a zero product may
// really be negative zero) are deferred to emitSlow_op_mul; non-int32 operands fall
// through to the shared double path in emitBinaryDoubleOp.
// NOTE(review): this chunk is a line-mangled extraction -- braces, an #endif, and some
// statements (e.g. the int32-path operand save and the trailing end-jump/link) are not
// visible here; verify the full body against the original file.
1023 void JIT::emit_op_mul(Instruction
* currentInstruction
)
// Decode operands: dst/op1/op2 virtual registers, plus the statically
// predicted operand types carried in instruction slot 4.
1025 unsigned dst
= currentInstruction
[1].u
.operand
;
1026 unsigned op1
= currentInstruction
[2].u
.operand
;
1027 unsigned op2
= currentInstruction
[3].u
.operand
;
1028 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
// Register a "special fast case" profile for this bytecode; the slow path
// (see emitSlow_op_mul) bumps its counter on negative-zero results so the
// DFG knows why speculation failed.
1030 #if ENABLE(VALUE_PROFILER)
1031 m_codeBlock
->addSpecialFastCaseProfile(m_bytecodeOffset
);
// Jump lists collecting the "operand is not an int32" exits; they are either
// turned into slow cases (no FPU) or consumed by emitBinaryDoubleOp.
1034 JumpList notInt32Op1
;
1035 JumpList notInt32Op2
;
// Load both operands as tag/payload pairs: op1 -> regT1(tag):regT0(payload),
// op2 -> regT3(tag):regT2(payload) (tag registers confirmed by the checks below).
1037 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
// Branch out if either operand's tag is not Int32Tag.
1038 notInt32Op1
.append(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
1039 notInt32Op2
.append(branch32(NotEqual
, regT3
, TrustedImm32(JSValue::Int32Tag
)));
// Int32 fast path: multiply the payloads; a signed overflow goes slow.
1043 addSlowCase(branchMul32(Overflow
, regT2
, regT0
));
// A zero product also goes slow: int32 cannot represent -0, and the slow
// path must decide whether this zero is actually negative zero.
1044 addSlowCase(branchTest32(Zero
, regT0
));
// Store the int32 result; the flag tells the store helper whether dst may
// alias one of the source operands.
1045 emitStoreInt32(dst
, regT0
, (op1
== dst
|| op2
== dst
));
// Without FPU support, the non-int32 exits can only be handled by the slow path.
1047 if (!supportsFloatingPoint()) {
1048 addSlowCase(notInt32Op1
);
1049 addSlowCase(notInt32Op2
);
// Double case: delegated to the shared binary double-op emitter, which links
// the notInt32 jump lists itself.
1055 emitBinaryDoubleOp(op_mul
, dst
, op1
, op2
, types
, notInt32Op1
, notInt32Op2
);
// emitSlow_op_mul: slow-path stitching for op_mul. Slow cases are consumed from
// `iter` in exactly the order emit_op_mul added them (overflow, zero result,
// then the per-operand int32/double checks), so the linkSlowCase sequence here
// must mirror the fast path and emitBinaryDoubleOp.
// NOTE(review): line-mangled extraction -- closing braces and the final
// stubCall.call(dst) are not visible here; verify against the original file.
1059 void JIT::emitSlow_op_mul(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1061 unsigned dst
= currentInstruction
[1].u
.operand
;
1062 unsigned op1
= currentInstruction
[2].u
.operand
;
1063 unsigned op2
= currentInstruction
[3].u
.operand
;
1064 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
// Capture the overflow slow case without linking it yet (it joins the generic
// stub path below); link the zero-result case now so we can disambiguate +0/-0.
1066 Jump overflow
= getSlowCase(iter
); // overflow check
1067 linkSlowCase(iter
); // zero result check
// If either operand payload is negative, the zero product is really -0.
// (assumes regT2/regT3 still hold the operand payloads from the fast path;
// the fast path's operand-save move is not visible in this extraction -- TODO confirm.)
1069 Jump negZero
= branchOr32(Signed
, regT2
, regT3
);
// Both operands non-negative: the result is a genuine +0; store it as int32
// and jump back to the end of the fast path.
1070 emitStoreInt32(dst
, TrustedImm32(0), (op1
== dst
|| op2
== dst
));
1072 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul
));
1075 #if ENABLE(VALUE_PROFILER)
1076 // We only get here if we have a genuine negative zero. Record this,
1077 // so that the speculative JIT knows that we failed speculation
1078 // because of a negative zero.
1079 add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock
->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset
)->m_counter
));
// Overflowed multiplications fall into the generic stub call below.
1081 overflow
.link(this);
// No FPU: the two notInt32 jump lists were added as two slow cases.
1083 if (!supportsFloatingPoint()) {
1084 linkSlowCase(iter
); // int32 check
1085 linkSlowCase(iter
); // int32 check
// With FPU: link the type-check slow cases that emitBinaryDoubleOp emitted;
// a check only exists when the static type prediction wasn't definitely-number,
// so the conditions here must match emitBinaryDoubleOp exactly.
1088 if (supportsFloatingPoint()) {
1089 if (!types
.first().definitelyIsNumber())
1090 linkSlowCase(iter
); // double check
1092 if (!types
.second().definitelyIsNumber()) {
1093 linkSlowCase(iter
); // int32 check
1094 linkSlowCase(iter
); // double check
// NOTE(review): the next two fragments look garbled -- a bare `Label` followed by
// `jitStubCall(this);` is not obviously valid; presumably this marked the start of
// the generic stub-call sequence in the original -- verify against upstream.
1098 Label
jitStubCall(this);
// Generic case: call the cti_op_mul stub with both original operands.
1099 JITStubCall
stubCall(this, cti_op_mul
);
1100 stubCall
.addArgument(op1
);
1101 stubCall
.addArgument(op2
);
// emit_op_div: baseline-JIT fast path for bytecode op_div (JSVALUE32_64 encoding).
// Int32/int32 division is performed in double precision, then converted back to
// int32 when the quotient is exactly representable; otherwise the double result is
// stored and a profiling counter is bumped so the DFG learns this site produces doubles.
// NOTE(review): line-mangled extraction -- braces, #else/#endif lines, and some
// statements are not visible here; verify the full body against the original file.
1107 void JIT::emit_op_div(Instruction
* currentInstruction
)
1109 unsigned dst
= currentInstruction
[1].u
.operand
;
1110 unsigned op1
= currentInstruction
[2].u
.operand
;
1111 unsigned op2
= currentInstruction
[3].u
.operand
;
1112 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
// Profile slot used below to count divisions with non-integer results.
1114 #if ENABLE(VALUE_PROFILER)
1115 m_codeBlock
->addSpecialFastCaseProfile(m_bytecodeOffset
);
// Division needs the FPU; without one, everything goes to the stub.
1118 if (!supportsFloatingPoint()) {
1119 addSlowCase(jump());
1124 JumpList notInt32Op1
;
1125 JumpList notInt32Op2
;
// Load both operands: op1 -> regT1(tag):regT0(payload), op2 -> regT3(tag):regT2(payload).
1129 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
// Exit to the double path if either operand is not an int32.
1131 notInt32Op1
.append(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
1132 notInt32Op2
.append(branch32(NotEqual
, regT3
, TrustedImm32(JSValue::Int32Tag
)));
// Int32 case: promote both payloads to double and divide
// (fpRegT0 <- fpRegT0 / fpRegT1, per MacroAssembler divDouble(src, dst)).
1134 convertInt32ToDouble(regT0
, fpRegT0
);
1135 convertInt32ToDouble(regT2
, fpRegT1
);
1136 divDouble(fpRegT1
, fpRegT0
);
1137 #if ENABLE(VALUE_PROFILER)
1138 // Is the result actually an integer? The DFG JIT would really like to know. If it's
1139 // not an integer, we increment a count. If this together with the slow case counter
1140 // are below threshold then the DFG JIT will compile this division with a speculation
1141 // that the remainder is zero.
1143 // As well, there are cases where a double result here would cause an important field
1144 // in the heap to sometimes have doubles in it, resulting in double predictions getting
1145 // propagated to a use site where it might cause damage (such as the index to an array
1146 // access). So if we are DFG compiling anything in the program, we want this code to
1147 // ensure that it produces integers whenever possible.
1149 // FIXME: This will fail to convert to integer if the result is zero. We should
1150 // distinguish between positive zero and negative zero here.
1152 JumpList notInteger
;
// Try to convert the double quotient to an int32 in regT2; inexact results
// branch to notInteger (fpRegT1 is used as scratch by the helper).
1153 branchConvertDoubleToInt32(fpRegT0
, regT2
, notInteger
, fpRegT1
);
1154 // If we've got an integer, we might as well make that the result of the division.
1155 emitStoreInt32(dst
, regT2
);
// Non-integer quotient: record it in the special fast-case profile, then
// store the double result.
1157 notInteger
.link(this);
1158 add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock
->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset
)->m_counter
));
1159 emitStoreDouble(dst
, fpRegT0
);
// Without VALUE_PROFILER the double result is stored unconditionally
// (this duplicate store is presumably the #else branch of the #if above;
// the #else/#endif lines are not visible in this extraction -- TODO confirm).
1161 emitStoreDouble(dst
, fpRegT0
);
// Double case: delegated to the shared binary double-op emitter.
1166 emitBinaryDoubleOp(op_div
, dst
, op1
, op2
, types
, notInt32Op1
, notInt32Op2
);
// emitSlow_op_div: slow-path stitching for op_div. As with op_mul, the
// linkSlowCase order must mirror the order the fast path / emitBinaryDoubleOp
// registered the slow cases.
// NOTE(review): line-mangled extraction -- the no-FPU branch body (linking the
// unconditional slow case), closing braces, and the final stubCall.call(dst)
// are not visible here; verify against the original file.
1170 void JIT::emitSlow_op_div(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1172 unsigned dst
= currentInstruction
[1].u
.operand
;
1173 unsigned op1
= currentInstruction
[2].u
.operand
;
1174 unsigned op2
= currentInstruction
[3].u
.operand
;
1175 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
// No-FPU builds emitted a single unconditional slow case in emit_op_div.
1177 if (!supportsFloatingPoint())
// FPU builds: link the type-check slow cases; a check exists only when the
// static prediction wasn't definitely-number (must match emitBinaryDoubleOp).
1180 if (!types
.first().definitelyIsNumber())
1181 linkSlowCase(iter
); // double check
1183 if (!types
.second().definitelyIsNumber()) {
1184 linkSlowCase(iter
); // int32 check
1185 linkSlowCase(iter
); // double check
// Generic case: call the cti_op_div stub with both original operands.
1189 JITStubCall
stubCall(this, cti_op_div
);
1190 stubCall
.addArgument(op1
);
1191 stubCall
.addArgument(op2
);
1197 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
// emit_op_mod: baseline-JIT code for bytecode op_mod. Only x86/x86_64 get an
// inline fast path (using the IDIV instruction, whose remainder lands in edx);
// every other architecture calls the cti_op_mod stub directly.
// NOTE(review): line-mangled extraction -- braces, the #else line, and the
// statements that move the numerator payload into eax and sign-extend into edx
// (cdq) before the idiv are not visible here; verify against the original file.
1199 void JIT::emit_op_mod(Instruction
* currentInstruction
)
1201 unsigned dst
= currentInstruction
[1].u
.operand
;
1202 unsigned op1
= currentInstruction
[2].u
.operand
;
1203 unsigned op2
= currentInstruction
[3].u
.operand
;
1205 #if CPU(X86) || CPU(X86_64)
1206 // Make sure registers are correct for x86 IDIV instructions.
1207 ASSERT(regT0
== X86Registers::eax
);
1208 ASSERT(regT1
== X86Registers::edx
);
1209 ASSERT(regT2
== X86Registers::ecx
);
1210 ASSERT(regT3
== X86Registers::ebx
);
// Load operands: op1 -> regT0(tag):regT3(payload), op2 -> regT1(tag):regT2(payload)
// (tag registers confirmed by the Int32Tag checks below).
1212 emitLoad2(op1
, regT0
, regT3
, op2
, regT1
, regT2
);
// Both operands must be int32; otherwise take the stub path.
1213 addSlowCase(branch32(NotEqual
, regT1
, TrustedImm32(JSValue::Int32Tag
)));
1214 addSlowCase(branch32(NotEqual
, regT0
, TrustedImm32(JSValue::Int32Tag
)));
// Division by zero would fault in IDIV: go slow.
1217 addSlowCase(branchTest32(Zero
, regT2
));
// INT_MIN % -1 overflows IDIV (hardware #DE): go slow for that one pair.
// (assumes regT0 holds the numerator payload at this point; the move that
// puts it there is not visible in this extraction -- TODO confirm.)
1218 Jump denominatorNotNeg1
= branch32(NotEqual
, regT2
, TrustedImm32(-1));
1219 addSlowCase(branch32(Equal
, regT0
, TrustedImm32(-2147483647-1)));
1220 denominatorNotNeg1
.link(this);
// IDIV by the denominator in ecx; remainder ends up in edx (regT1).
1222 m_assembler
.idivl_r(regT2
);
// A zero remainder with a negative numerator is really -0, which int32
// cannot represent: go slow. regT3 still holds the numerator payload.
1223 Jump numeratorPositive
= branch32(GreaterThanOrEqual
, regT3
, TrustedImm32(0));
1224 addSlowCase(branchTest32(Zero
, regT1
));
1225 numeratorPositive
.link(this);
// Store the int32 remainder; the flag notes whether dst aliases an operand.
1226 emitStoreInt32(dst
, regT1
, (op1
== dst
|| op2
== dst
));
// Non-x86: no inline path; call the cti_op_mod stub (presumably the #else
// branch of the #if CPU(...) above -- the #else line is not visible here).
1228 JITStubCall
stubCall(this, cti_op_mod
);
1229 stubCall
.addArgument(op1
);
1230 stubCall
.addArgument(op2
);
// emitSlow_op_mod: slow path for op_mod. On x86/x86_64 it links the slow cases
// registered by emit_op_mod and falls back to the cti_op_mod stub; on other
// architectures the fast path already was a stub call, so there is nothing to do.
// NOTE(review): line-mangled extraction -- the linkSlowCase calls that consume
// emit_op_mod's slow cases (and the #else/#endif/braces) are not visible here;
// verify against the original file.
1235 void JIT::emitSlow_op_mod(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1237 #if CPU(X86) || CPU(X86_64)
1238 unsigned result
= currentInstruction
[1].u
.operand
;
1239 unsigned op1
= currentInstruction
[2].u
.operand
;
1240 unsigned op2
= currentInstruction
[3].u
.operand
;
// Generic case: call the cti_op_mod stub and store its result.
1246 JITStubCall
stubCall(this, cti_op_mod
);
1247 stubCall
.addArgument(op1
);
1248 stubCall
.addArgument(op2
);
1249 stubCall
.call(result
);
// Non-x86: nothing to emit; silence the unused-parameter warning.
1251 UNUSED_PARAM(currentInstruction
);
1253 // We would have really useful assertions here if it wasn't for the compiler's
1254 // insistence on attribute noreturn.
1255 // ASSERT_NOT_REACHED();
1259 /* ------------------------------ END: OP_MOD ------------------------------ */
1263 #endif // USE(JSVALUE32_64)
1264 #endif // ENABLE(JIT)