/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JSFunction.h"
#include "ResultType.h"
#include "SamplingTool.h"
50 void JIT::emit_op_negate(Instruction
* currentInstruction
)
52 unsigned dst
= currentInstruction
[1].u
.operand
;
53 unsigned src
= currentInstruction
[2].u
.operand
;
55 emitLoad(src
, regT1
, regT0
);
57 Jump srcNotInt
= branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
));
58 addSlowCase(branch32(Equal
, regT0
, Imm32(0)));
61 emitStoreInt32(dst
, regT0
, (dst
== src
));
66 addSlowCase(branch32(Above
, regT1
, Imm32(JSValue::LowestTag
)));
68 xor32(Imm32(1 << 31), regT1
);
69 store32(regT1
, tagFor(dst
));
71 store32(regT0
, payloadFor(dst
));
76 void JIT::emitSlow_op_negate(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
78 unsigned dst
= currentInstruction
[1].u
.operand
;
80 linkSlowCase(iter
); // 0 check
81 linkSlowCase(iter
); // double check
83 JITStubCall
stubCall(this, cti_op_negate
);
84 stubCall
.addArgument(regT1
, regT0
);
88 void JIT::emit_op_jnless(Instruction
* currentInstruction
)
90 unsigned op1
= currentInstruction
[1].u
.operand
;
91 unsigned op2
= currentInstruction
[2].u
.operand
;
92 unsigned target
= currentInstruction
[3].u
.operand
;
98 if (isOperandConstantImmediateInt(op1
)) {
99 emitLoad(op2
, regT3
, regT2
);
100 notInt32Op2
.append(branch32(NotEqual
, regT3
, Imm32(JSValue::Int32Tag
)));
101 addJump(branch32(LessThanOrEqual
, regT2
, Imm32(getConstantOperand(op1
).asInt32())), target
+ 3);
102 } else if (isOperandConstantImmediateInt(op2
)) {
103 emitLoad(op1
, regT1
, regT0
);
104 notInt32Op1
.append(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
105 addJump(branch32(GreaterThanOrEqual
, regT0
, Imm32(getConstantOperand(op2
).asInt32())), target
+ 3);
107 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
108 notInt32Op1
.append(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
109 notInt32Op2
.append(branch32(NotEqual
, regT3
, Imm32(JSValue::Int32Tag
)));
110 addJump(branch32(GreaterThanOrEqual
, regT0
, regT2
), target
+ 3);
113 if (!supportsFloatingPoint()) {
114 addSlowCase(notInt32Op1
);
115 addSlowCase(notInt32Op2
);
121 emitBinaryDoubleOp(op_jnless
, target
, op1
, op2
, OperandTypes(), notInt32Op1
, notInt32Op2
, !isOperandConstantImmediateInt(op1
), isOperandConstantImmediateInt(op1
) || !isOperandConstantImmediateInt(op2
));
125 void JIT::emitSlow_op_jnless(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
127 unsigned op1
= currentInstruction
[1].u
.operand
;
128 unsigned op2
= currentInstruction
[2].u
.operand
;
129 unsigned target
= currentInstruction
[3].u
.operand
;
131 if (!supportsFloatingPoint()) {
132 if (!isOperandConstantImmediateInt(op1
) && !isOperandConstantImmediateInt(op2
))
133 linkSlowCase(iter
); // int32 check
134 linkSlowCase(iter
); // int32 check
136 if (!isOperandConstantImmediateInt(op1
)) {
137 linkSlowCase(iter
); // double check
138 linkSlowCase(iter
); // int32 check
140 if (isOperandConstantImmediateInt(op1
) || !isOperandConstantImmediateInt(op2
))
141 linkSlowCase(iter
); // double check
144 JITStubCall
stubCall(this, cti_op_jless
);
145 stubCall
.addArgument(op1
);
146 stubCall
.addArgument(op2
);
148 emitJumpSlowToHot(branchTest32(Zero
, regT0
), target
+ 3);
151 void JIT::emit_op_jnlesseq(Instruction
* currentInstruction
)
153 unsigned op1
= currentInstruction
[1].u
.operand
;
154 unsigned op2
= currentInstruction
[2].u
.operand
;
155 unsigned target
= currentInstruction
[3].u
.operand
;
157 JumpList notInt32Op1
;
158 JumpList notInt32Op2
;
161 if (isOperandConstantImmediateInt(op1
)) {
162 emitLoad(op2
, regT3
, regT2
);
163 notInt32Op2
.append(branch32(NotEqual
, regT3
, Imm32(JSValue::Int32Tag
)));
164 addJump(branch32(LessThan
, regT2
, Imm32(getConstantOperand(op1
).asInt32())), target
+ 3);
165 } else if (isOperandConstantImmediateInt(op2
)) {
166 emitLoad(op1
, regT1
, regT0
);
167 notInt32Op1
.append(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
168 addJump(branch32(GreaterThan
, regT0
, Imm32(getConstantOperand(op2
).asInt32())), target
+ 3);
170 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
171 notInt32Op1
.append(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
172 notInt32Op2
.append(branch32(NotEqual
, regT3
, Imm32(JSValue::Int32Tag
)));
173 addJump(branch32(GreaterThan
, regT0
, regT2
), target
+ 3);
176 if (!supportsFloatingPoint()) {
177 addSlowCase(notInt32Op1
);
178 addSlowCase(notInt32Op2
);
184 emitBinaryDoubleOp(op_jnlesseq
, target
, op1
, op2
, OperandTypes(), notInt32Op1
, notInt32Op2
, !isOperandConstantImmediateInt(op1
), isOperandConstantImmediateInt(op1
) || !isOperandConstantImmediateInt(op2
));
188 void JIT::emitSlow_op_jnlesseq(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
190 unsigned op1
= currentInstruction
[1].u
.operand
;
191 unsigned op2
= currentInstruction
[2].u
.operand
;
192 unsigned target
= currentInstruction
[3].u
.operand
;
194 if (!supportsFloatingPoint()) {
195 if (!isOperandConstantImmediateInt(op1
) && !isOperandConstantImmediateInt(op2
))
196 linkSlowCase(iter
); // int32 check
197 linkSlowCase(iter
); // int32 check
199 if (!isOperandConstantImmediateInt(op1
)) {
200 linkSlowCase(iter
); // double check
201 linkSlowCase(iter
); // int32 check
203 if (isOperandConstantImmediateInt(op1
) || !isOperandConstantImmediateInt(op2
))
204 linkSlowCase(iter
); // double check
207 JITStubCall
stubCall(this, cti_op_jlesseq
);
208 stubCall
.addArgument(op1
);
209 stubCall
.addArgument(op2
);
211 emitJumpSlowToHot(branchTest32(Zero
, regT0
), target
+ 3);
216 void JIT::emit_op_lshift(Instruction
* currentInstruction
)
218 unsigned dst
= currentInstruction
[1].u
.operand
;
219 unsigned op1
= currentInstruction
[2].u
.operand
;
220 unsigned op2
= currentInstruction
[3].u
.operand
;
222 if (isOperandConstantImmediateInt(op2
)) {
223 emitLoad(op1
, regT1
, regT0
);
224 addSlowCase(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
225 lshift32(Imm32(getConstantOperand(op2
).asInt32()), regT0
);
226 emitStoreInt32(dst
, regT0
, dst
== op1
);
230 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
231 if (!isOperandConstantImmediateInt(op1
))
232 addSlowCase(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
233 addSlowCase(branch32(NotEqual
, regT3
, Imm32(JSValue::Int32Tag
)));
234 lshift32(regT2
, regT0
);
235 emitStoreInt32(dst
, regT0
, dst
== op1
|| dst
== op2
);
238 void JIT::emitSlow_op_lshift(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
240 unsigned dst
= currentInstruction
[1].u
.operand
;
241 unsigned op1
= currentInstruction
[2].u
.operand
;
242 unsigned op2
= currentInstruction
[3].u
.operand
;
244 if (!isOperandConstantImmediateInt(op1
) && !isOperandConstantImmediateInt(op2
))
245 linkSlowCase(iter
); // int32 check
246 linkSlowCase(iter
); // int32 check
248 JITStubCall
stubCall(this, cti_op_lshift
);
249 stubCall
.addArgument(op1
);
250 stubCall
.addArgument(op2
);
256 void JIT::emit_op_rshift(Instruction
* currentInstruction
)
258 unsigned dst
= currentInstruction
[1].u
.operand
;
259 unsigned op1
= currentInstruction
[2].u
.operand
;
260 unsigned op2
= currentInstruction
[3].u
.operand
;
262 if (isOperandConstantImmediateInt(op2
)) {
263 emitLoad(op1
, regT1
, regT0
);
264 addSlowCase(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
265 rshift32(Imm32(getConstantOperand(op2
).asInt32()), regT0
);
266 emitStoreInt32(dst
, regT0
, dst
== op1
);
270 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
271 if (!isOperandConstantImmediateInt(op1
))
272 addSlowCase(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
273 addSlowCase(branch32(NotEqual
, regT3
, Imm32(JSValue::Int32Tag
)));
274 rshift32(regT2
, regT0
);
275 emitStoreInt32(dst
, regT0
, dst
== op1
|| dst
== op2
);
278 void JIT::emitSlow_op_rshift(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
280 unsigned dst
= currentInstruction
[1].u
.operand
;
281 unsigned op1
= currentInstruction
[2].u
.operand
;
282 unsigned op2
= currentInstruction
[3].u
.operand
;
284 if (!isOperandConstantImmediateInt(op1
) && !isOperandConstantImmediateInt(op2
))
285 linkSlowCase(iter
); // int32 check
286 linkSlowCase(iter
); // int32 check
288 JITStubCall
stubCall(this, cti_op_rshift
);
289 stubCall
.addArgument(op1
);
290 stubCall
.addArgument(op2
);
296 void JIT::emit_op_bitand(Instruction
* currentInstruction
)
298 unsigned dst
= currentInstruction
[1].u
.operand
;
299 unsigned op1
= currentInstruction
[2].u
.operand
;
300 unsigned op2
= currentInstruction
[3].u
.operand
;
304 if (getOperandConstantImmediateInt(op1
, op2
, op
, constant
)) {
305 emitLoad(op
, regT1
, regT0
);
306 addSlowCase(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
307 and32(Imm32(constant
), regT0
);
308 emitStoreInt32(dst
, regT0
, (op
== dst
));
312 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
313 addSlowCase(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
314 addSlowCase(branch32(NotEqual
, regT3
, Imm32(JSValue::Int32Tag
)));
316 emitStoreInt32(dst
, regT0
, (op1
== dst
|| op2
== dst
));
319 void JIT::emitSlow_op_bitand(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
321 unsigned dst
= currentInstruction
[1].u
.operand
;
322 unsigned op1
= currentInstruction
[2].u
.operand
;
323 unsigned op2
= currentInstruction
[3].u
.operand
;
325 if (!isOperandConstantImmediateInt(op1
) && !isOperandConstantImmediateInt(op2
))
326 linkSlowCase(iter
); // int32 check
327 linkSlowCase(iter
); // int32 check
329 JITStubCall
stubCall(this, cti_op_bitand
);
330 stubCall
.addArgument(op1
);
331 stubCall
.addArgument(op2
);
337 void JIT::emit_op_bitor(Instruction
* currentInstruction
)
339 unsigned dst
= currentInstruction
[1].u
.operand
;
340 unsigned op1
= currentInstruction
[2].u
.operand
;
341 unsigned op2
= currentInstruction
[3].u
.operand
;
345 if (getOperandConstantImmediateInt(op1
, op2
, op
, constant
)) {
346 emitLoad(op
, regT1
, regT0
);
347 addSlowCase(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
348 or32(Imm32(constant
), regT0
);
349 emitStoreInt32(dst
, regT0
, (op
== dst
));
353 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
354 addSlowCase(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
355 addSlowCase(branch32(NotEqual
, regT3
, Imm32(JSValue::Int32Tag
)));
357 emitStoreInt32(dst
, regT0
, (op1
== dst
|| op2
== dst
));
360 void JIT::emitSlow_op_bitor(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
362 unsigned dst
= currentInstruction
[1].u
.operand
;
363 unsigned op1
= currentInstruction
[2].u
.operand
;
364 unsigned op2
= currentInstruction
[3].u
.operand
;
366 if (!isOperandConstantImmediateInt(op1
) && !isOperandConstantImmediateInt(op2
))
367 linkSlowCase(iter
); // int32 check
368 linkSlowCase(iter
); // int32 check
370 JITStubCall
stubCall(this, cti_op_bitor
);
371 stubCall
.addArgument(op1
);
372 stubCall
.addArgument(op2
);
378 void JIT::emit_op_bitxor(Instruction
* currentInstruction
)
380 unsigned dst
= currentInstruction
[1].u
.operand
;
381 unsigned op1
= currentInstruction
[2].u
.operand
;
382 unsigned op2
= currentInstruction
[3].u
.operand
;
386 if (getOperandConstantImmediateInt(op1
, op2
, op
, constant
)) {
387 emitLoad(op
, regT1
, regT0
);
388 addSlowCase(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
389 xor32(Imm32(constant
), regT0
);
390 emitStoreInt32(dst
, regT0
, (op
== dst
));
394 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
395 addSlowCase(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
396 addSlowCase(branch32(NotEqual
, regT3
, Imm32(JSValue::Int32Tag
)));
398 emitStoreInt32(dst
, regT0
, (op1
== dst
|| op2
== dst
));
401 void JIT::emitSlow_op_bitxor(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
403 unsigned dst
= currentInstruction
[1].u
.operand
;
404 unsigned op1
= currentInstruction
[2].u
.operand
;
405 unsigned op2
= currentInstruction
[3].u
.operand
;
407 if (!isOperandConstantImmediateInt(op1
) && !isOperandConstantImmediateInt(op2
))
408 linkSlowCase(iter
); // int32 check
409 linkSlowCase(iter
); // int32 check
411 JITStubCall
stubCall(this, cti_op_bitxor
);
412 stubCall
.addArgument(op1
);
413 stubCall
.addArgument(op2
);
419 void JIT::emit_op_bitnot(Instruction
* currentInstruction
)
421 unsigned dst
= currentInstruction
[1].u
.operand
;
422 unsigned src
= currentInstruction
[2].u
.operand
;
424 emitLoad(src
, regT1
, regT0
);
425 addSlowCase(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
428 emitStoreInt32(dst
, regT0
, (dst
== src
));
431 void JIT::emitSlow_op_bitnot(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
433 unsigned dst
= currentInstruction
[1].u
.operand
;
435 linkSlowCase(iter
); // int32 check
437 JITStubCall
stubCall(this, cti_op_bitnot
);
438 stubCall
.addArgument(regT1
, regT0
);
444 void JIT::emit_op_post_inc(Instruction
* currentInstruction
)
446 unsigned dst
= currentInstruction
[1].u
.operand
;
447 unsigned srcDst
= currentInstruction
[2].u
.operand
;
449 emitLoad(srcDst
, regT1
, regT0
);
450 addSlowCase(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
452 if (dst
== srcDst
) // x = x++ is a noop for ints.
455 emitStoreInt32(dst
, regT0
);
457 addSlowCase(branchAdd32(Overflow
, Imm32(1), regT0
));
458 emitStoreInt32(srcDst
, regT0
, true);
461 void JIT::emitSlow_op_post_inc(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
463 unsigned dst
= currentInstruction
[1].u
.operand
;
464 unsigned srcDst
= currentInstruction
[2].u
.operand
;
466 linkSlowCase(iter
); // int32 check
468 linkSlowCase(iter
); // overflow check
470 JITStubCall
stubCall(this, cti_op_post_inc
);
471 stubCall
.addArgument(srcDst
);
472 stubCall
.addArgument(Imm32(srcDst
));
478 void JIT::emit_op_post_dec(Instruction
* currentInstruction
)
480 unsigned dst
= currentInstruction
[1].u
.operand
;
481 unsigned srcDst
= currentInstruction
[2].u
.operand
;
483 emitLoad(srcDst
, regT1
, regT0
);
484 addSlowCase(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
486 if (dst
== srcDst
) // x = x-- is a noop for ints.
489 emitStoreInt32(dst
, regT0
);
491 addSlowCase(branchSub32(Overflow
, Imm32(1), regT0
));
492 emitStoreInt32(srcDst
, regT0
, true);
495 void JIT::emitSlow_op_post_dec(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
497 unsigned dst
= currentInstruction
[1].u
.operand
;
498 unsigned srcDst
= currentInstruction
[2].u
.operand
;
500 linkSlowCase(iter
); // int32 check
502 linkSlowCase(iter
); // overflow check
504 JITStubCall
stubCall(this, cti_op_post_dec
);
505 stubCall
.addArgument(srcDst
);
506 stubCall
.addArgument(Imm32(srcDst
));
512 void JIT::emit_op_pre_inc(Instruction
* currentInstruction
)
514 unsigned srcDst
= currentInstruction
[1].u
.operand
;
516 emitLoad(srcDst
, regT1
, regT0
);
518 addSlowCase(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
519 addSlowCase(branchAdd32(Overflow
, Imm32(1), regT0
));
520 emitStoreInt32(srcDst
, regT0
, true);
523 void JIT::emitSlow_op_pre_inc(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
525 unsigned srcDst
= currentInstruction
[1].u
.operand
;
527 linkSlowCase(iter
); // int32 check
528 linkSlowCase(iter
); // overflow check
530 JITStubCall
stubCall(this, cti_op_pre_inc
);
531 stubCall
.addArgument(srcDst
);
532 stubCall
.call(srcDst
);
537 void JIT::emit_op_pre_dec(Instruction
* currentInstruction
)
539 unsigned srcDst
= currentInstruction
[1].u
.operand
;
541 emitLoad(srcDst
, regT1
, regT0
);
543 addSlowCase(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
544 addSlowCase(branchSub32(Overflow
, Imm32(1), regT0
));
545 emitStoreInt32(srcDst
, regT0
, true);
548 void JIT::emitSlow_op_pre_dec(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
550 unsigned srcDst
= currentInstruction
[1].u
.operand
;
552 linkSlowCase(iter
); // int32 check
553 linkSlowCase(iter
); // overflow check
555 JITStubCall
stubCall(this, cti_op_pre_dec
);
556 stubCall
.addArgument(srcDst
);
557 stubCall
.call(srcDst
);
562 void JIT::emit_op_add(Instruction
* currentInstruction
)
564 unsigned dst
= currentInstruction
[1].u
.operand
;
565 unsigned op1
= currentInstruction
[2].u
.operand
;
566 unsigned op2
= currentInstruction
[3].u
.operand
;
567 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
569 JumpList notInt32Op1
;
570 JumpList notInt32Op2
;
574 if (getOperandConstantImmediateInt(op1
, op2
, op
, constant
)) {
575 emitAdd32Constant(dst
, op
, constant
, op
== op1
? types
.first() : types
.second());
579 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
580 notInt32Op1
.append(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
581 notInt32Op2
.append(branch32(NotEqual
, regT3
, Imm32(JSValue::Int32Tag
)));
584 addSlowCase(branchAdd32(Overflow
, regT2
, regT0
));
585 emitStoreInt32(dst
, regT0
, (op1
== dst
|| op2
== dst
));
587 if (!supportsFloatingPoint()) {
588 addSlowCase(notInt32Op1
);
589 addSlowCase(notInt32Op2
);
595 emitBinaryDoubleOp(op_add
, dst
, op1
, op2
, types
, notInt32Op1
, notInt32Op2
);
599 void JIT::emitAdd32Constant(unsigned dst
, unsigned op
, int32_t constant
, ResultType opType
)
602 emitLoad(op
, regT1
, regT0
);
603 Jump notInt32
= branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
));
604 addSlowCase(branchAdd32(Overflow
, Imm32(constant
), regT0
));
605 emitStoreInt32(dst
, regT0
, (op
== dst
));
608 if (!supportsFloatingPoint()) {
609 addSlowCase(notInt32
);
615 if (!opType
.definitelyIsNumber())
616 addSlowCase(branch32(Above
, regT1
, Imm32(JSValue::LowestTag
)));
617 move(Imm32(constant
), regT2
);
618 convertInt32ToDouble(regT2
, fpRegT0
);
619 emitLoadDouble(op
, fpRegT1
);
620 addDouble(fpRegT1
, fpRegT0
);
621 emitStoreDouble(dst
, fpRegT0
);
626 void JIT::emitSlow_op_add(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
628 unsigned dst
= currentInstruction
[1].u
.operand
;
629 unsigned op1
= currentInstruction
[2].u
.operand
;
630 unsigned op2
= currentInstruction
[3].u
.operand
;
631 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
635 if (getOperandConstantImmediateInt(op1
, op2
, op
, constant
)) {
636 linkSlowCase(iter
); // overflow check
638 if (!supportsFloatingPoint())
639 linkSlowCase(iter
); // non-sse case
641 ResultType opType
= op
== op1
? types
.first() : types
.second();
642 if (!opType
.definitelyIsNumber())
643 linkSlowCase(iter
); // double check
646 linkSlowCase(iter
); // overflow check
648 if (!supportsFloatingPoint()) {
649 linkSlowCase(iter
); // int32 check
650 linkSlowCase(iter
); // int32 check
652 if (!types
.first().definitelyIsNumber())
653 linkSlowCase(iter
); // double check
655 if (!types
.second().definitelyIsNumber()) {
656 linkSlowCase(iter
); // int32 check
657 linkSlowCase(iter
); // double check
662 JITStubCall
stubCall(this, cti_op_add
);
663 stubCall
.addArgument(op1
);
664 stubCall
.addArgument(op2
);
670 void JIT::emit_op_sub(Instruction
* currentInstruction
)
672 unsigned dst
= currentInstruction
[1].u
.operand
;
673 unsigned op1
= currentInstruction
[2].u
.operand
;
674 unsigned op2
= currentInstruction
[3].u
.operand
;
675 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
677 JumpList notInt32Op1
;
678 JumpList notInt32Op2
;
680 if (isOperandConstantImmediateInt(op2
)) {
681 emitSub32Constant(dst
, op1
, getConstantOperand(op2
).asInt32(), types
.first());
685 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
686 notInt32Op1
.append(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
687 notInt32Op2
.append(branch32(NotEqual
, regT3
, Imm32(JSValue::Int32Tag
)));
690 addSlowCase(branchSub32(Overflow
, regT2
, regT0
));
691 emitStoreInt32(dst
, regT0
, (op1
== dst
|| op2
== dst
));
693 if (!supportsFloatingPoint()) {
694 addSlowCase(notInt32Op1
);
695 addSlowCase(notInt32Op2
);
701 emitBinaryDoubleOp(op_sub
, dst
, op1
, op2
, types
, notInt32Op1
, notInt32Op2
);
705 void JIT::emitSub32Constant(unsigned dst
, unsigned op
, int32_t constant
, ResultType opType
)
708 emitLoad(op
, regT1
, regT0
);
709 Jump notInt32
= branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
));
710 addSlowCase(branchSub32(Overflow
, Imm32(constant
), regT0
));
711 emitStoreInt32(dst
, regT0
, (op
== dst
));
714 if (!supportsFloatingPoint()) {
715 addSlowCase(notInt32
);
721 if (!opType
.definitelyIsNumber())
722 addSlowCase(branch32(Above
, regT1
, Imm32(JSValue::LowestTag
)));
723 move(Imm32(constant
), regT2
);
724 convertInt32ToDouble(regT2
, fpRegT0
);
725 emitLoadDouble(op
, fpRegT1
);
726 subDouble(fpRegT0
, fpRegT1
);
727 emitStoreDouble(dst
, fpRegT1
);
732 void JIT::emitSlow_op_sub(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
734 unsigned dst
= currentInstruction
[1].u
.operand
;
735 unsigned op1
= currentInstruction
[2].u
.operand
;
736 unsigned op2
= currentInstruction
[3].u
.operand
;
737 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
739 if (isOperandConstantImmediateInt(op2
)) {
740 linkSlowCase(iter
); // overflow check
742 if (!supportsFloatingPoint() || !types
.first().definitelyIsNumber())
743 linkSlowCase(iter
); // int32 or double check
745 linkSlowCase(iter
); // overflow check
747 if (!supportsFloatingPoint()) {
748 linkSlowCase(iter
); // int32 check
749 linkSlowCase(iter
); // int32 check
751 if (!types
.first().definitelyIsNumber())
752 linkSlowCase(iter
); // double check
754 if (!types
.second().definitelyIsNumber()) {
755 linkSlowCase(iter
); // int32 check
756 linkSlowCase(iter
); // double check
761 JITStubCall
stubCall(this, cti_op_sub
);
762 stubCall
.addArgument(op1
);
763 stubCall
.addArgument(op2
);
767 void JIT::emitBinaryDoubleOp(OpcodeID opcodeID
, unsigned dst
, unsigned op1
, unsigned op2
, OperandTypes types
, JumpList
& notInt32Op1
, JumpList
& notInt32Op2
, bool op1IsInRegisters
, bool op2IsInRegisters
)
771 if (!notInt32Op1
.empty()) {
772 // Double case 1: Op1 is not int32; Op2 is unknown.
773 notInt32Op1
.link(this);
775 ASSERT(op1IsInRegisters
);
777 // Verify Op1 is double.
778 if (!types
.first().definitelyIsNumber())
779 addSlowCase(branch32(Above
, regT1
, Imm32(JSValue::LowestTag
)));
781 if (!op2IsInRegisters
)
782 emitLoad(op2
, regT3
, regT2
);
784 Jump doubleOp2
= branch32(Below
, regT3
, Imm32(JSValue::LowestTag
));
786 if (!types
.second().definitelyIsNumber())
787 addSlowCase(branch32(NotEqual
, regT3
, Imm32(JSValue::Int32Tag
)));
789 convertInt32ToDouble(regT2
, fpRegT0
);
790 Jump doTheMath
= jump();
792 // Load Op2 as double into double register.
793 doubleOp2
.link(this);
794 emitLoadDouble(op2
, fpRegT0
);
797 doTheMath
.link(this);
800 emitLoadDouble(op1
, fpRegT2
);
801 mulDouble(fpRegT2
, fpRegT0
);
802 emitStoreDouble(dst
, fpRegT0
);
805 emitLoadDouble(op1
, fpRegT2
);
806 addDouble(fpRegT2
, fpRegT0
);
807 emitStoreDouble(dst
, fpRegT0
);
810 emitLoadDouble(op1
, fpRegT1
);
811 subDouble(fpRegT0
, fpRegT1
);
812 emitStoreDouble(dst
, fpRegT1
);
815 emitLoadDouble(op1
, fpRegT1
);
816 divDouble(fpRegT0
, fpRegT1
);
817 emitStoreDouble(dst
, fpRegT1
);
820 emitLoadDouble(op1
, fpRegT2
);
821 addJump(branchDouble(DoubleLessThanOrEqual
, fpRegT0
, fpRegT2
), dst
+ 3);
824 emitLoadDouble(op1
, fpRegT2
);
825 addJump(branchDouble(DoubleLessThan
, fpRegT0
, fpRegT2
), dst
+ 3);
828 ASSERT_NOT_REACHED();
831 if (!notInt32Op2
.empty())
835 if (!notInt32Op2
.empty()) {
836 // Double case 2: Op1 is int32; Op2 is not int32.
837 notInt32Op2
.link(this);
839 ASSERT(op2IsInRegisters
);
841 if (!op1IsInRegisters
)
842 emitLoadPayload(op1
, regT0
);
844 convertInt32ToDouble(regT0
, fpRegT0
);
846 // Verify op2 is double.
847 if (!types
.second().definitelyIsNumber())
848 addSlowCase(branch32(Above
, regT3
, Imm32(JSValue::LowestTag
)));
853 emitLoadDouble(op2
, fpRegT2
);
854 mulDouble(fpRegT2
, fpRegT0
);
855 emitStoreDouble(dst
, fpRegT0
);
858 emitLoadDouble(op2
, fpRegT2
);
859 addDouble(fpRegT2
, fpRegT0
);
860 emitStoreDouble(dst
, fpRegT0
);
863 emitLoadDouble(op2
, fpRegT2
);
864 subDouble(fpRegT2
, fpRegT0
);
865 emitStoreDouble(dst
, fpRegT0
);
868 emitLoadDouble(op2
, fpRegT2
);
869 divDouble(fpRegT2
, fpRegT0
);
870 emitStoreDouble(dst
, fpRegT0
);
873 emitLoadDouble(op2
, fpRegT1
);
874 addJump(branchDouble(DoubleLessThanOrEqual
, fpRegT1
, fpRegT0
), dst
+ 3);
877 emitLoadDouble(op2
, fpRegT1
);
878 addJump(branchDouble(DoubleLessThan
, fpRegT1
, fpRegT0
), dst
+ 3);
881 ASSERT_NOT_REACHED();
888 // Multiplication (*)
890 void JIT::emit_op_mul(Instruction
* currentInstruction
)
892 unsigned dst
= currentInstruction
[1].u
.operand
;
893 unsigned op1
= currentInstruction
[2].u
.operand
;
894 unsigned op2
= currentInstruction
[3].u
.operand
;
895 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
897 JumpList notInt32Op1
;
898 JumpList notInt32Op2
;
900 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
901 notInt32Op1
.append(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
902 notInt32Op2
.append(branch32(NotEqual
, regT3
, Imm32(JSValue::Int32Tag
)));
906 addSlowCase(branchMul32(Overflow
, regT2
, regT0
));
907 addSlowCase(branchTest32(Zero
, regT0
));
908 emitStoreInt32(dst
, regT0
, (op1
== dst
|| op2
== dst
));
910 if (!supportsFloatingPoint()) {
911 addSlowCase(notInt32Op1
);
912 addSlowCase(notInt32Op2
);
918 emitBinaryDoubleOp(op_mul
, dst
, op1
, op2
, types
, notInt32Op1
, notInt32Op2
);
922 void JIT::emitSlow_op_mul(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
924 unsigned dst
= currentInstruction
[1].u
.operand
;
925 unsigned op1
= currentInstruction
[2].u
.operand
;
926 unsigned op2
= currentInstruction
[3].u
.operand
;
927 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
929 Jump overflow
= getSlowCase(iter
); // overflow check
930 linkSlowCase(iter
); // zero result check
932 Jump negZero
= branchOr32(Signed
, regT2
, regT3
);
933 emitStoreInt32(dst
, Imm32(0), (op1
== dst
|| op2
== dst
));
935 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul
));
940 if (!supportsFloatingPoint()) {
941 linkSlowCase(iter
); // int32 check
942 linkSlowCase(iter
); // int32 check
945 if (supportsFloatingPoint()) {
946 if (!types
.first().definitelyIsNumber())
947 linkSlowCase(iter
); // double check
949 if (!types
.second().definitelyIsNumber()) {
950 linkSlowCase(iter
); // int32 check
951 linkSlowCase(iter
); // double check
955 Label
jitStubCall(this);
956 JITStubCall
stubCall(this, cti_op_mul
);
957 stubCall
.addArgument(op1
);
958 stubCall
.addArgument(op2
);
964 void JIT::emit_op_div(Instruction
* currentInstruction
)
966 unsigned dst
= currentInstruction
[1].u
.operand
;
967 unsigned op1
= currentInstruction
[2].u
.operand
;
968 unsigned op2
= currentInstruction
[3].u
.operand
;
969 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
971 if (!supportsFloatingPoint()) {
977 JumpList notInt32Op1
;
978 JumpList notInt32Op2
;
982 emitLoad2(op1
, regT1
, regT0
, op2
, regT3
, regT2
);
984 notInt32Op1
.append(branch32(NotEqual
, regT1
, Imm32(JSValue::Int32Tag
)));
985 notInt32Op2
.append(branch32(NotEqual
, regT3
, Imm32(JSValue::Int32Tag
)));
987 convertInt32ToDouble(regT0
, fpRegT0
);
988 convertInt32ToDouble(regT2
, fpRegT1
);
989 divDouble(fpRegT1
, fpRegT0
);
991 JumpList doubleResult
;
992 if (!isOperandConstantImmediateInt(op1
) || getConstantOperand(op1
).asInt32() > 1) {
993 m_assembler
.cvttsd2si_rr(fpRegT0
, regT0
);
994 convertInt32ToDouble(regT0
, fpRegT1
);
995 m_assembler
.ucomisd_rr(fpRegT1
, fpRegT0
);
997 doubleResult
.append(m_assembler
.jne());
998 doubleResult
.append(m_assembler
.jp());
1000 doubleResult
.append(branchTest32(Zero
, regT0
));
1003 emitStoreInt32(dst
, regT0
, (op1
== dst
|| op2
== dst
));
1008 doubleResult
.link(this);
1009 emitStoreDouble(dst
, fpRegT0
);
1013 emitBinaryDoubleOp(op_div
, dst
, op1
, op2
, types
, notInt32Op1
, notInt32Op2
);
1017 void JIT::emitSlow_op_div(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1019 unsigned dst
= currentInstruction
[1].u
.operand
;
1020 unsigned op1
= currentInstruction
[2].u
.operand
;
1021 unsigned op2
= currentInstruction
[3].u
.operand
;
1022 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
1024 if (!supportsFloatingPoint())
1027 if (!types
.first().definitelyIsNumber())
1028 linkSlowCase(iter
); // double check
1030 if (!types
.second().definitelyIsNumber()) {
1031 linkSlowCase(iter
); // int32 check
1032 linkSlowCase(iter
); // double check
1036 JITStubCall
stubCall(this, cti_op_div
);
1037 stubCall
.addArgument(op1
);
1038 stubCall
.addArgument(op2
);
1044 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
1046 #if PLATFORM(X86) || PLATFORM(X86_64)
1048 void JIT::emit_op_mod(Instruction
* currentInstruction
)
1050 unsigned dst
= currentInstruction
[1].u
.operand
;
1051 unsigned op1
= currentInstruction
[2].u
.operand
;
1052 unsigned op2
= currentInstruction
[3].u
.operand
;
1054 if (isOperandConstantImmediateInt(op2
) && getConstantOperand(op2
).asInt32() != 0) {
1055 emitLoad(op1
, X86::edx
, X86::eax
);
1056 move(Imm32(getConstantOperand(op2
).asInt32()), X86::ecx
);
1057 addSlowCase(branch32(NotEqual
, X86::edx
, Imm32(JSValue::Int32Tag
)));
1058 if (getConstantOperand(op2
).asInt32() == -1)
1059 addSlowCase(branch32(Equal
, X86::eax
, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
1061 emitLoad2(op1
, X86::edx
, X86::eax
, op2
, X86::ebx
, X86::ecx
);
1062 addSlowCase(branch32(NotEqual
, X86::edx
, Imm32(JSValue::Int32Tag
)));
1063 addSlowCase(branch32(NotEqual
, X86::ebx
, Imm32(JSValue::Int32Tag
)));
1065 addSlowCase(branch32(Equal
, X86::eax
, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
1066 addSlowCase(branch32(Equal
, X86::ecx
, Imm32(0))); // divide by 0
1069 move(X86::eax
, X86::ebx
); // Save dividend payload, in case of 0.
1071 m_assembler
.idivl_r(X86::ecx
);
1073 // If the remainder is zero and the dividend is negative, the result is -0.
1074 Jump storeResult1
= branchTest32(NonZero
, X86::edx
);
1075 Jump storeResult2
= branchTest32(Zero
, X86::ebx
, Imm32(0x80000000)); // not negative
1076 emitStore(dst
, jsNumber(m_globalData
, -0.0));
1079 storeResult1
.link(this);
1080 storeResult2
.link(this);
1081 emitStoreInt32(dst
, X86::edx
, (op1
== dst
|| op2
== dst
));
1085 void JIT::emitSlow_op_mod(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1087 unsigned dst
= currentInstruction
[1].u
.operand
;
1088 unsigned op1
= currentInstruction
[2].u
.operand
;
1089 unsigned op2
= currentInstruction
[3].u
.operand
;
1091 if (isOperandConstantImmediateInt(op2
) && getConstantOperand(op2
).asInt32() != 0) {
1092 linkSlowCase(iter
); // int32 check
1093 if (getConstantOperand(op2
).asInt32() == -1)
1094 linkSlowCase(iter
); // 0x80000000 check
1096 linkSlowCase(iter
); // int32 check
1097 linkSlowCase(iter
); // int32 check
1098 linkSlowCase(iter
); // 0 check
1099 linkSlowCase(iter
); // 0x80000000 check
1102 JITStubCall
stubCall(this, cti_op_mod
);
1103 stubCall
.addArgument(op1
);
1104 stubCall
.addArgument(op2
);
1108 #else // PLATFORM(X86) || PLATFORM(X86_64)
1110 void JIT::emit_op_mod(Instruction
* currentInstruction
)
1112 unsigned dst
= currentInstruction
[1].u
.operand
;
1113 unsigned op1
= currentInstruction
[2].u
.operand
;
1114 unsigned op2
= currentInstruction
[3].u
.operand
;
1116 JITStubCall
stubCall(this, cti_op_mod
);
1117 stubCall
.addArgument(op1
);
1118 stubCall
.addArgument(op2
);
1122 void JIT::emitSlow_op_mod(Instruction
*, Vector
<SlowCaseEntry
>::iterator
&)
1126 #endif // PLATFORM(X86) || PLATFORM(X86_64)
1128 /* ------------------------------ END: OP_MOD ------------------------------ */
1130 #else // USE(JSVALUE32_64)
1132 void JIT::emit_op_lshift(Instruction
* currentInstruction
)
1134 unsigned result
= currentInstruction
[1].u
.operand
;
1135 unsigned op1
= currentInstruction
[2].u
.operand
;
1136 unsigned op2
= currentInstruction
[3].u
.operand
;
1138 emitGetVirtualRegisters(op1
, regT0
, op2
, regT2
);
1139 // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
1140 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
1141 emitJumpSlowCaseIfNotImmediateInteger(regT2
);
1142 emitFastArithImmToInt(regT0
);
1143 emitFastArithImmToInt(regT2
);
1145 // Mask with 0x1f as per ecma-262 11.7.2 step 7.
1146 // On 32-bit x86 this is not necessary, since the shift anount is implicitly masked in the instruction.
1147 and32(Imm32(0x1f), regT2
);
1149 lshift32(regT2
, regT0
);
1151 addSlowCase(branchAdd32(Overflow
, regT0
, regT0
));
1152 signExtend32ToPtr(regT0
, regT0
);
1154 emitFastArithReTagImmediate(regT0
, regT0
);
1155 emitPutVirtualRegister(result
);
1158 void JIT::emitSlow_op_lshift(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1160 unsigned result
= currentInstruction
[1].u
.operand
;
1161 unsigned op1
= currentInstruction
[2].u
.operand
;
1162 unsigned op2
= currentInstruction
[3].u
.operand
;
1170 // If we are limited to 32-bit immediates there is a third slow case, which required the operands to have been reloaded.
1171 Jump notImm1
= getSlowCase(iter
);
1172 Jump notImm2
= getSlowCase(iter
);
1174 emitGetVirtualRegisters(op1
, regT0
, op2
, regT2
);
1178 JITStubCall
stubCall(this, cti_op_lshift
);
1179 stubCall
.addArgument(regT0
);
1180 stubCall
.addArgument(regT2
);
1181 stubCall
.call(result
);
1184 void JIT::emit_op_rshift(Instruction
* currentInstruction
)
1186 unsigned result
= currentInstruction
[1].u
.operand
;
1187 unsigned op1
= currentInstruction
[2].u
.operand
;
1188 unsigned op2
= currentInstruction
[3].u
.operand
;
1190 if (isOperandConstantImmediateInt(op2
)) {
1191 // isOperandConstantImmediateInt(op2) => 1 SlowCase
1192 emitGetVirtualRegister(op1
, regT0
);
1193 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
1194 // Mask with 0x1f as per ecma-262 11.7.2 step 7.
1196 rshift32(Imm32(getConstantOperandImmediateInt(op2
) & 0x1f), regT0
);
1198 rshiftPtr(Imm32(getConstantOperandImmediateInt(op2
) & 0x1f), regT0
);
1201 emitGetVirtualRegisters(op1
, regT0
, op2
, regT2
);
1202 if (supportsFloatingPointTruncate()) {
1203 Jump lhsIsInt
= emitJumpIfImmediateInteger(regT0
);
1205 // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
1206 addSlowCase(emitJumpIfNotImmediateNumber(regT0
));
1207 addPtr(tagTypeNumberRegister
, regT0
);
1208 movePtrToDouble(regT0
, fpRegT0
);
1209 addSlowCase(branchTruncateDoubleToInt32(fpRegT0
, regT0
));
1211 // supportsFloatingPoint() && !USE(JSVALUE64) => 5 SlowCases (of which 1 IfNotJSCell)
1212 emitJumpSlowCaseIfNotJSCell(regT0
, op1
);
1213 addSlowCase(checkStructure(regT0
, m_globalData
->numberStructure
.get()));
1214 loadDouble(Address(regT0
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)), fpRegT0
);
1215 addSlowCase(branchTruncateDoubleToInt32(fpRegT0
, regT0
));
1216 addSlowCase(branchAdd32(Overflow
, regT0
, regT0
));
1218 lhsIsInt
.link(this);
1219 emitJumpSlowCaseIfNotImmediateInteger(regT2
);
1221 // !supportsFloatingPoint() => 2 SlowCases
1222 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
1223 emitJumpSlowCaseIfNotImmediateInteger(regT2
);
1225 emitFastArithImmToInt(regT2
);
1227 // Mask with 0x1f as per ecma-262 11.7.2 step 7.
1228 // On 32-bit x86 this is not necessary, since the shift anount is implicitly masked in the instruction.
1229 and32(Imm32(0x1f), regT2
);
1232 rshift32(regT2
, regT0
);
1234 rshiftPtr(regT2
, regT0
);
1238 emitFastArithIntToImmNoCheck(regT0
, regT0
);
1240 orPtr(Imm32(JSImmediate::TagTypeNumber
), regT0
);
1242 emitPutVirtualRegister(result
);
1245 void JIT::emitSlow_op_rshift(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1247 unsigned result
= currentInstruction
[1].u
.operand
;
1248 unsigned op1
= currentInstruction
[2].u
.operand
;
1249 unsigned op2
= currentInstruction
[3].u
.operand
;
1251 JITStubCall
stubCall(this, cti_op_rshift
);
1253 if (isOperandConstantImmediateInt(op2
)) {
1255 stubCall
.addArgument(regT0
);
1256 stubCall
.addArgument(op2
, regT2
);
1258 if (supportsFloatingPointTruncate()) {
1264 linkSlowCaseIfNotJSCell(iter
, op1
);
1270 // We're reloading op1 to regT0 as we can no longer guarantee that
1271 // we have not munged the operand. It may have already been shifted
1272 // correctly, but it still will not have been tagged.
1273 stubCall
.addArgument(op1
, regT0
);
1274 stubCall
.addArgument(regT2
);
1278 stubCall
.addArgument(regT0
);
1279 stubCall
.addArgument(regT2
);
1283 stubCall
.call(result
);
1286 void JIT::emit_op_jnless(Instruction
* currentInstruction
)
1288 unsigned op1
= currentInstruction
[1].u
.operand
;
1289 unsigned op2
= currentInstruction
[2].u
.operand
;
1290 unsigned target
= currentInstruction
[3].u
.operand
;
1292 // We generate inline code for the following cases in the fast path:
1293 // - int immediate to constant int immediate
1294 // - constant int immediate to int immediate
1295 // - int immediate to int immediate
1297 if (isOperandConstantImmediateInt(op2
)) {
1298 emitGetVirtualRegister(op1
, regT0
);
1299 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
1301 int32_t op2imm
= getConstantOperandImmediateInt(op2
);
1303 int32_t op2imm
= static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2
)));
1305 addJump(branch32(GreaterThanOrEqual
, regT0
, Imm32(op2imm
)), target
+ 3);
1306 } else if (isOperandConstantImmediateInt(op1
)) {
1307 emitGetVirtualRegister(op2
, regT1
);
1308 emitJumpSlowCaseIfNotImmediateInteger(regT1
);
1310 int32_t op1imm
= getConstantOperandImmediateInt(op1
);
1312 int32_t op1imm
= static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1
)));
1314 addJump(branch32(LessThanOrEqual
, regT1
, Imm32(op1imm
)), target
+ 3);
1316 emitGetVirtualRegisters(op1
, regT0
, op2
, regT1
);
1317 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
1318 emitJumpSlowCaseIfNotImmediateInteger(regT1
);
1320 addJump(branch32(GreaterThanOrEqual
, regT0
, regT1
), target
+ 3);
1324 void JIT::emitSlow_op_jnless(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1326 unsigned op1
= currentInstruction
[1].u
.operand
;
1327 unsigned op2
= currentInstruction
[2].u
.operand
;
1328 unsigned target
= currentInstruction
[3].u
.operand
;
1330 // We generate inline code for the following cases in the slow path:
1331 // - floating-point number to constant int immediate
1332 // - constant int immediate to floating-point number
1333 // - floating-point number to floating-point number.
1335 if (isOperandConstantImmediateInt(op2
)) {
1338 if (supportsFloatingPoint()) {
1340 Jump fail1
= emitJumpIfNotImmediateNumber(regT0
);
1341 addPtr(tagTypeNumberRegister
, regT0
);
1342 movePtrToDouble(regT0
, fpRegT0
);
1345 if (!m_codeBlock
->isKnownNotImmediate(op1
))
1346 fail1
= emitJumpIfNotJSCell(regT0
);
1348 Jump fail2
= checkStructure(regT0
, m_globalData
->numberStructure
.get());
1349 loadDouble(Address(regT0
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)), fpRegT0
);
1352 int32_t op2imm
= getConstantOperand(op2
).asInt32();;
1354 move(Imm32(op2imm
), regT1
);
1355 convertInt32ToDouble(regT1
, fpRegT1
);
1357 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual
, fpRegT1
, fpRegT0
), target
+ 3);
1359 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless
));
1364 if (!m_codeBlock
->isKnownNotImmediate(op1
))
1370 JITStubCall
stubCall(this, cti_op_jless
);
1371 stubCall
.addArgument(regT0
);
1372 stubCall
.addArgument(op2
, regT2
);
1374 emitJumpSlowToHot(branchTest32(Zero
, regT0
), target
+ 3);
1376 } else if (isOperandConstantImmediateInt(op1
)) {
1379 if (supportsFloatingPoint()) {
1381 Jump fail1
= emitJumpIfNotImmediateNumber(regT1
);
1382 addPtr(tagTypeNumberRegister
, regT1
);
1383 movePtrToDouble(regT1
, fpRegT1
);
1386 if (!m_codeBlock
->isKnownNotImmediate(op2
))
1387 fail1
= emitJumpIfNotJSCell(regT1
);
1389 Jump fail2
= checkStructure(regT1
, m_globalData
->numberStructure
.get());
1390 loadDouble(Address(regT1
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)), fpRegT1
);
1393 int32_t op1imm
= getConstantOperand(op1
).asInt32();;
1395 move(Imm32(op1imm
), regT0
);
1396 convertInt32ToDouble(regT0
, fpRegT0
);
1398 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual
, fpRegT1
, fpRegT0
), target
+ 3);
1400 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless
));
1405 if (!m_codeBlock
->isKnownNotImmediate(op2
))
1411 JITStubCall
stubCall(this, cti_op_jless
);
1412 stubCall
.addArgument(op1
, regT2
);
1413 stubCall
.addArgument(regT1
);
1415 emitJumpSlowToHot(branchTest32(Zero
, regT0
), target
+ 3);
1420 if (supportsFloatingPoint()) {
1422 Jump fail1
= emitJumpIfNotImmediateNumber(regT0
);
1423 Jump fail2
= emitJumpIfNotImmediateNumber(regT1
);
1424 Jump fail3
= emitJumpIfImmediateInteger(regT1
);
1425 addPtr(tagTypeNumberRegister
, regT0
);
1426 addPtr(tagTypeNumberRegister
, regT1
);
1427 movePtrToDouble(regT0
, fpRegT0
);
1428 movePtrToDouble(regT1
, fpRegT1
);
1431 if (!m_codeBlock
->isKnownNotImmediate(op1
))
1432 fail1
= emitJumpIfNotJSCell(regT0
);
1435 if (!m_codeBlock
->isKnownNotImmediate(op2
))
1436 fail2
= emitJumpIfNotJSCell(regT1
);
1438 Jump fail3
= checkStructure(regT0
, m_globalData
->numberStructure
.get());
1439 Jump fail4
= checkStructure(regT1
, m_globalData
->numberStructure
.get());
1440 loadDouble(Address(regT0
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)), fpRegT0
);
1441 loadDouble(Address(regT1
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)), fpRegT1
);
1444 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual
, fpRegT1
, fpRegT0
), target
+ 3);
1446 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless
));
1453 if (!m_codeBlock
->isKnownNotImmediate(op1
))
1455 if (!m_codeBlock
->isKnownNotImmediate(op2
))
1463 JITStubCall
stubCall(this, cti_op_jless
);
1464 stubCall
.addArgument(regT0
);
1465 stubCall
.addArgument(regT1
);
1467 emitJumpSlowToHot(branchTest32(Zero
, regT0
), target
+ 3);
1471 void JIT::emit_op_jnlesseq(Instruction
* currentInstruction
)
1473 unsigned op1
= currentInstruction
[1].u
.operand
;
1474 unsigned op2
= currentInstruction
[2].u
.operand
;
1475 unsigned target
= currentInstruction
[3].u
.operand
;
1477 // We generate inline code for the following cases in the fast path:
1478 // - int immediate to constant int immediate
1479 // - constant int immediate to int immediate
1480 // - int immediate to int immediate
1482 if (isOperandConstantImmediateInt(op2
)) {
1483 emitGetVirtualRegister(op1
, regT0
);
1484 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
1486 int32_t op2imm
= getConstantOperandImmediateInt(op2
);
1488 int32_t op2imm
= static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2
)));
1490 addJump(branch32(GreaterThan
, regT0
, Imm32(op2imm
)), target
+ 3);
1491 } else if (isOperandConstantImmediateInt(op1
)) {
1492 emitGetVirtualRegister(op2
, regT1
);
1493 emitJumpSlowCaseIfNotImmediateInteger(regT1
);
1495 int32_t op1imm
= getConstantOperandImmediateInt(op1
);
1497 int32_t op1imm
= static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1
)));
1499 addJump(branch32(LessThan
, regT1
, Imm32(op1imm
)), target
+ 3);
1501 emitGetVirtualRegisters(op1
, regT0
, op2
, regT1
);
1502 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
1503 emitJumpSlowCaseIfNotImmediateInteger(regT1
);
1505 addJump(branch32(GreaterThan
, regT0
, regT1
), target
+ 3);
1509 void JIT::emitSlow_op_jnlesseq(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1511 unsigned op1
= currentInstruction
[1].u
.operand
;
1512 unsigned op2
= currentInstruction
[2].u
.operand
;
1513 unsigned target
= currentInstruction
[3].u
.operand
;
1515 // We generate inline code for the following cases in the slow path:
1516 // - floating-point number to constant int immediate
1517 // - constant int immediate to floating-point number
1518 // - floating-point number to floating-point number.
1520 if (isOperandConstantImmediateInt(op2
)) {
1523 if (supportsFloatingPoint()) {
1525 Jump fail1
= emitJumpIfNotImmediateNumber(regT0
);
1526 addPtr(tagTypeNumberRegister
, regT0
);
1527 movePtrToDouble(regT0
, fpRegT0
);
1530 if (!m_codeBlock
->isKnownNotImmediate(op1
))
1531 fail1
= emitJumpIfNotJSCell(regT0
);
1533 Jump fail2
= checkStructure(regT0
, m_globalData
->numberStructure
.get());
1534 loadDouble(Address(regT0
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)), fpRegT0
);
1537 int32_t op2imm
= getConstantOperand(op2
).asInt32();;
1539 move(Imm32(op2imm
), regT1
);
1540 convertInt32ToDouble(regT1
, fpRegT1
);
1542 emitJumpSlowToHot(branchDouble(DoubleLessThan
, fpRegT1
, fpRegT0
), target
+ 3);
1544 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq
));
1549 if (!m_codeBlock
->isKnownNotImmediate(op1
))
1555 JITStubCall
stubCall(this, cti_op_jlesseq
);
1556 stubCall
.addArgument(regT0
);
1557 stubCall
.addArgument(op2
, regT2
);
1559 emitJumpSlowToHot(branchTest32(Zero
, regT0
), target
+ 3);
1561 } else if (isOperandConstantImmediateInt(op1
)) {
1564 if (supportsFloatingPoint()) {
1566 Jump fail1
= emitJumpIfNotImmediateNumber(regT1
);
1567 addPtr(tagTypeNumberRegister
, regT1
);
1568 movePtrToDouble(regT1
, fpRegT1
);
1571 if (!m_codeBlock
->isKnownNotImmediate(op2
))
1572 fail1
= emitJumpIfNotJSCell(regT1
);
1574 Jump fail2
= checkStructure(regT1
, m_globalData
->numberStructure
.get());
1575 loadDouble(Address(regT1
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)), fpRegT1
);
1578 int32_t op1imm
= getConstantOperand(op1
).asInt32();;
1580 move(Imm32(op1imm
), regT0
);
1581 convertInt32ToDouble(regT0
, fpRegT0
);
1583 emitJumpSlowToHot(branchDouble(DoubleLessThan
, fpRegT1
, fpRegT0
), target
+ 3);
1585 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq
));
1590 if (!m_codeBlock
->isKnownNotImmediate(op2
))
1596 JITStubCall
stubCall(this, cti_op_jlesseq
);
1597 stubCall
.addArgument(op1
, regT2
);
1598 stubCall
.addArgument(regT1
);
1600 emitJumpSlowToHot(branchTest32(Zero
, regT0
), target
+ 3);
1605 if (supportsFloatingPoint()) {
1607 Jump fail1
= emitJumpIfNotImmediateNumber(regT0
);
1608 Jump fail2
= emitJumpIfNotImmediateNumber(regT1
);
1609 Jump fail3
= emitJumpIfImmediateInteger(regT1
);
1610 addPtr(tagTypeNumberRegister
, regT0
);
1611 addPtr(tagTypeNumberRegister
, regT1
);
1612 movePtrToDouble(regT0
, fpRegT0
);
1613 movePtrToDouble(regT1
, fpRegT1
);
1616 if (!m_codeBlock
->isKnownNotImmediate(op1
))
1617 fail1
= emitJumpIfNotJSCell(regT0
);
1620 if (!m_codeBlock
->isKnownNotImmediate(op2
))
1621 fail2
= emitJumpIfNotJSCell(regT1
);
1623 Jump fail3
= checkStructure(regT0
, m_globalData
->numberStructure
.get());
1624 Jump fail4
= checkStructure(regT1
, m_globalData
->numberStructure
.get());
1625 loadDouble(Address(regT0
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)), fpRegT0
);
1626 loadDouble(Address(regT1
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)), fpRegT1
);
1629 emitJumpSlowToHot(branchDouble(DoubleLessThan
, fpRegT1
, fpRegT0
), target
+ 3);
1631 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq
));
1638 if (!m_codeBlock
->isKnownNotImmediate(op1
))
1640 if (!m_codeBlock
->isKnownNotImmediate(op2
))
1648 JITStubCall
stubCall(this, cti_op_jlesseq
);
1649 stubCall
.addArgument(regT0
);
1650 stubCall
.addArgument(regT1
);
1652 emitJumpSlowToHot(branchTest32(Zero
, regT0
), target
+ 3);
1656 void JIT::emit_op_bitand(Instruction
* currentInstruction
)
1658 unsigned result
= currentInstruction
[1].u
.operand
;
1659 unsigned op1
= currentInstruction
[2].u
.operand
;
1660 unsigned op2
= currentInstruction
[3].u
.operand
;
1662 if (isOperandConstantImmediateInt(op1
)) {
1663 emitGetVirtualRegister(op2
, regT0
);
1664 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
1666 int32_t imm
= getConstantOperandImmediateInt(op1
);
1667 andPtr(Imm32(imm
), regT0
);
1669 emitFastArithIntToImmNoCheck(regT0
, regT0
);
1671 andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1
)))), regT0
);
1673 } else if (isOperandConstantImmediateInt(op2
)) {
1674 emitGetVirtualRegister(op1
, regT0
);
1675 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
1677 int32_t imm
= getConstantOperandImmediateInt(op2
);
1678 andPtr(Imm32(imm
), regT0
);
1680 emitFastArithIntToImmNoCheck(regT0
, regT0
);
1682 andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2
)))), regT0
);
1685 emitGetVirtualRegisters(op1
, regT0
, op2
, regT1
);
1686 andPtr(regT1
, regT0
);
1687 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
1689 emitPutVirtualRegister(result
);
1692 void JIT::emitSlow_op_bitand(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1694 unsigned result
= currentInstruction
[1].u
.operand
;
1695 unsigned op1
= currentInstruction
[2].u
.operand
;
1696 unsigned op2
= currentInstruction
[3].u
.operand
;
1699 if (isOperandConstantImmediateInt(op1
)) {
1700 JITStubCall
stubCall(this, cti_op_bitand
);
1701 stubCall
.addArgument(op1
, regT2
);
1702 stubCall
.addArgument(regT0
);
1703 stubCall
.call(result
);
1704 } else if (isOperandConstantImmediateInt(op2
)) {
1705 JITStubCall
stubCall(this, cti_op_bitand
);
1706 stubCall
.addArgument(regT0
);
1707 stubCall
.addArgument(op2
, regT2
);
1708 stubCall
.call(result
);
1710 JITStubCall
stubCall(this, cti_op_bitand
);
1711 stubCall
.addArgument(op1
, regT2
);
1712 stubCall
.addArgument(regT1
);
1713 stubCall
.call(result
);
1717 void JIT::emit_op_post_inc(Instruction
* currentInstruction
)
1719 unsigned result
= currentInstruction
[1].u
.operand
;
1720 unsigned srcDst
= currentInstruction
[2].u
.operand
;
1722 emitGetVirtualRegister(srcDst
, regT0
);
1724 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
1726 addSlowCase(branchAdd32(Overflow
, Imm32(1), regT1
));
1727 emitFastArithIntToImmNoCheck(regT1
, regT1
);
1729 addSlowCase(branchAdd32(Overflow
, Imm32(1 << JSImmediate::IntegerPayloadShift
), regT1
));
1730 signExtend32ToPtr(regT1
, regT1
);
1732 emitPutVirtualRegister(srcDst
, regT1
);
1733 emitPutVirtualRegister(result
);
1736 void JIT::emitSlow_op_post_inc(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1738 unsigned result
= currentInstruction
[1].u
.operand
;
1739 unsigned srcDst
= currentInstruction
[2].u
.operand
;
1743 JITStubCall
stubCall(this, cti_op_post_inc
);
1744 stubCall
.addArgument(regT0
);
1745 stubCall
.addArgument(Imm32(srcDst
));
1746 stubCall
.call(result
);
1749 void JIT::emit_op_post_dec(Instruction
* currentInstruction
)
1751 unsigned result
= currentInstruction
[1].u
.operand
;
1752 unsigned srcDst
= currentInstruction
[2].u
.operand
;
1754 emitGetVirtualRegister(srcDst
, regT0
);
1756 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
1758 addSlowCase(branchSub32(Zero
, Imm32(1), regT1
));
1759 emitFastArithIntToImmNoCheck(regT1
, regT1
);
1761 addSlowCase(branchSub32(Zero
, Imm32(1 << JSImmediate::IntegerPayloadShift
), regT1
));
1762 signExtend32ToPtr(regT1
, regT1
);
1764 emitPutVirtualRegister(srcDst
, regT1
);
1765 emitPutVirtualRegister(result
);
1768 void JIT::emitSlow_op_post_dec(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1770 unsigned result
= currentInstruction
[1].u
.operand
;
1771 unsigned srcDst
= currentInstruction
[2].u
.operand
;
1775 JITStubCall
stubCall(this, cti_op_post_dec
);
1776 stubCall
.addArgument(regT0
);
1777 stubCall
.addArgument(Imm32(srcDst
));
1778 stubCall
.call(result
);
1781 void JIT::emit_op_pre_inc(Instruction
* currentInstruction
)
1783 unsigned srcDst
= currentInstruction
[1].u
.operand
;
1785 emitGetVirtualRegister(srcDst
, regT0
);
1786 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
1788 addSlowCase(branchAdd32(Overflow
, Imm32(1), regT0
));
1789 emitFastArithIntToImmNoCheck(regT0
, regT0
);
1791 addSlowCase(branchAdd32(Overflow
, Imm32(1 << JSImmediate::IntegerPayloadShift
), regT0
));
1792 signExtend32ToPtr(regT0
, regT0
);
1794 emitPutVirtualRegister(srcDst
);
1797 void JIT::emitSlow_op_pre_inc(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1799 unsigned srcDst
= currentInstruction
[1].u
.operand
;
1801 Jump notImm
= getSlowCase(iter
);
1803 emitGetVirtualRegister(srcDst
, regT0
);
1805 JITStubCall
stubCall(this, cti_op_pre_inc
);
1806 stubCall
.addArgument(regT0
);
1807 stubCall
.call(srcDst
);
1810 void JIT::emit_op_pre_dec(Instruction
* currentInstruction
)
1812 unsigned srcDst
= currentInstruction
[1].u
.operand
;
1814 emitGetVirtualRegister(srcDst
, regT0
);
1815 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
1817 addSlowCase(branchSub32(Zero
, Imm32(1), regT0
));
1818 emitFastArithIntToImmNoCheck(regT0
, regT0
);
1820 addSlowCase(branchSub32(Zero
, Imm32(1 << JSImmediate::IntegerPayloadShift
), regT0
));
1821 signExtend32ToPtr(regT0
, regT0
);
1823 emitPutVirtualRegister(srcDst
);
1826 void JIT::emitSlow_op_pre_dec(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1828 unsigned srcDst
= currentInstruction
[1].u
.operand
;
1830 Jump notImm
= getSlowCase(iter
);
1832 emitGetVirtualRegister(srcDst
, regT0
);
1834 JITStubCall
stubCall(this, cti_op_pre_dec
);
1835 stubCall
.addArgument(regT0
);
1836 stubCall
.call(srcDst
);
1839 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
1841 #if PLATFORM(X86) || PLATFORM(X86_64)
1843 void JIT::emit_op_mod(Instruction
* currentInstruction
)
1845 unsigned result
= currentInstruction
[1].u
.operand
;
1846 unsigned op1
= currentInstruction
[2].u
.operand
;
1847 unsigned op2
= currentInstruction
[3].u
.operand
;
1849 emitGetVirtualRegisters(op1
, X86::eax
, op2
, X86::ecx
);
1850 emitJumpSlowCaseIfNotImmediateInteger(X86::eax
);
1851 emitJumpSlowCaseIfNotImmediateInteger(X86::ecx
);
1853 addSlowCase(branchPtr(Equal
, X86::ecx
, ImmPtr(JSValue::encode(jsNumber(m_globalData
, 0)))));
1855 m_assembler
.idivl_r(X86::ecx
);
1857 emitFastArithDeTagImmediate(X86::eax
);
1858 addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx
));
1860 m_assembler
.idivl_r(X86::ecx
);
1861 signExtend32ToPtr(X86::edx
, X86::edx
);
1863 emitFastArithReTagImmediate(X86::edx
, X86::eax
);
1864 emitPutVirtualRegister(result
);
1867 void JIT::emitSlow_op_mod(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
1869 unsigned result
= currentInstruction
[1].u
.operand
;
1876 Jump notImm1
= getSlowCase(iter
);
1877 Jump notImm2
= getSlowCase(iter
);
1879 emitFastArithReTagImmediate(X86::eax
, X86::eax
);
1880 emitFastArithReTagImmediate(X86::ecx
, X86::ecx
);
1884 JITStubCall
stubCall(this, cti_op_mod
);
1885 stubCall
.addArgument(X86::eax
);
1886 stubCall
.addArgument(X86::ecx
);
1887 stubCall
.call(result
);
1890 #else // PLATFORM(X86) || PLATFORM(X86_64)
1892 void JIT::emit_op_mod(Instruction
* currentInstruction
)
1894 unsigned result
= currentInstruction
[1].u
.operand
;
1895 unsigned op1
= currentInstruction
[2].u
.operand
;
1896 unsigned op2
= currentInstruction
[3].u
.operand
;
1898 JITStubCall
stubCall(this, cti_op_mod
);
1899 stubCall
.addArgument(op1
, regT2
);
1900 stubCall
.addArgument(op2
, regT2
);
1901 stubCall
.call(result
);
1904 void JIT::emitSlow_op_mod(Instruction
*, Vector
<SlowCaseEntry
>::iterator
&)
1906 ASSERT_NOT_REACHED();
1909 #endif // PLATFORM(X86) || PLATFORM(X86_64)
1911 /* ------------------------------ END: OP_MOD ------------------------------ */
1915 /* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
1917 void JIT::compileBinaryArithOp(OpcodeID opcodeID
, unsigned, unsigned op1
, unsigned op2
, OperandTypes
)
1919 emitGetVirtualRegisters(op1
, regT0
, op2
, regT1
);
1920 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
1921 emitJumpSlowCaseIfNotImmediateInteger(regT1
);
1922 if (opcodeID
== op_add
)
1923 addSlowCase(branchAdd32(Overflow
, regT1
, regT0
));
1924 else if (opcodeID
== op_sub
)
1925 addSlowCase(branchSub32(Overflow
, regT1
, regT0
));
1927 ASSERT(opcodeID
== op_mul
);
1928 addSlowCase(branchMul32(Overflow
, regT1
, regT0
));
1929 addSlowCase(branchTest32(Zero
, regT0
));
1931 emitFastArithIntToImmNoCheck(regT0
, regT0
);
1934 void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID
, Vector
<SlowCaseEntry
>::iterator
& iter
, unsigned result
, unsigned op1
, unsigned, OperandTypes types
)
1936 // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
1937 COMPILE_ASSERT(((JSImmediate::TagTypeNumber
+ JSImmediate::DoubleEncodeOffset
) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0
);
1939 Jump notImm1
= getSlowCase(iter
);
1940 Jump notImm2
= getSlowCase(iter
);
1942 linkSlowCase(iter
); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
1943 if (opcodeID
== op_mul
) // op_mul has an extra slow case to handle 0 * negative number.
1945 emitGetVirtualRegister(op1
, regT0
);
1947 Label
stubFunctionCall(this);
1948 JITStubCall
stubCall(this, opcodeID
== op_add
? cti_op_add
: opcodeID
== op_sub
? cti_op_sub
: cti_op_mul
);
1949 stubCall
.addArgument(regT0
);
1950 stubCall
.addArgument(regT1
);
1951 stubCall
.call(result
);
1954 // if we get here, eax is not an int32, edx not yet checked.
1956 if (!types
.first().definitelyIsNumber())
1957 emitJumpIfNotImmediateNumber(regT0
).linkTo(stubFunctionCall
, this);
1958 if (!types
.second().definitelyIsNumber())
1959 emitJumpIfNotImmediateNumber(regT1
).linkTo(stubFunctionCall
, this);
1960 addPtr(tagTypeNumberRegister
, regT0
);
1961 movePtrToDouble(regT0
, fpRegT1
);
1962 Jump op2isDouble
= emitJumpIfNotImmediateInteger(regT1
);
1963 convertInt32ToDouble(regT1
, fpRegT2
);
1964 Jump op2wasInteger
= jump();
1966 // if we get here, eax IS an int32, edx is not.
1968 if (!types
.second().definitelyIsNumber())
1969 emitJumpIfNotImmediateNumber(regT1
).linkTo(stubFunctionCall
, this);
1970 convertInt32ToDouble(regT0
, fpRegT1
);
1971 op2isDouble
.link(this);
1972 addPtr(tagTypeNumberRegister
, regT1
);
1973 movePtrToDouble(regT1
, fpRegT2
);
1974 op2wasInteger
.link(this);
1976 if (opcodeID
== op_add
)
1977 addDouble(fpRegT2
, fpRegT1
);
1978 else if (opcodeID
== op_sub
)
1979 subDouble(fpRegT2
, fpRegT1
);
1981 ASSERT(opcodeID
== op_mul
);
1982 mulDouble(fpRegT2
, fpRegT1
);
1984 moveDoubleToPtr(fpRegT1
, regT0
);
1985 subPtr(tagTypeNumberRegister
, regT0
);
1986 emitPutVirtualRegister(result
, regT0
);
1991 void JIT::emit_op_add(Instruction
* currentInstruction
)
1993 unsigned result
= currentInstruction
[1].u
.operand
;
1994 unsigned op1
= currentInstruction
[2].u
.operand
;
1995 unsigned op2
= currentInstruction
[3].u
.operand
;
1996 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
1998 if (!types
.first().mightBeNumber() || !types
.second().mightBeNumber()) {
1999 JITStubCall
stubCall(this, cti_op_add
);
2000 stubCall
.addArgument(op1
, regT2
);
2001 stubCall
.addArgument(op2
, regT2
);
2002 stubCall
.call(result
);
2006 if (isOperandConstantImmediateInt(op1
)) {
2007 emitGetVirtualRegister(op2
, regT0
);
2008 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
2009 addSlowCase(branchAdd32(Overflow
, Imm32(getConstantOperandImmediateInt(op1
)), regT0
));
2010 emitFastArithIntToImmNoCheck(regT0
, regT0
);
2011 } else if (isOperandConstantImmediateInt(op2
)) {
2012 emitGetVirtualRegister(op1
, regT0
);
2013 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
2014 addSlowCase(branchAdd32(Overflow
, Imm32(getConstantOperandImmediateInt(op2
)), regT0
));
2015 emitFastArithIntToImmNoCheck(regT0
, regT0
);
2017 compileBinaryArithOp(op_add
, result
, op1
, op2
, types
);
2019 emitPutVirtualRegister(result
);
2022 void JIT::emitSlow_op_add(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
2024 unsigned result
= currentInstruction
[1].u
.operand
;
2025 unsigned op1
= currentInstruction
[2].u
.operand
;
2026 unsigned op2
= currentInstruction
[3].u
.operand
;
2028 if (isOperandConstantImmediateInt(op1
) || isOperandConstantImmediateInt(op2
)) {
2031 JITStubCall
stubCall(this, cti_op_add
);
2032 stubCall
.addArgument(op1
, regT2
);
2033 stubCall
.addArgument(op2
, regT2
);
2034 stubCall
.call(result
);
2036 compileBinaryArithOpSlowCase(op_add
, iter
, result
, op1
, op2
, OperandTypes::fromInt(currentInstruction
[4].u
.operand
));
2039 void JIT::emit_op_mul(Instruction
* currentInstruction
)
2041 unsigned result
= currentInstruction
[1].u
.operand
;
2042 unsigned op1
= currentInstruction
[2].u
.operand
;
2043 unsigned op2
= currentInstruction
[3].u
.operand
;
2044 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
2046 // For now, only plant a fast int case if the constant operand is greater than zero.
2048 if (isOperandConstantImmediateInt(op1
) && ((value
= getConstantOperandImmediateInt(op1
)) > 0)) {
2049 emitGetVirtualRegister(op2
, regT0
);
2050 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
2051 addSlowCase(branchMul32(Overflow
, Imm32(value
), regT0
, regT0
));
2052 emitFastArithReTagImmediate(regT0
, regT0
);
2053 } else if (isOperandConstantImmediateInt(op2
) && ((value
= getConstantOperandImmediateInt(op2
)) > 0)) {
2054 emitGetVirtualRegister(op1
, regT0
);
2055 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
2056 addSlowCase(branchMul32(Overflow
, Imm32(value
), regT0
, regT0
));
2057 emitFastArithReTagImmediate(regT0
, regT0
);
2059 compileBinaryArithOp(op_mul
, result
, op1
, op2
, types
);
2061 emitPutVirtualRegister(result
);
2064 void JIT::emitSlow_op_mul(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
2066 unsigned result
= currentInstruction
[1].u
.operand
;
2067 unsigned op1
= currentInstruction
[2].u
.operand
;
2068 unsigned op2
= currentInstruction
[3].u
.operand
;
2069 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
2071 if ((isOperandConstantImmediateInt(op1
) && (getConstantOperandImmediateInt(op1
) > 0))
2072 || (isOperandConstantImmediateInt(op2
) && (getConstantOperandImmediateInt(op2
) > 0))) {
2075 // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
2076 JITStubCall
stubCall(this, cti_op_mul
);
2077 stubCall
.addArgument(op1
, regT2
);
2078 stubCall
.addArgument(op2
, regT2
);
2079 stubCall
.call(result
);
2081 compileBinaryArithOpSlowCase(op_mul
, iter
, result
, op1
, op2
, types
);
2084 void JIT::emit_op_sub(Instruction
* currentInstruction
)
2086 unsigned result
= currentInstruction
[1].u
.operand
;
2087 unsigned op1
= currentInstruction
[2].u
.operand
;
2088 unsigned op2
= currentInstruction
[3].u
.operand
;
2089 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
2091 compileBinaryArithOp(op_sub
, result
, op1
, op2
, types
);
2093 emitPutVirtualRegister(result
);
2096 void JIT::emitSlow_op_sub(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
2098 unsigned result
= currentInstruction
[1].u
.operand
;
2099 unsigned op1
= currentInstruction
[2].u
.operand
;
2100 unsigned op2
= currentInstruction
[3].u
.operand
;
2101 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
2103 compileBinaryArithOpSlowCase(op_sub
, iter
, result
, op1
, op2
, types
);
2106 #else // USE(JSVALUE64)
2108 /* ------------------------------ BEGIN: !USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
2110 void JIT::compileBinaryArithOp(OpcodeID opcodeID
, unsigned dst
, unsigned src1
, unsigned src2
, OperandTypes types
)
2112 Structure
* numberStructure
= m_globalData
->numberStructure
.get();
2113 Jump wasJSNumberCell1
;
2114 Jump wasJSNumberCell2
;
2116 emitGetVirtualRegisters(src1
, regT0
, src2
, regT1
);
2118 if (types
.second().isReusable() && supportsFloatingPoint()) {
2119 ASSERT(types
.second().mightBeNumber());
2121 // Check op2 is a number
2122 Jump op2imm
= emitJumpIfImmediateInteger(regT1
);
2123 if (!types
.second().definitelyIsNumber()) {
2124 emitJumpSlowCaseIfNotJSCell(regT1
, src2
);
2125 addSlowCase(checkStructure(regT1
, numberStructure
));
2128 // (1) In this case src2 is a reusable number cell.
2129 // Slow case if src1 is not a number type.
2130 Jump op1imm
= emitJumpIfImmediateInteger(regT0
);
2131 if (!types
.first().definitelyIsNumber()) {
2132 emitJumpSlowCaseIfNotJSCell(regT0
, src1
);
2133 addSlowCase(checkStructure(regT0
, numberStructure
));
2136 // (1a) if we get here, src1 is also a number cell
2137 loadDouble(Address(regT0
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)), fpRegT0
);
2138 Jump loadedDouble
= jump();
2139 // (1b) if we get here, src1 is an immediate
2141 emitFastArithImmToInt(regT0
);
2142 convertInt32ToDouble(regT0
, fpRegT0
);
2144 loadedDouble
.link(this);
2145 if (opcodeID
== op_add
)
2146 addDouble(Address(regT1
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)), fpRegT0
);
2147 else if (opcodeID
== op_sub
)
2148 subDouble(Address(regT1
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)), fpRegT0
);
2150 ASSERT(opcodeID
== op_mul
);
2151 mulDouble(Address(regT1
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)), fpRegT0
);
2154 // Store the result to the JSNumberCell and jump.
2155 storeDouble(fpRegT0
, Address(regT1
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)));
2157 emitPutVirtualRegister(dst
);
2158 wasJSNumberCell2
= jump();
2160 // (2) This handles cases where src2 is an immediate number.
2161 // Two slow cases - either src1 isn't an immediate, or the subtract overflows.
2163 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
2164 } else if (types
.first().isReusable() && supportsFloatingPoint()) {
2165 ASSERT(types
.first().mightBeNumber());
2167 // Check op1 is a number
2168 Jump op1imm
= emitJumpIfImmediateInteger(regT0
);
2169 if (!types
.first().definitelyIsNumber()) {
2170 emitJumpSlowCaseIfNotJSCell(regT0
, src1
);
2171 addSlowCase(checkStructure(regT0
, numberStructure
));
2174 // (1) In this case src1 is a reusable number cell.
2175 // Slow case if src2 is not a number type.
2176 Jump op2imm
= emitJumpIfImmediateInteger(regT1
);
2177 if (!types
.second().definitelyIsNumber()) {
2178 emitJumpSlowCaseIfNotJSCell(regT1
, src2
);
2179 addSlowCase(checkStructure(regT1
, numberStructure
));
2182 // (1a) if we get here, src2 is also a number cell
2183 loadDouble(Address(regT1
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)), fpRegT1
);
2184 Jump loadedDouble
= jump();
2185 // (1b) if we get here, src2 is an immediate
2187 emitFastArithImmToInt(regT1
);
2188 convertInt32ToDouble(regT1
, fpRegT1
);
2190 loadedDouble
.link(this);
2191 loadDouble(Address(regT0
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)), fpRegT0
);
2192 if (opcodeID
== op_add
)
2193 addDouble(fpRegT1
, fpRegT0
);
2194 else if (opcodeID
== op_sub
)
2195 subDouble(fpRegT1
, fpRegT0
);
2197 ASSERT(opcodeID
== op_mul
);
2198 mulDouble(fpRegT1
, fpRegT0
);
2200 storeDouble(fpRegT0
, Address(regT0
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)));
2201 emitPutVirtualRegister(dst
);
2203 // Store the result to the JSNumberCell and jump.
2204 storeDouble(fpRegT0
, Address(regT0
, OBJECT_OFFSETOF(JSNumberCell
, m_value
)));
2205 emitPutVirtualRegister(dst
);
2206 wasJSNumberCell1
= jump();
2208 // (2) This handles cases where src1 is an immediate number.
2209 // Two slow cases - either src2 isn't an immediate, or the subtract overflows.
2211 emitJumpSlowCaseIfNotImmediateInteger(regT1
);
2213 emitJumpSlowCaseIfNotImmediateIntegers(regT0
, regT1
, regT2
);
2215 if (opcodeID
== op_add
) {
2216 emitFastArithDeTagImmediate(regT0
);
2217 addSlowCase(branchAdd32(Overflow
, regT1
, regT0
));
2218 } else if (opcodeID
== op_sub
) {
2219 addSlowCase(branchSub32(Overflow
, regT1
, regT0
));
2220 signExtend32ToPtr(regT0
, regT0
);
2221 emitFastArithReTagImmediate(regT0
, regT0
);
2223 ASSERT(opcodeID
== op_mul
);
2224 // convert eax & edx from JSImmediates to ints, and check if either are zero
2225 emitFastArithImmToInt(regT1
);
2226 Jump op1Zero
= emitFastArithDeTagImmediateJumpIfZero(regT0
);
2227 Jump op2NonZero
= branchTest32(NonZero
, regT1
);
2229 // if either input is zero, add the two together, and check if the result is < 0.
2230 // If it is, we have a problem (N < 0), (N * 0) == -0, not representatble as a JSImmediate.
2232 addSlowCase(branchAdd32(Signed
, regT1
, regT2
));
2233 // Skip the above check if neither input is zero
2234 op2NonZero
.link(this);
2235 addSlowCase(branchMul32(Overflow
, regT1
, regT0
));
2236 signExtend32ToPtr(regT0
, regT0
);
2237 emitFastArithReTagImmediate(regT0
, regT0
);
2239 emitPutVirtualRegister(dst
);
2241 if (types
.second().isReusable() && supportsFloatingPoint())
2242 wasJSNumberCell2
.link(this);
2243 else if (types
.first().isReusable() && supportsFloatingPoint())
2244 wasJSNumberCell1
.link(this);
// Slow path shared by op_add / op_sub / op_mul (!JSVALUE64). Links, in the
// same order they were planted by compileBinaryArithOp, every slow-case branch
// implied by the type configuration in 'types', then calls the C++ stub.
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
    linkSlowCase(iter);
    if (types.second().isReusable() && supportsFloatingPoint()) {
        // Mirrors the src2-reusable fast path: each operand that was not
        // definitely a number planted a not-a-cell check and a structure check.
        if (!types.first().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src1);
            linkSlowCase(iter);
        }
        if (!types.second().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src2);
            linkSlowCase(iter);
        }
    } else if (types.first().isReusable() && supportsFloatingPoint()) {
        // Mirrors the src1-reusable fast path.
        if (!types.first().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src1);
            linkSlowCase(iter);
        }
        if (!types.second().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src2);
            linkSlowCase(iter);
        }
    }
    linkSlowCase(iter);

    // additional entry point to handle -0 cases.
    if (opcodeID == op_mul)
        linkSlowCase(iter);

    // Select the matching arithmetic stub; operands are marshalled via regT2.
    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
    stubCall.addArgument(src1, regT2);
    stubCall.addArgument(src2, regT2);
    stubCall.call(dst);
}
2281 void JIT::emit_op_add(Instruction
* currentInstruction
)
2283 unsigned result
= currentInstruction
[1].u
.operand
;
2284 unsigned op1
= currentInstruction
[2].u
.operand
;
2285 unsigned op2
= currentInstruction
[3].u
.operand
;
2287 if (isOperandConstantImmediateInt(op1
)) {
2288 emitGetVirtualRegister(op2
, regT0
);
2289 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
2290 addSlowCase(branchAdd32(Overflow
, Imm32(getConstantOperandImmediateInt(op1
) << JSImmediate::IntegerPayloadShift
), regT0
));
2291 signExtend32ToPtr(regT0
, regT0
);
2292 emitPutVirtualRegister(result
);
2293 } else if (isOperandConstantImmediateInt(op2
)) {
2294 emitGetVirtualRegister(op1
, regT0
);
2295 emitJumpSlowCaseIfNotImmediateInteger(regT0
);
2296 addSlowCase(branchAdd32(Overflow
, Imm32(getConstantOperandImmediateInt(op2
) << JSImmediate::IntegerPayloadShift
), regT0
));
2297 signExtend32ToPtr(regT0
, regT0
);
2298 emitPutVirtualRegister(result
);
2300 OperandTypes types
= OperandTypes::fromInt(currentInstruction
[4].u
.operand
);
2301 if (types
.first().mightBeNumber() && types
.second().mightBeNumber())
2302 compileBinaryArithOp(op_add
, result
, op1
, op2
, OperandTypes::fromInt(currentInstruction
[4].u
.operand
));
2304 JITStubCall
stubCall(this, cti_op_add
);
2305 stubCall
.addArgument(op1
, regT2
);
2306 stubCall
.addArgument(op2
, regT2
);
2307 stubCall
.call(result
);
// op_add slow path (!JSVALUE64).
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        Jump notImm = getSlowCase(iter); // first slow case: operand was not an immediate int
        linkSlowCase(iter);              // second slow case: the tagged add overflowed
        // The overflowing add already executed, so subtract the shifted constant
        // back out to recover the original operand before calling the stub.
        sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0);
        notImm.link(this);
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT0);
        stubCall.call(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        Jump notImm = getSlowCase(iter);
        linkSlowCase(iter);
        sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0);
        notImm.link(this);
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else {
        OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
        // The fast path only reaches compileBinaryArithOp when both sides
        // might be numbers; anything else went straight to the stub.
        ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
    }
}
// op_mul fast path (!JSVALUE64). Plants an integer fast path only when one
// operand is a constant int greater than zero (a zero/negative constant could
// require a -0 result); otherwise defers to the shared generator.
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value; // assigned inside the conditions below
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithDeTagImmediate(regT0);                             // strip the immediate tag before multiplying
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0)); // overflow -> slow path
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithDeTagImmediate(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(result);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
// op_mul slow path (!JSVALUE64). Slow cases must be consumed in the exact
// order the fast path planted them.
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
        // The constant-operand fast path planted two slow cases:
        // not-an-immediate-int, and multiply overflow.
        linkSlowCase(iter);
        linkSlowCase(iter);
        // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
        JITStubCall stubCall(this, cti_op_mul);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else
        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
2390 void JIT::emit_op_sub(Instruction
* currentInstruction
)
2392 compileBinaryArithOp(op_sub
, currentInstruction
[1].u
.operand
, currentInstruction
[2].u
.operand
, currentInstruction
[3].u
.operand
, OperandTypes::fromInt(currentInstruction
[4].u
.operand
));
2395 void JIT::emitSlow_op_sub(Instruction
* currentInstruction
, Vector
<SlowCaseEntry
>::iterator
& iter
)
2397 compileBinaryArithOpSlowCase(op_sub
, iter
, currentInstruction
[1].u
.operand
, currentInstruction
[2].u
.operand
, currentInstruction
[3].u
.operand
, OperandTypes::fromInt(currentInstruction
[4].u
.operand
));
2400 #endif // USE(JSVALUE64)
2402 /* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
2404 #endif // USE(JSVALUE32_64)
2408 #endif // ENABLE(JIT)