]> git.saurik.com Git - apple/javascriptcore.git/blob - jit/JITArithmetic.cpp
JavaScriptCore-554.1.tar.gz
[apple/javascriptcore.git] / jit / JITArithmetic.cpp
1 /*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include "config.h"
27 #include "JIT.h"
28
29 #if ENABLE(JIT)
30
31 #include "CodeBlock.h"
32 #include "JITInlineMethods.h"
33 #include "JITStubCall.h"
34 #include "JSArray.h"
35 #include "JSFunction.h"
36 #include "Interpreter.h"
37 #include "ResultType.h"
38 #include "SamplingTool.h"
39
40 #ifndef NDEBUG
41 #include <stdio.h>
42 #endif
43
44 using namespace std;
45
46 namespace JSC {
47
48 #if USE(JSVALUE32_64)
49
50 void JIT::emit_op_negate(Instruction* currentInstruction)
51 {
52 unsigned dst = currentInstruction[1].u.operand;
53 unsigned src = currentInstruction[2].u.operand;
54
55 emitLoad(src, regT1, regT0);
56
57 Jump srcNotInt = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
58 addSlowCase(branch32(Equal, regT0, Imm32(0)));
59
60 neg32(regT0);
61 emitStoreInt32(dst, regT0, (dst == src));
62
63 Jump end = jump();
64
65 srcNotInt.link(this);
66 addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
67
68 xor32(Imm32(1 << 31), regT1);
69 store32(regT1, tagFor(dst));
70 if (dst != src)
71 store32(regT0, payloadFor(dst));
72
73 end.link(this);
74 }
75
// Slow path for op_negate. Reached either from the int32 path (payload that
// cannot be negated in place, e.g. 0 whose negation is the double -0.0) or
// from the non-number check on the double path. The operand is still live in
// regT1 (tag) / regT0 (payload), so it is passed to the stub directly.
// The two linkSlowCase() calls must match, in order, the two addSlowCase()
// sites in emit_op_negate.
void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // 0 check
    linkSlowCase(iter); // double check

    JITStubCall stubCall(this, cti_op_negate);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}
87
// op_jnless: jump to 'target' when !(op1 < op2). The integer comparisons
// below are therefore inverted (e.g. op1 >= op2 takes the jump). When one
// operand is a constant int32 only the other is loaded; the double path is
// emitted by emitBinaryDoubleOp, and machines without FPU support defer all
// non-int32 operands to the slow case instead.
void JIT::emit_op_jnless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Int32 less.
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
        // Jump when op2 <= constant op1, i.e. !(op1 < op2).
        addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target + 3);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        // Jump when op1 >= constant op2, i.e. !(op1 < op2).
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThanOrEqual, regT0, regT2), target + 3);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less.
    // 'target' travels in the helper's dst slot; the two trailing bools say
    // which operands are already loaded into registers on this path.
    emitBinaryDoubleOp(op_jnless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}
124
// Slow path for op_jnless. The linkSlowCase() calls must consume, in emission
// order, exactly the slow cases added on the fast path — which ones exist
// depends on FPU support and on which operand was a constant int32.
void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (!supportsFloatingPoint()) {
        // Without FPU, each non-constant operand contributed one int32 check.
        if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
            linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
    } else {
        // With FPU, the slow cases come from emitBinaryDoubleOp's tag checks.
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // double check
            linkSlowCase(iter); // int32 check
        }
        if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
            linkSlowCase(iter); // double check
    }

    // The stub computes (op1 < op2); jnless jumps when that result is false.
    JITStubCall stubCall(this, cti_op_jless);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
}
150
// op_jnlesseq: jump to 'target' when !(op1 <= op2), i.e. when op1 > op2.
// Structure mirrors emit_op_jnless; only the (inverted) branch conditions
// differ: strict LessThan/GreaterThan here versus the -OrEqual forms there.
void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Int32 less-or-equal.
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
        // Jump when op2 < constant op1, i.e. !(op1 <= op2).
        addJump(branch32(LessThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target + 3);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        // Jump when op1 > constant op2, i.e. !(op1 <= op2).
        addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target + 3);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThan, regT0, regT2), target + 3);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less-or-equal: 'target' travels in the helper's dst slot.
    emitBinaryDoubleOp(op_jnlesseq, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}
187
// Slow path for op_jnlesseq; identical shape to emitSlow_op_jnless. The
// linkSlowCase() calls must consume the fast path's slow cases in order.
void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (!supportsFloatingPoint()) {
        // Without FPU, each non-constant operand contributed one int32 check.
        if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
            linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
    } else {
        // With FPU, the slow cases come from emitBinaryDoubleOp's tag checks.
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // double check
            linkSlowCase(iter); // int32 check
        }
        if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
            linkSlowCase(iter); // double check
    }

    // The stub computes (op1 <= op2); jnlesseq jumps when that is false.
    JITStubCall stubCall(this, cti_op_jlesseq);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
}
213
214 // LeftShift (<<)
215
// op_lshift: dst = op1 << op2, int32 only; any non-int32 operand takes the
// slow case. A constant shift amount avoids loading op2.
// NOTE(review): the shift count is used unmasked here; presumably the
// backend's lshift32 applies the ECMA-required (count & 31) — confirm for
// every supported assembler.
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
        emitStoreInt32(dst, regT0, dst == op1);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    // A constant op1 is known int32, so its tag check can be skipped.
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    lshift32(regT2, regT0);
    emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
}
237
// Slow path for op_lshift: link the int32 tag checks in emission order (one
// per non-constant operand on the register path, one on the constant-op2
// path) and call the stub with both operands.
void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
253
254 // RightShift (>>)
255
// op_rshift: dst = op1 >> op2 (signed/arithmetic shift), int32 only; any
// non-int32 operand takes the slow case. Mirrors emit_op_lshift.
// NOTE(review): as with lshift, the count is used unmasked — presumably
// rshift32 masks to 0..31 per ECMA semantics; confirm per backend.
void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        rshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
        emitStoreInt32(dst, regT0, dst == op1);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    // A constant op1 is known int32, so its tag check can be skipped.
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    rshift32(regT2, regT0);
    emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
}
277
// Slow path for op_rshift: same linking discipline as emitSlow_op_lshift —
// consume the fast path's int32 checks in emission order, then call the stub.
void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_rshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
293
294 // BitAnd (&)
295
// op_bitand: dst = op1 & op2, int32 fast path only. If either operand is a
// constant int32, getOperandConstantImmediateInt() selects the other operand
// into 'op' and the constant folds into the and32 immediate.
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        and32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, (op == dst));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    and32(regT2, regT0);
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}
318
// Slow path for op_bitand: one int32 check when a constant operand was
// folded, two otherwise — linked in the order they were emitted.
void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitand);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
334
335 // BitOr (|)
336
// op_bitor: dst = op1 | op2, int32 fast path only. Same constant-folding
// structure as emit_op_bitand.
void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        or32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, (op == dst));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    or32(regT2, regT0);
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}
359
// Slow path for op_bitor: one int32 check when a constant operand was
// folded, two otherwise — linked in the order they were emitted.
void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
375
376 // BitXor (^)
377
// op_bitxor: dst = op1 ^ op2, int32 fast path only. Same constant-folding
// structure as emit_op_bitand/emit_op_bitor.
void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        xor32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, (op == dst));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    xor32(regT2, regT0);
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}
400
// Slow path for op_bitxor: one int32 check when a constant operand was
// folded, two otherwise — linked in the order they were emitted.
void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitxor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
416
417 // BitNot (~)
418
// op_bitnot: dst = ~src. Int32 complement inline; any non-int32 source
// takes the slow case (bitwise not cannot overflow, so no other checks).
void JIT::emit_op_bitnot(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));

    not32(regT0);
    emitStoreInt32(dst, regT0, (dst == src));
}
430
// Slow path for op_bitnot: the single int32 tag check failed. The operand is
// still live in regT1/regT0, so it is passed to the stub directly.
void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitnot);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}
441
442 // PostInc (i++)
443
// op_post_inc: dst = srcDst; srcDst = srcDst + 1 (int32 fast path).
// The old value is stored to dst *before* the add, so the overflow slow case
// can simply redo the whole operation via the stub.
void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitLoad(srcDst, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));

    if (dst == srcDst) // x = x++ is a noop for ints.
        return;

    emitStoreInt32(dst, regT0);

    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}
460
// Slow path for op_post_inc. The overflow slow case exists only when
// dst != srcDst (the fast path returned early otherwise), so the second
// linkSlowCase is conditional to keep the iterator in sync.
void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter); // int32 check
    if (dst != srcDst)
        linkSlowCase(iter); // overflow check

    // Pass the srcDst value plus its register index so the stub can write
    // the incremented value back while the old value becomes the call result.
    JITStubCall stubCall(this, cti_op_post_inc);
    stubCall.addArgument(srcDst);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(dst);
}
475
476 // PostDec (i--)
477
// op_post_dec: dst = srcDst; srcDst = srcDst - 1 (int32 fast path).
// Mirrors emit_op_post_inc with branchSub32 for the overflow check.
void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitLoad(srcDst, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));

    if (dst == srcDst) // x = x-- is a noop for ints.
        return;

    // Store the pre-decrement value to dst first; the stub redoes everything
    // if the subtract overflows.
    emitStoreInt32(dst, regT0);

    addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}
494
// Slow path for op_post_dec; same conditional second link as
// emitSlow_op_post_inc (the overflow check only exists when dst != srcDst).
void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter); // int32 check
    if (dst != srcDst)
        linkSlowCase(iter); // overflow check

    // Pass the srcDst value plus its register index for write-back.
    JITStubCall stubCall(this, cti_op_post_dec);
    stubCall.addArgument(srcDst);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(dst);
}
509
510 // PreInc (++i)
511
// op_pre_inc: srcDst = srcDst + 1 in place (int32 fast path). Non-int32
// values and overflow both defer to the stub.
void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}
522
// Slow path for op_pre_inc: link the int32 and overflow checks (in emission
// order) and let the stub produce the incremented value back into srcDst.
void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_pre_inc);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}
534
535 // PreDec (--i)
536
// op_pre_dec: srcDst = srcDst - 1 in place (int32 fast path). Mirrors
// emit_op_pre_inc with branchSub32 for the overflow check.
void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}
547
// Slow path for op_pre_dec; same linking discipline as emitSlow_op_pre_inc.
void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}
559
560 // Addition (+)
561
// op_add: dst = op1 + op2. If either operand is a constant int32, the work
// is delegated to emitAdd32Constant. Otherwise: int32 fast path with an
// overflow slow case, then a double path via emitBinaryDoubleOp (or slow
// cases for the tag checks when there is no FPU). 'types' carries the
// bytecode's static knowledge of whether each operand is definitely a number.
void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        // Only the non-constant operand's static type matters here.
        emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchAdd32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
598
// Add a known int32 constant to operand 'op', storing into dst.
// Int32 path: in-place add with an overflow slow case. Double path: the
// constant is converted to a double and added to op's double value; if op's
// static type does not guarantee a number, a tag check guards this path.
void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    // A tag above LowestTag is not a double — not a number — so slow-case.
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    addDouble(fpRegT1, fpRegT0); // fpRegT0 = op + constant
    emitStoreDouble(dst, fpRegT0);

    end.link(this);
}
625
// Slow path for op_add. The linkSlowCase() sequence must replay exactly the
// addSlowCase() sites of emit_op_add / emitAdd32Constant /
// emitBinaryDoubleOp, which depend on constant folding, FPU support, and the
// operands' static types.
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        // Constant path: slow cases came from emitAdd32Constant.
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint())
            linkSlowCase(iter); // non-sse case
        else {
            ResultType opType = op == op1 ? types.first() : types.second();
            if (!opType.definitelyIsNumber())
                linkSlowCase(iter); // double check
        }
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            // These mirror emitBinaryDoubleOp's optional tag checks.
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_add);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
667
668 // Subtraction (-)
669
// op_sub: dst = op1 - op2. A constant int32 op2 is delegated to
// emitSub32Constant (subtraction is not commutative, so unlike op_add only
// the right operand can be folded). Otherwise: int32 fast path with overflow
// slow case, then the shared double path.
void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    if (isOperandConstantImmediateInt(op2)) {
        emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchSub32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
704
// Subtract a known int32 constant from operand 'op', storing into dst.
// Same shape as emitAdd32Constant; note the operand order on the double
// path — subDouble subtracts its first register from its second, giving
// fpRegT1 = op - constant.
void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    // A tag above LowestTag is not a double — not a number — so slow-case.
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    subDouble(fpRegT0, fpRegT1); // fpRegT1 = op - constant
    emitStoreDouble(dst, fpRegT1);

    end.link(this);
}
731
// Slow path for op_sub, replaying the addSlowCase() sites of emit_op_sub /
// emitSub32Constant / emitBinaryDoubleOp in emission order.
void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op2)) {
        // Constant path: slow cases came from emitSub32Constant.
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
            linkSlowCase(iter); // int32 or double check
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            // These mirror emitBinaryDoubleOp's optional tag checks.
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_sub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
766
// Shared double-path emitter for binary arithmetic (mul/add/sub/div) and the
// inverted compare-and-jump ops (jnless/jnlesseq).
//
// Parameters:
//   dst       - result register index for arithmetic ops; for jnless/jnlesseq
//               it instead carries the jump target (used as 'dst + 3').
//   types     - static operand types; 'definitelyIsNumber' elides tag checks
//               (the elided/emitted checks must be mirrored by the caller's
//               emitSlow_* linking).
//   notInt32Op1/2 - the fast path's failed int32 tag checks; an empty list
//               means that operand is known int32 and its case is skipped.
//   op1/op2IsInRegisters - whether the operand tag/payload are already in
//               regT1:regT0 / regT3:regT2 on entry to this path.
//
// Tag encoding facts used below: a tag word below LowestTag is the high word
// of a double; a tag above LowestTag is not a number at all.
void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
    JumpList end;

    if (!notInt32Op1.empty()) {
        // Double case 1: Op1 is not int32; Op2 is unknown.
        notInt32Op1.link(this);

        ASSERT(op1IsInRegisters);

        // Verify Op1 is double.
        if (!types.first().definitelyIsNumber())
            addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));

        if (!op2IsInRegisters)
            emitLoad(op2, regT3, regT2);

        // Op2 may be either an int32 (convert it) or a double (load it).
        Jump doubleOp2 = branch32(Below, regT3, Imm32(JSValue::LowestTag));

        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

        convertInt32ToDouble(regT2, fpRegT0);
        Jump doTheMath = jump();

        // Load Op2 as double into double register.
        doubleOp2.link(this);
        emitLoadDouble(op2, fpRegT0);

        // Do the math. In every case fpRegT0 holds op2 as a double and op1
        // is (re)loaded from memory; the compare ops jump on the *inverted*
        // condition, with 'dst' doubling as the jump target.
        doTheMath.link(this);
        switch (opcodeID) {
        case op_mul:
            emitLoadDouble(op1, fpRegT2);
            mulDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_add:
            emitLoadDouble(op1, fpRegT2);
            addDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_sub:
            emitLoadDouble(op1, fpRegT1);
            subDouble(fpRegT0, fpRegT1); // fpRegT1 = op1 - op2
            emitStoreDouble(dst, fpRegT1);
            break;
        case op_div:
            emitLoadDouble(op1, fpRegT1);
            divDouble(fpRegT0, fpRegT1); // fpRegT1 = op1 / op2
            emitStoreDouble(dst, fpRegT1);
            break;
        case op_jnless:
            // Jump when op2 <= op1, i.e. !(op1 < op2).
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT2), dst + 3);
            break;
        case op_jnlesseq:
            // Jump when op2 < op1, i.e. !(op1 <= op2).
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT2), dst + 3);
            break;
        default:
            ASSERT_NOT_REACHED();
        }

        if (!notInt32Op2.empty())
            end.append(jump());
    }

    if (!notInt32Op2.empty()) {
        // Double case 2: Op1 is int32; Op2 is not int32.
        notInt32Op2.link(this);

        ASSERT(op2IsInRegisters);

        if (!op1IsInRegisters)
            emitLoadPayload(op1, regT0);

        // Op1 is a known int32 here: convert it to a double in fpRegT0.
        convertInt32ToDouble(regT0, fpRegT0);

        // Verify op2 is double.
        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(Above, regT3, Imm32(JSValue::LowestTag)));

        // Do the math. Note fpRegT0 holds op1 in this half (the opposite of
        // case 1), so the operand roles in each op are swapped accordingly.
        switch (opcodeID) {
        case op_mul:
            emitLoadDouble(op2, fpRegT2);
            mulDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_add:
            emitLoadDouble(op2, fpRegT2);
            addDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_sub:
            emitLoadDouble(op2, fpRegT2);
            subDouble(fpRegT2, fpRegT0); // fpRegT0 = op1 - op2
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_div:
            emitLoadDouble(op2, fpRegT2);
            divDouble(fpRegT2, fpRegT0); // fpRegT0 = op1 / op2
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_jnless:
            // Jump when op2 <= op1, i.e. !(op1 < op2).
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), dst + 3);
            break;
        case op_jnlesseq:
            // Jump when op2 < op1, i.e. !(op1 <= op2).
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), dst + 3);
            break;
        default:
            ASSERT_NOT_REACHED();
        }
    }

    end.link(this);
}
887
888 // Multiplication (*)
889
// op_mul: dst = op1 * op2. Int32 fast path slow-cases both overflow and a
// zero result: a zero product must be re-examined because it may need to be
// the double -0.0 (when either factor was negative). op1's payload is saved
// in regT3 before the multiply clobbers regT0, so the slow path can test the
// operands' signs.
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

    // Int32 case.
    move(regT0, regT3); // save op1's payload for the slow path's -0 check
    addSlowCase(branchMul32(Overflow, regT2, regT0));
    addSlowCase(branchTest32(Zero, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
921
922 void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
923 {
924 unsigned dst = currentInstruction[1].u.operand;
925 unsigned op1 = currentInstruction[2].u.operand;
926 unsigned op2 = currentInstruction[3].u.operand;
927 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
928
929 Jump overflow = getSlowCase(iter); // overflow check
930 linkSlowCase(iter); // zero result check
931
932 Jump negZero = branchOr32(Signed, regT2, regT3);
933 emitStoreInt32(dst, Imm32(0), (op1 == dst || op2 == dst));
934
935 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
936
937 negZero.link(this);
938 overflow.link(this);
939
940 if (!supportsFloatingPoint()) {
941 linkSlowCase(iter); // int32 check
942 linkSlowCase(iter); // int32 check
943 }
944
945 if (supportsFloatingPoint()) {
946 if (!types.first().definitelyIsNumber())
947 linkSlowCase(iter); // double check
948
949 if (!types.second().definitelyIsNumber()) {
950 linkSlowCase(iter); // int32 check
951 linkSlowCase(iter); // double check
952 }
953 }
954
955 Label jitStubCall(this);
956 JITStubCall stubCall(this, cti_op_mul);
957 stubCall.addArgument(op1);
958 stubCall.addArgument(op2);
959 stubCall.call(dst);
960 }
961
962 // Division (/)
963
// op_div: dst = op1 / op2. Division is always performed in double precision;
// without FPU support everything goes straight to the stub. When both
// operands are int32 they are converted, divided, and — unless op1 is a
// constant <= 1 — the quotient is converted back and compared to detect an
// exact int32 result (a zero quotient is kept as a double since it may be
// -0). NOTE(review): the constant<=1 exclusion presumably exists because
// 0/x and 1/x rarely yield a useful int32 (and 0/x can be -0) — confirm.
// NOTE(review): cvttsd2si_rr / ucomisd_rr / jne / jp are raw x86 assembler
// calls; presumably this JSVALUE32_64 path is only compiled for x86
// backends — confirm the build guards.
void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint()) {
        addSlowCase(jump());
        return;
    }

    // Int32 divide.
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    JumpList end;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);

    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

    convertInt32ToDouble(regT0, fpRegT0);
    convertInt32ToDouble(regT2, fpRegT1);
    divDouble(fpRegT1, fpRegT0); // fpRegT0 = op1 / op2

    JumpList doubleResult;
    if (!isOperandConstantImmediateInt(op1) || getConstantOperand(op1).asInt32() > 1) {
        // Truncate and round-trip: if converting the quotient back to a
        // double compares unequal (or unordered — jp catches NaN), the
        // result is not an exact int32.
        m_assembler.cvttsd2si_rr(fpRegT0, regT0);
        convertInt32ToDouble(regT0, fpRegT1);
        m_assembler.ucomisd_rr(fpRegT1, fpRegT0);

        doubleResult.append(m_assembler.jne());
        doubleResult.append(m_assembler.jp());

        // A zero quotient may be -0, which only a double can represent.
        doubleResult.append(branchTest32(Zero, regT0));

        // Int32 result.
        emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
        end.append(jump());
    }

    // Double result.
    doubleResult.link(this);
    emitStoreDouble(dst, fpRegT0);
    end.append(jump());

    // Double divide.
    emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
1016
// Slow path for op_div (JSVALUE32_64 representation).
// Links slow-case entries in the same order emit_op_div / emitBinaryDoubleOp
// registered them, then calls the generic divide stub.
void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint())
        linkSlowCase(iter); // the unconditional jump added by emit_op_div
    else {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
1041
1042 // Mod (%)
1043
1044 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
1045
1046 #if PLATFORM(X86) || PLATFORM(X86_64)
1047
// Fast path for op_mod (JSVALUE32_64, x86). Uses cdq/idiv, so the dividend
// must be in eax (sign-extended into edx) and the divisor in ecx; the
// remainder comes back in edx. Guards against the two x86 idiv fault cases
// (INT_MIN / -1, and division by zero) by bailing to the slow path.
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
        // Known non-zero constant divisor: no divide-by-zero check needed,
        // and the INT_MIN check is only needed when the divisor is -1.
        emitLoad(op1, X86::edx, X86::eax);
        move(Imm32(getConstantOperand(op2).asInt32()), X86::ecx);
        addSlowCase(branch32(NotEqual, X86::edx, Imm32(JSValue::Int32Tag)));
        if (getConstantOperand(op2).asInt32() == -1)
            addSlowCase(branch32(Equal, X86::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
    } else {
        emitLoad2(op1, X86::edx, X86::eax, op2, X86::ebx, X86::ecx);
        addSlowCase(branch32(NotEqual, X86::edx, Imm32(JSValue::Int32Tag)));
        addSlowCase(branch32(NotEqual, X86::ebx, Imm32(JSValue::Int32Tag)));

        addSlowCase(branch32(Equal, X86::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
        addSlowCase(branch32(Equal, X86::ecx, Imm32(0))); // divide by 0
    }

    move(X86::eax, X86::ebx); // Save dividend payload, in case of 0.
    m_assembler.cdq();
    m_assembler.idivl_r(X86::ecx);

    // If the remainder is zero and the dividend is negative, the result is -0.
    Jump storeResult1 = branchTest32(NonZero, X86::edx);
    Jump storeResult2 = branchTest32(Zero, X86::ebx, Imm32(0x80000000)); // not negative
    emitStore(dst, jsNumber(m_globalData, -0.0));
    Jump end = jump();

    storeResult1.link(this);
    storeResult2.link(this);
    emitStoreInt32(dst, X86::edx, (op1 == dst || op2 == dst));
    end.link(this);
}
1084
// Slow path for op_mod (JSVALUE32_64, x86). Entry order must mirror the
// addSlowCase() order in emit_op_mod above: int32 check(s), then the
// 0x80000000 (INT_MIN dividend) check, then the divide-by-zero check.
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
        linkSlowCase(iter); // int32 check
        if (getConstantOperand(op2).asInt32() == -1)
            linkSlowCase(iter); // 0x80000000 check
    } else {
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // 0x80000000 check (emitted before the 0 check in the fast path)
        linkSlowCase(iter); // 0 check
    }

    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
1107
1108 #else // PLATFORM(X86) || PLATFORM(X86_64)
1109
1110 void JIT::emit_op_mod(Instruction* currentInstruction)
1111 {
1112 unsigned dst = currentInstruction[1].u.operand;
1113 unsigned op1 = currentInstruction[2].u.operand;
1114 unsigned op2 = currentInstruction[3].u.operand;
1115
1116 JITStubCall stubCall(this, cti_op_mod);
1117 stubCall.addArgument(op1);
1118 stubCall.addArgument(op2);
1119 stubCall.call(dst);
1120 }
1121
void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    // Nothing to do: the non-x86 emit_op_mod above is a plain stub call and
    // registers no slow cases.
}
1125
1126 #endif // PLATFORM(X86) || PLATFORM(X86_64)
1127
1128 /* ------------------------------ END: OP_MOD ------------------------------ */
1129
1130 #else // USE(JSVALUE32_64)
1131
// Fast path for op_lshift (left shift) in the non-JSVALUE32_64 encodings.
// Both operands must be immediate integers; otherwise we bail to the stub.
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    // Strip the immediate tags so we shift raw int32 payloads.
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
#if !PLATFORM(X86)
    // Mask with 0x1f as per ecma-262 11.7.2 step 7.
    // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
    and32(Imm32(0x1f), regT2);
#endif
    lshift32(regT2, regT0);
#if !USE(JSVALUE64)
    // 32-bit immediates: the re-tagging add can overflow, so it is a slow case.
    addSlowCase(branchAdd32(Overflow, regT0, regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}
1157
// Slow path for op_lshift. Slow-case order must match emit_op_lshift:
// two not-int checks, plus (JSVALUE32 only) the re-tag overflow case.
void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    UNUSED_PARAM(op1);
    UNUSED_PARAM(op2);
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    // If we are limited to 32-bit immediates there is a third slow case, which required the operands to have been reloaded.
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter);
    // Overflow case: regT0/regT2 were untagged/shifted, so reload the
    // original operands before calling the stub.
    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    notImm1.link(this);
    notImm2.link(this);
#endif
    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT2);
    stubCall.call(result);
}
1183
// Fast path for op_rshift (signed right shift). Handles three shapes:
// constant shift amount, double LHS (when truncation is supported), and the
// generic int-int case. The "=> N SlowCases" comments record the contract
// emitSlow_op_rshift relies on when linking.
void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        // isOperandConstantImmediateInt(op2) => 1 SlowCase
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
#if USE(JSVALUE64)
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
#else
        // Arithmetic shift on the tagged pointer preserves the low tag bit.
        rshiftPtr(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
#endif
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (supportsFloatingPointTruncate()) {
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
#if USE(JSVALUE64)
            // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
#else
            // supportsFloatingPoint() && !USE(JSVALUE64) => 5 SlowCases (of which 1 IfNotJSCell)
            emitJumpSlowCaseIfNotJSCell(regT0, op1);
            addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
            addSlowCase(branchAdd32(Overflow, regT0, regT0));
#endif
            lhsIsInt.link(this);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        } else {
            // !supportsFloatingPoint() => 2 SlowCases
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        }
        emitFastArithImmToInt(regT2);
#if !PLATFORM(X86)
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        // On 32-bit x86 this is not necessary, since the shift amount is implicitly masked in the instruction.
        and32(Imm32(0x1f), regT2);
#endif
#if USE(JSVALUE64)
        rshift32(regT2, regT0);
#else
        rshiftPtr(regT2, regT0);
#endif
    }
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    // Re-set the integer tag bit (it survives an arithmetic right shift).
    orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
#endif
    emitPutVirtualRegister(result);
}
1244
// Slow path for op_rshift. The number of linkSlowCase() calls in each branch
// matches the "=> N SlowCases" accounting in emit_op_rshift above.
void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_rshift);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
    } else {
        if (supportsFloatingPointTruncate()) {
#if USE(JSVALUE64)
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
#else
            linkSlowCaseIfNotJSCell(iter, op1);
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
#endif
            // We're reloading op1 to regT0 as we can no longer guarantee that
            // we have not munged the operand. It may have already been shifted
            // correctly, but it still will not have been tagged.
            stubCall.addArgument(op1, regT0);
            stubCall.addArgument(regT2);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
            stubCall.addArgument(regT0);
            stubCall.addArgument(regT2);
        }
    }

    stubCall.call(result);
}
1285
// Fast path for op_jnless: jump to `target` when !(op1 < op2).
// Note the branches use the inverted condition (GreaterThanOrEqual /
// LessThanOrEqual) because the opcode jumps on NOT-less.
// The `target + 3` offset matches the interpreter's jump-offset convention
// for this opcode's operand layout.
void JIT::emit_op_jnless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        // JSVALUE32: compare the raw tagged bits; tags are equal so ordering holds.
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target + 3);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        // Operands swapped: op2 <= op1 is equivalent to !(op1 < op2).
        addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target + 3);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(GreaterThanOrEqual, regT0, regT1), target + 3);
    }
}
1323
1324 void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1325 {
1326 unsigned op1 = currentInstruction[1].u.operand;
1327 unsigned op2 = currentInstruction[2].u.operand;
1328 unsigned target = currentInstruction[3].u.operand;
1329
1330 // We generate inline code for the following cases in the slow path:
1331 // - floating-point number to constant int immediate
1332 // - constant int immediate to floating-point number
1333 // - floating-point number to floating-point number.
1334
1335 if (isOperandConstantImmediateInt(op2)) {
1336 linkSlowCase(iter);
1337
1338 if (supportsFloatingPoint()) {
1339 #if USE(JSVALUE64)
1340 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1341 addPtr(tagTypeNumberRegister, regT0);
1342 movePtrToDouble(regT0, fpRegT0);
1343 #else
1344 Jump fail1;
1345 if (!m_codeBlock->isKnownNotImmediate(op1))
1346 fail1 = emitJumpIfNotJSCell(regT0);
1347
1348 Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
1349 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1350 #endif
1351
1352 int32_t op2imm = getConstantOperand(op2).asInt32();;
1353
1354 move(Imm32(op2imm), regT1);
1355 convertInt32ToDouble(regT1, fpRegT1);
1356
1357 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
1358
1359 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1360
1361 #if USE(JSVALUE64)
1362 fail1.link(this);
1363 #else
1364 if (!m_codeBlock->isKnownNotImmediate(op1))
1365 fail1.link(this);
1366 fail2.link(this);
1367 #endif
1368 }
1369
1370 JITStubCall stubCall(this, cti_op_jless);
1371 stubCall.addArgument(regT0);
1372 stubCall.addArgument(op2, regT2);
1373 stubCall.call();
1374 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
1375
1376 } else if (isOperandConstantImmediateInt(op1)) {
1377 linkSlowCase(iter);
1378
1379 if (supportsFloatingPoint()) {
1380 #if USE(JSVALUE64)
1381 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
1382 addPtr(tagTypeNumberRegister, regT1);
1383 movePtrToDouble(regT1, fpRegT1);
1384 #else
1385 Jump fail1;
1386 if (!m_codeBlock->isKnownNotImmediate(op2))
1387 fail1 = emitJumpIfNotJSCell(regT1);
1388
1389 Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
1390 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1391 #endif
1392
1393 int32_t op1imm = getConstantOperand(op1).asInt32();;
1394
1395 move(Imm32(op1imm), regT0);
1396 convertInt32ToDouble(regT0, fpRegT0);
1397
1398 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
1399
1400 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1401
1402 #if USE(JSVALUE64)
1403 fail1.link(this);
1404 #else
1405 if (!m_codeBlock->isKnownNotImmediate(op2))
1406 fail1.link(this);
1407 fail2.link(this);
1408 #endif
1409 }
1410
1411 JITStubCall stubCall(this, cti_op_jless);
1412 stubCall.addArgument(op1, regT2);
1413 stubCall.addArgument(regT1);
1414 stubCall.call();
1415 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
1416
1417 } else {
1418 linkSlowCase(iter);
1419
1420 if (supportsFloatingPoint()) {
1421 #if USE(JSVALUE64)
1422 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1423 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
1424 Jump fail3 = emitJumpIfImmediateInteger(regT1);
1425 addPtr(tagTypeNumberRegister, regT0);
1426 addPtr(tagTypeNumberRegister, regT1);
1427 movePtrToDouble(regT0, fpRegT0);
1428 movePtrToDouble(regT1, fpRegT1);
1429 #else
1430 Jump fail1;
1431 if (!m_codeBlock->isKnownNotImmediate(op1))
1432 fail1 = emitJumpIfNotJSCell(regT0);
1433
1434 Jump fail2;
1435 if (!m_codeBlock->isKnownNotImmediate(op2))
1436 fail2 = emitJumpIfNotJSCell(regT1);
1437
1438 Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
1439 Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
1440 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1441 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1442 #endif
1443
1444 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqual, fpRegT1, fpRegT0), target + 3);
1445
1446 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1447
1448 #if USE(JSVALUE64)
1449 fail1.link(this);
1450 fail2.link(this);
1451 fail3.link(this);
1452 #else
1453 if (!m_codeBlock->isKnownNotImmediate(op1))
1454 fail1.link(this);
1455 if (!m_codeBlock->isKnownNotImmediate(op2))
1456 fail2.link(this);
1457 fail3.link(this);
1458 fail4.link(this);
1459 #endif
1460 }
1461
1462 linkSlowCase(iter);
1463 JITStubCall stubCall(this, cti_op_jless);
1464 stubCall.addArgument(regT0);
1465 stubCall.addArgument(regT1);
1466 stubCall.call();
1467 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
1468 }
1469 }
1470
// Fast path for op_jnlesseq: jump to `target` when !(op1 <= op2).
// Mirrors emit_op_jnless but with strict inverted conditions
// (GreaterThan / LessThan) since the opcode jumps on NOT-less-or-equal.
void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        // JSVALUE32: compare raw tagged bits; identical tags preserve ordering.
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(GreaterThan, regT0, Imm32(op2imm)), target + 3);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        // Operands swapped: op2 < op1 is equivalent to !(op1 <= op2).
        addJump(branch32(LessThan, regT1, Imm32(op1imm)), target + 3);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(GreaterThan, regT0, regT1), target + 3);
    }
}
1508
1509 void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1510 {
1511 unsigned op1 = currentInstruction[1].u.operand;
1512 unsigned op2 = currentInstruction[2].u.operand;
1513 unsigned target = currentInstruction[3].u.operand;
1514
1515 // We generate inline code for the following cases in the slow path:
1516 // - floating-point number to constant int immediate
1517 // - constant int immediate to floating-point number
1518 // - floating-point number to floating-point number.
1519
1520 if (isOperandConstantImmediateInt(op2)) {
1521 linkSlowCase(iter);
1522
1523 if (supportsFloatingPoint()) {
1524 #if USE(JSVALUE64)
1525 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1526 addPtr(tagTypeNumberRegister, regT0);
1527 movePtrToDouble(regT0, fpRegT0);
1528 #else
1529 Jump fail1;
1530 if (!m_codeBlock->isKnownNotImmediate(op1))
1531 fail1 = emitJumpIfNotJSCell(regT0);
1532
1533 Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
1534 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1535 #endif
1536
1537 int32_t op2imm = getConstantOperand(op2).asInt32();;
1538
1539 move(Imm32(op2imm), regT1);
1540 convertInt32ToDouble(regT1, fpRegT1);
1541
1542 emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
1543
1544 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
1545
1546 #if USE(JSVALUE64)
1547 fail1.link(this);
1548 #else
1549 if (!m_codeBlock->isKnownNotImmediate(op1))
1550 fail1.link(this);
1551 fail2.link(this);
1552 #endif
1553 }
1554
1555 JITStubCall stubCall(this, cti_op_jlesseq);
1556 stubCall.addArgument(regT0);
1557 stubCall.addArgument(op2, regT2);
1558 stubCall.call();
1559 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
1560
1561 } else if (isOperandConstantImmediateInt(op1)) {
1562 linkSlowCase(iter);
1563
1564 if (supportsFloatingPoint()) {
1565 #if USE(JSVALUE64)
1566 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
1567 addPtr(tagTypeNumberRegister, regT1);
1568 movePtrToDouble(regT1, fpRegT1);
1569 #else
1570 Jump fail1;
1571 if (!m_codeBlock->isKnownNotImmediate(op2))
1572 fail1 = emitJumpIfNotJSCell(regT1);
1573
1574 Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
1575 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1576 #endif
1577
1578 int32_t op1imm = getConstantOperand(op1).asInt32();;
1579
1580 move(Imm32(op1imm), regT0);
1581 convertInt32ToDouble(regT0, fpRegT0);
1582
1583 emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
1584
1585 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
1586
1587 #if USE(JSVALUE64)
1588 fail1.link(this);
1589 #else
1590 if (!m_codeBlock->isKnownNotImmediate(op2))
1591 fail1.link(this);
1592 fail2.link(this);
1593 #endif
1594 }
1595
1596 JITStubCall stubCall(this, cti_op_jlesseq);
1597 stubCall.addArgument(op1, regT2);
1598 stubCall.addArgument(regT1);
1599 stubCall.call();
1600 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
1601
1602 } else {
1603 linkSlowCase(iter);
1604
1605 if (supportsFloatingPoint()) {
1606 #if USE(JSVALUE64)
1607 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1608 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
1609 Jump fail3 = emitJumpIfImmediateInteger(regT1);
1610 addPtr(tagTypeNumberRegister, regT0);
1611 addPtr(tagTypeNumberRegister, regT1);
1612 movePtrToDouble(regT0, fpRegT0);
1613 movePtrToDouble(regT1, fpRegT1);
1614 #else
1615 Jump fail1;
1616 if (!m_codeBlock->isKnownNotImmediate(op1))
1617 fail1 = emitJumpIfNotJSCell(regT0);
1618
1619 Jump fail2;
1620 if (!m_codeBlock->isKnownNotImmediate(op2))
1621 fail2 = emitJumpIfNotJSCell(regT1);
1622
1623 Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
1624 Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
1625 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1626 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1627 #endif
1628
1629 emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT1, fpRegT0), target + 3);
1630
1631 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
1632
1633 #if USE(JSVALUE64)
1634 fail1.link(this);
1635 fail2.link(this);
1636 fail3.link(this);
1637 #else
1638 if (!m_codeBlock->isKnownNotImmediate(op1))
1639 fail1.link(this);
1640 if (!m_codeBlock->isKnownNotImmediate(op2))
1641 fail2.link(this);
1642 fail3.link(this);
1643 fail4.link(this);
1644 #endif
1645 }
1646
1647 linkSlowCase(iter);
1648 JITStubCall stubCall(this, cti_op_jlesseq);
1649 stubCall.addArgument(regT0);
1650 stubCall.addArgument(regT1);
1651 stubCall.call();
1652 emitJumpSlowToHot(branchTest32(Zero, regT0), target + 3);
1653 }
1654 }
1655
// Fast path for op_bitand. ANDing two tagged immediate ints yields a value
// that is still correctly tagged, so the constant-operand paths AND the raw
// bits directly (JSVALUE32) or re-tag afterwards (JSVALUE64).
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t imm = getConstantOperandImmediateInt(op1);
        andPtr(Imm32(imm), regT0);
        // A non-negative 32-bit immediate clears the upper tag bits, so re-tag.
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        // AND against the raw tagged constant: tag bit is set in both, so it survives.
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0);
#endif
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t imm = getConstantOperandImmediateInt(op2);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0);
#endif
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        andPtr(regT1, regT0);
        // The AND of two tagged ints is a tagged int, so one check suffices.
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}
1691
// Slow path for op_bitand: exactly one slow case in every fast-path shape.
void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    if (isOperandConstantImmediateInt(op1)) {
        // regT0 holds op2 (loaded by the fast path); op1 is reloaded from its constant.
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT0);
        stubCall.call(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else {
        // regT0 was clobbered by the fast path's andPtr, so reload op1;
        // regT1 still holds op2 untouched.
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call(result);
    }
}
1716
// Fast path for op_post_inc (x++): `result` gets the original value,
// `srcDst` gets the incremented value. regT0 keeps the original; the
// increment is performed on the copy in regT1.
void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    // JSVALUE32: operate on the tagged value, so add 1 << payload shift.
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
1735
// Slow path for op_post_inc: two slow cases (not-int, overflow), then the
// stub, which receives the original value (still in regT0) plus the srcDst
// register index so it can write back the incremented value itself.
void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_post_inc);
    stubCall.addArgument(regT0);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(result);
}
1748
// Fast path for op_post_dec (x--): mirrors emit_op_post_inc with a subtract.
// NOTE(review): the slow case fires on the Zero condition of the subtract
// rather than Overflow — this matches pre_dec below and appears deliberate
// (conservatively bail near the boundary); confirm before changing.
void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    addSlowCase(branchSub32(Zero, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
1767
// Slow path for op_post_dec: two slow cases (not-int, subtract bail-out),
// then the stub with the original value and the srcDst register index.
void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_post_dec);
    stubCall.addArgument(regT0);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(result);
}
1780
// Fast path for op_pre_inc (++x): increments srcDst in place.
void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    // JSVALUE32: operate on the tagged value, so add 1 << payload shift.
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}
1796
// Slow path for op_pre_inc. On the overflow case regT0 was clobbered by the
// failed add, so the operand is reloaded; on the not-int case it is intact.
void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter); // overflow: reload the unmodified value
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_inc);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}
1809
// Fast path for op_pre_dec (--x): decrements srcDst in place.
// NOTE(review): bails on the Zero condition of the subtract, not Overflow —
// same pattern as post_dec; confirm the intent before changing.
void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    addSlowCase(branchSub32(Zero, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}
1825
// Slow path for op_pre_dec. On the subtract bail-out regT0 was clobbered,
// so the operand is reloaded; on the not-int case it is intact.
void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter); // subtract bail-out: reload the unmodified value
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}
1838
1839 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
1840
1841 #if PLATFORM(X86) || PLATFORM(X86_64)
1842
// Fast path for op_mod (immediate-value encodings, x86). Uses cdq/idiv:
// dividend in eax, divisor in ecx, remainder delivered in edx.
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, X86::eax, op2, X86::ecx);
    emitJumpSlowCaseIfNotImmediateInteger(X86::eax);
    emitJumpSlowCaseIfNotImmediateInteger(X86::ecx);
#if USE(JSVALUE64)
    // Divisor of 0 would fault in idiv; compare against the encoded zero.
    addSlowCase(branchPtr(Equal, X86::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
    m_assembler.cdq();
    m_assembler.idivl_r(X86::ecx);
#else
    // JSVALUE32: strip tags first; the de-tag helper also detects a zero divisor.
    emitFastArithDeTagImmediate(X86::eax);
    addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86::ecx));
    m_assembler.cdq();
    m_assembler.idivl_r(X86::ecx);
    signExtend32ToPtr(X86::edx, X86::edx);
#endif
    // Remainder is in edx; re-tag it as the result.
    emitFastArithReTagImmediate(X86::edx, X86::eax);
    emitPutVirtualRegister(result);
}
1866
// Slow path for op_mod (immediate-value encodings, x86).
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;

#if USE(JSVALUE64)
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter); // zero divisor: operands were de-tagged, restore tags
    emitFastArithReTagImmediate(X86::eax, X86::eax);
    emitFastArithReTagImmediate(X86::ecx, X86::ecx);
    notImm1.link(this);
    notImm2.link(this);
#endif
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(X86::eax);
    stubCall.addArgument(X86::ecx);
    stubCall.call(result);
}
1889
1890 #else // PLATFORM(X86) || PLATFORM(X86_64)
1891
1892 void JIT::emit_op_mod(Instruction* currentInstruction)
1893 {
1894 unsigned result = currentInstruction[1].u.operand;
1895 unsigned op1 = currentInstruction[2].u.operand;
1896 unsigned op2 = currentInstruction[3].u.operand;
1897
1898 JITStubCall stubCall(this, cti_op_mod);
1899 stubCall.addArgument(op1, regT2);
1900 stubCall.addArgument(op2, regT2);
1901 stubCall.call(result);
1902 }
1903
void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    // The generic emit_op_mod above always calls the stub and registers no
    // slow cases, so this must never be reached.
    ASSERT_NOT_REACHED();
}
1908
1909 #endif // PLATFORM(X86) || PLATFORM(X86_64)
1910
1911 /* ------------------------------ END: OP_MOD ------------------------------ */
1912
1913 #if USE(JSVALUE64)
1914
1915 /* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
1916
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
    // JSVALUE64 fast path for op_add/op_sub/op_mul: both operands must be
    // immediate integers. Leaves the boxed result in regT0; the caller is
    // responsible for storing it to the destination virtual register.
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    if (opcodeID == op_add)
        addSlowCase(branchAdd32(Overflow, regT1, regT0));
    else if (opcodeID == op_sub)
        addSlowCase(branchSub32(Overflow, regT1, regT0));
    else {
        ASSERT(opcodeID == op_mul);
        addSlowCase(branchMul32(Overflow, regT1, regT0));
        // A zero product also takes the slow case: if one operand was
        // negative the true result is -0, which an immediate int can't hold.
        addSlowCase(branchTest32(Zero, regT0));
    }
    // Box the 32-bit integer result back into a JSValue immediate.
    emitFastArithIntToImmNoCheck(regT0, regT0);
}
1933
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned, OperandTypes types)
{
    // JSVALUE64 slow path: before giving up and calling the arithmetic stub,
    // attempt the operation in double arithmetic when both operands are (or
    // convert to) numbers.
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);

    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);
    // regT0 was clobbered by the overflowing integer op; reload op1 from its
    // virtual register before the stub call.
    emitGetVirtualRegister(op1, regT0);

    Label stubFunctionCall(this);
    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(result);
    Jump end = jump();

    // if we get here, eax is not an int32, edx not yet checked.
    notImm1.link(this);
    if (!types.first().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
    if (!types.second().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
    // Unbox op1: adding tagTypeNumberRegister undoes the double encoding
    // (see the COMPILE_ASSERT above).
    addPtr(tagTypeNumberRegister, regT0);
    movePtrToDouble(regT0, fpRegT1);
    Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
    convertInt32ToDouble(regT1, fpRegT2);
    Jump op2wasInteger = jump();

    // if we get here, eax IS an int32, edx is not.
    notImm2.link(this);
    if (!types.second().definitelyIsNumber())
        emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
    convertInt32ToDouble(regT0, fpRegT1);
    op2isDouble.link(this);
    addPtr(tagTypeNumberRegister, regT1);
    movePtrToDouble(regT1, fpRegT2);
    op2wasInteger.link(this);

    // Operands are now unboxed: op1 in fpRegT1, op2 in fpRegT2.
    if (opcodeID == op_add)
        addDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_sub)
        subDouble(fpRegT2, fpRegT1);
    else {
        ASSERT(opcodeID == op_mul);
        mulDouble(fpRegT2, fpRegT1);
    }
    // Re-box the double result and store it to the destination register.
    moveDoubleToPtr(fpRegT1, regT0);
    subPtr(tagTypeNumberRegister, regT0);
    emitPutVirtualRegister(result, regT0);

    end.link(this);
}
1990
void JIT::emit_op_add(Instruction* currentInstruction)
{
    // op_add dst(1) op1(2) op2(3) types(4): dst = op1 + op2.
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // If either operand can never be a number (e.g. it is known to be a
    // string, making '+' a concatenation), skip the arithmetic fast path and
    // go straight to the stub.
    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        // Constant LHS: add the raw constant to the payload; slow cases are
        // "operand not an int" and "overflow".
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        // Constant RHS: mirror of the case above.
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
2021
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1) || isOperandConstantImmediateInt(op2)) {
        // The constant fast path registered exactly two slow cases
        // (operand not an int, add overflow); both fall back to the stub.
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else
        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
2038
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    // op_mul dst(1) op1(2) op2(3) types(4): dst = op1 * op2.
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
    // (A zero or negative constant would require the -0 handling done by the
    // generic path: 0 * negative must produce -0, not +0.)
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        // Mirror of the case above for a constant RHS.
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
2063
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
        // The positive-constant fast path registered two slow cases
        // (operand not an int, mul overflow); both fall back to the stub.
        linkSlowCase(iter);
        linkSlowCase(iter);
        // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
        JITStubCall stubCall(this, cti_op_mul);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else
        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types);
}
2083
2084 void JIT::emit_op_sub(Instruction* currentInstruction)
2085 {
2086 unsigned result = currentInstruction[1].u.operand;
2087 unsigned op1 = currentInstruction[2].u.operand;
2088 unsigned op2 = currentInstruction[3].u.operand;
2089 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2090
2091 compileBinaryArithOp(op_sub, result, op1, op2, types);
2092
2093 emitPutVirtualRegister(result);
2094 }
2095
2096 void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2097 {
2098 unsigned result = currentInstruction[1].u.operand;
2099 unsigned op1 = currentInstruction[2].u.operand;
2100 unsigned op2 = currentInstruction[3].u.operand;
2101 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2102
2103 compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types);
2104 }
2105
2106 #else // USE(JSVALUE64)
2107
2108 /* ------------------------------ BEGIN: !USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
2109
2110 void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
2111 {
2112 Structure* numberStructure = m_globalData->numberStructure.get();
2113 Jump wasJSNumberCell1;
2114 Jump wasJSNumberCell2;
2115
2116 emitGetVirtualRegisters(src1, regT0, src2, regT1);
2117
2118 if (types.second().isReusable() && supportsFloatingPoint()) {
2119 ASSERT(types.second().mightBeNumber());
2120
2121 // Check op2 is a number
2122 Jump op2imm = emitJumpIfImmediateInteger(regT1);
2123 if (!types.second().definitelyIsNumber()) {
2124 emitJumpSlowCaseIfNotJSCell(regT1, src2);
2125 addSlowCase(checkStructure(regT1, numberStructure));
2126 }
2127
2128 // (1) In this case src2 is a reusable number cell.
2129 // Slow case if src1 is not a number type.
2130 Jump op1imm = emitJumpIfImmediateInteger(regT0);
2131 if (!types.first().definitelyIsNumber()) {
2132 emitJumpSlowCaseIfNotJSCell(regT0, src1);
2133 addSlowCase(checkStructure(regT0, numberStructure));
2134 }
2135
2136 // (1a) if we get here, src1 is also a number cell
2137 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2138 Jump loadedDouble = jump();
2139 // (1b) if we get here, src1 is an immediate
2140 op1imm.link(this);
2141 emitFastArithImmToInt(regT0);
2142 convertInt32ToDouble(regT0, fpRegT0);
2143 // (1c)
2144 loadedDouble.link(this);
2145 if (opcodeID == op_add)
2146 addDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2147 else if (opcodeID == op_sub)
2148 subDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2149 else {
2150 ASSERT(opcodeID == op_mul);
2151 mulDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2152 }
2153
2154 // Store the result to the JSNumberCell and jump.
2155 storeDouble(fpRegT0, Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)));
2156 move(regT1, regT0);
2157 emitPutVirtualRegister(dst);
2158 wasJSNumberCell2 = jump();
2159
2160 // (2) This handles cases where src2 is an immediate number.
2161 // Two slow cases - either src1 isn't an immediate, or the subtract overflows.
2162 op2imm.link(this);
2163 emitJumpSlowCaseIfNotImmediateInteger(regT0);
2164 } else if (types.first().isReusable() && supportsFloatingPoint()) {
2165 ASSERT(types.first().mightBeNumber());
2166
2167 // Check op1 is a number
2168 Jump op1imm = emitJumpIfImmediateInteger(regT0);
2169 if (!types.first().definitelyIsNumber()) {
2170 emitJumpSlowCaseIfNotJSCell(regT0, src1);
2171 addSlowCase(checkStructure(regT0, numberStructure));
2172 }
2173
2174 // (1) In this case src1 is a reusable number cell.
2175 // Slow case if src2 is not a number type.
2176 Jump op2imm = emitJumpIfImmediateInteger(regT1);
2177 if (!types.second().definitelyIsNumber()) {
2178 emitJumpSlowCaseIfNotJSCell(regT1, src2);
2179 addSlowCase(checkStructure(regT1, numberStructure));
2180 }
2181
2182 // (1a) if we get here, src2 is also a number cell
2183 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
2184 Jump loadedDouble = jump();
2185 // (1b) if we get here, src2 is an immediate
2186 op2imm.link(this);
2187 emitFastArithImmToInt(regT1);
2188 convertInt32ToDouble(regT1, fpRegT1);
2189 // (1c)
2190 loadedDouble.link(this);
2191 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2192 if (opcodeID == op_add)
2193 addDouble(fpRegT1, fpRegT0);
2194 else if (opcodeID == op_sub)
2195 subDouble(fpRegT1, fpRegT0);
2196 else {
2197 ASSERT(opcodeID == op_mul);
2198 mulDouble(fpRegT1, fpRegT0);
2199 }
2200 storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
2201 emitPutVirtualRegister(dst);
2202
2203 // Store the result to the JSNumberCell and jump.
2204 storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
2205 emitPutVirtualRegister(dst);
2206 wasJSNumberCell1 = jump();
2207
2208 // (2) This handles cases where src1 is an immediate number.
2209 // Two slow cases - either src2 isn't an immediate, or the subtract overflows.
2210 op1imm.link(this);
2211 emitJumpSlowCaseIfNotImmediateInteger(regT1);
2212 } else
2213 emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
2214
2215 if (opcodeID == op_add) {
2216 emitFastArithDeTagImmediate(regT0);
2217 addSlowCase(branchAdd32(Overflow, regT1, regT0));
2218 } else if (opcodeID == op_sub) {
2219 addSlowCase(branchSub32(Overflow, regT1, regT0));
2220 signExtend32ToPtr(regT0, regT0);
2221 emitFastArithReTagImmediate(regT0, regT0);
2222 } else {
2223 ASSERT(opcodeID == op_mul);
2224 // convert eax & edx from JSImmediates to ints, and check if either are zero
2225 emitFastArithImmToInt(regT1);
2226 Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(regT0);
2227 Jump op2NonZero = branchTest32(NonZero, regT1);
2228 op1Zero.link(this);
2229 // if either input is zero, add the two together, and check if the result is < 0.
2230 // If it is, we have a problem (N < 0), (N * 0) == -0, not representatble as a JSImmediate.
2231 move(regT0, regT2);
2232 addSlowCase(branchAdd32(Signed, regT1, regT2));
2233 // Skip the above check if neither input is zero
2234 op2NonZero.link(this);
2235 addSlowCase(branchMul32(Overflow, regT1, regT0));
2236 signExtend32ToPtr(regT0, regT0);
2237 emitFastArithReTagImmediate(regT0, regT0);
2238 }
2239 emitPutVirtualRegister(dst);
2240
2241 if (types.second().isReusable() && supportsFloatingPoint())
2242 wasJSNumberCell2.link(this);
2243 else if (types.first().isReusable() && supportsFloatingPoint())
2244 wasJSNumberCell1.link(this);
2245 }
2246
2247 void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
2248 {
2249 linkSlowCase(iter);
2250 if (types.second().isReusable() && supportsFloatingPoint()) {
2251 if (!types.first().definitelyIsNumber()) {
2252 linkSlowCaseIfNotJSCell(iter, src1);
2253 linkSlowCase(iter);
2254 }
2255 if (!types.second().definitelyIsNumber()) {
2256 linkSlowCaseIfNotJSCell(iter, src2);
2257 linkSlowCase(iter);
2258 }
2259 } else if (types.first().isReusable() && supportsFloatingPoint()) {
2260 if (!types.first().definitelyIsNumber()) {
2261 linkSlowCaseIfNotJSCell(iter, src1);
2262 linkSlowCase(iter);
2263 }
2264 if (!types.second().definitelyIsNumber()) {
2265 linkSlowCaseIfNotJSCell(iter, src2);
2266 linkSlowCase(iter);
2267 }
2268 }
2269 linkSlowCase(iter);
2270
2271 // additional entry point to handle -0 cases.
2272 if (opcodeID == op_mul)
2273 linkSlowCase(iter);
2274
2275 JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
2276 stubCall.addArgument(src1, regT2);
2277 stubCall.addArgument(src2, regT2);
2278 stubCall.call(dst);
2279 }
2280
2281 void JIT::emit_op_add(Instruction* currentInstruction)
2282 {
2283 unsigned result = currentInstruction[1].u.operand;
2284 unsigned op1 = currentInstruction[2].u.operand;
2285 unsigned op2 = currentInstruction[3].u.operand;
2286
2287 if (isOperandConstantImmediateInt(op1)) {
2288 emitGetVirtualRegister(op2, regT0);
2289 emitJumpSlowCaseIfNotImmediateInteger(regT0);
2290 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0));
2291 signExtend32ToPtr(regT0, regT0);
2292 emitPutVirtualRegister(result);
2293 } else if (isOperandConstantImmediateInt(op2)) {
2294 emitGetVirtualRegister(op1, regT0);
2295 emitJumpSlowCaseIfNotImmediateInteger(regT0);
2296 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0));
2297 signExtend32ToPtr(regT0, regT0);
2298 emitPutVirtualRegister(result);
2299 } else {
2300 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2301 if (types.first().mightBeNumber() && types.second().mightBeNumber())
2302 compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
2303 else {
2304 JITStubCall stubCall(this, cti_op_add);
2305 stubCall.addArgument(op1, regT2);
2306 stubCall.addArgument(op2, regT2);
2307 stubCall.call(result);
2308 }
2309 }
2310 }
2311
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        // Two slow cases from the constant fast path: first = operand not an
        // immediate int, second = overflow. On overflow the shifted constant
        // was already added into regT0, so subtract it back out to recover
        // the original tagged operand before the stub call.
        Jump notImm = getSlowCase(iter);
        linkSlowCase(iter);
        sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0);
        notImm.link(this);
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT0);
        stubCall.call(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        // Mirror of the case above for a constant RHS.
        Jump notImm = getSlowCase(iter);
        linkSlowCase(iter);
        sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0);
        notImm.link(this);
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else {
        OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
        // emit_op_add only registers slow cases when both operands might be
        // numbers, so that must hold here.
        ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
        compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
    }
}
2342
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    // op_mul dst(1) op1(2) op2(3) types(4): dst = op1 * op2 (JSVALUE32).
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // For now, only plant a fast int case if the constant operand is greater than zero.
    // (A zero or negative constant would need the -0 handling done by the
    // generic path: 0 * negative must produce -0.)
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Strip the tag, multiply by the raw constant, then re-tag.
        emitFastArithDeTagImmediate(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        // Mirror of the case above for a constant RHS.
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithDeTagImmediate(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(result);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
2370
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
        // The positive-constant fast path registered two slow cases
        // (operand not an int, mul overflow); both fall back to the stub.
        linkSlowCase(iter);
        linkSlowCase(iter);
        // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
        JITStubCall stubCall(this, cti_op_mul);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else
        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
2389
2390 void JIT::emit_op_sub(Instruction* currentInstruction)
2391 {
2392 compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
2393 }
2394
2395 void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2396 {
2397 compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
2398 }
2399
2400 #endif // USE(JSVALUE64)
2401
2402 /* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
2403
2404 #endif // USE(JSVALUE32_64)
2405
2406 } // namespace JSC
2407
2408 #endif // ENABLE(JIT)