]> git.saurik.com Git - apple/javascriptcore.git/blob - jit/JITArithmetic.cpp
JavaScriptCore-584.tar.gz
[apple/javascriptcore.git] / jit / JITArithmetic.cpp
1 /*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include "config.h"
27 #include "JIT.h"
28
29 #if ENABLE(JIT)
30
31 #include "CodeBlock.h"
32 #include "JITInlineMethods.h"
33 #include "JITStubCall.h"
34 #include "JSArray.h"
35 #include "JSFunction.h"
36 #include "Interpreter.h"
37 #include "ResultType.h"
38 #include "SamplingTool.h"
39
40 #ifndef NDEBUG
41 #include <stdio.h>
42 #endif
43
44 using namespace std;
45
46 namespace JSC {
47
48 #if USE(JSVALUE32_64)
49
50 void JIT::emit_op_negate(Instruction* currentInstruction)
51 {
52 unsigned dst = currentInstruction[1].u.operand;
53 unsigned src = currentInstruction[2].u.operand;
54
55 emitLoad(src, regT1, regT0);
56
57 Jump srcNotInt = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
58 addSlowCase(branch32(Equal, regT0, Imm32(0)));
59
60 neg32(regT0);
61 emitStoreInt32(dst, regT0, (dst == src));
62
63 Jump end = jump();
64
65 srcNotInt.link(this);
66 addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
67
68 xor32(Imm32(1 << 31), regT1);
69 store32(regT1, tagFor(dst));
70 if (dst != src)
71 store32(regT0, payloadFor(dst));
72
73 end.link(this);
74 }
75
76 void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
77 {
78 unsigned dst = currentInstruction[1].u.operand;
79
80 linkSlowCase(iter); // 0 check
81 linkSlowCase(iter); // double check
82
83 JITStubCall stubCall(this, cti_op_negate);
84 stubCall.addArgument(regT1, regT0);
85 stubCall.call(dst);
86 }
87
88 void JIT::emit_op_jnless(Instruction* currentInstruction)
89 {
90 unsigned op1 = currentInstruction[1].u.operand;
91 unsigned op2 = currentInstruction[2].u.operand;
92 unsigned target = currentInstruction[3].u.operand;
93
94 JumpList notInt32Op1;
95 JumpList notInt32Op2;
96
97 // Int32 less.
98 if (isOperandConstantImmediateInt(op1)) {
99 emitLoad(op2, regT3, regT2);
100 notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
101 addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
102 } else if (isOperandConstantImmediateInt(op2)) {
103 emitLoad(op1, regT1, regT0);
104 notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
105 addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
106 } else {
107 emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
108 notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
109 notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
110 addJump(branch32(GreaterThanOrEqual, regT0, regT2), target);
111 }
112
113 if (!supportsFloatingPoint()) {
114 addSlowCase(notInt32Op1);
115 addSlowCase(notInt32Op2);
116 return;
117 }
118 Jump end = jump();
119
120 // Double less.
121 emitBinaryDoubleOp(op_jnless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
122 end.link(this);
123 }
124
125 void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
126 {
127 unsigned op1 = currentInstruction[1].u.operand;
128 unsigned op2 = currentInstruction[2].u.operand;
129 unsigned target = currentInstruction[3].u.operand;
130
131 if (!supportsFloatingPoint()) {
132 if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
133 linkSlowCase(iter); // int32 check
134 linkSlowCase(iter); // int32 check
135 } else {
136 if (!isOperandConstantImmediateInt(op1)) {
137 linkSlowCase(iter); // double check
138 linkSlowCase(iter); // int32 check
139 }
140 if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
141 linkSlowCase(iter); // double check
142 }
143
144 JITStubCall stubCall(this, cti_op_jless);
145 stubCall.addArgument(op1);
146 stubCall.addArgument(op2);
147 stubCall.call();
148 emitJumpSlowToHot(branchTest32(Zero, regT0), target);
149 }
150
151 void JIT::emit_op_jless(Instruction* currentInstruction)
152 {
153 unsigned op1 = currentInstruction[1].u.operand;
154 unsigned op2 = currentInstruction[2].u.operand;
155 unsigned target = currentInstruction[3].u.operand;
156
157 JumpList notInt32Op1;
158 JumpList notInt32Op2;
159
160 // Int32 less.
161 if (isOperandConstantImmediateInt(op1)) {
162 emitLoad(op2, regT3, regT2);
163 notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
164 addJump(branch32(GreaterThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
165 } else if (isOperandConstantImmediateInt(op2)) {
166 emitLoad(op1, regT1, regT0);
167 notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
168 addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
169 } else {
170 emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
171 notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
172 notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
173 addJump(branch32(LessThan, regT0, regT2), target);
174 }
175
176 if (!supportsFloatingPoint()) {
177 addSlowCase(notInt32Op1);
178 addSlowCase(notInt32Op2);
179 return;
180 }
181 Jump end = jump();
182
183 // Double less.
184 emitBinaryDoubleOp(op_jless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
185 end.link(this);
186 }
187
188 void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
189 {
190 unsigned op1 = currentInstruction[1].u.operand;
191 unsigned op2 = currentInstruction[2].u.operand;
192 unsigned target = currentInstruction[3].u.operand;
193
194 if (!supportsFloatingPoint()) {
195 if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
196 linkSlowCase(iter); // int32 check
197 linkSlowCase(iter); // int32 check
198 } else {
199 if (!isOperandConstantImmediateInt(op1)) {
200 linkSlowCase(iter); // double check
201 linkSlowCase(iter); // int32 check
202 }
203 if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
204 linkSlowCase(iter); // double check
205 }
206
207 JITStubCall stubCall(this, cti_op_jless);
208 stubCall.addArgument(op1);
209 stubCall.addArgument(op2);
210 stubCall.call();
211 emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
212 }
213
214 void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
215 {
216 unsigned op1 = currentInstruction[1].u.operand;
217 unsigned op2 = currentInstruction[2].u.operand;
218 unsigned target = currentInstruction[3].u.operand;
219
220 JumpList notInt32Op1;
221 JumpList notInt32Op2;
222
223 // Int32 less.
224 if (isOperandConstantImmediateInt(op1)) {
225 emitLoad(op2, regT3, regT2);
226 notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
227 addJump(branch32(LessThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
228 } else if (isOperandConstantImmediateInt(op2)) {
229 emitLoad(op1, regT1, regT0);
230 notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
231 addJump(branch32(GreaterThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
232 } else {
233 emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
234 notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
235 notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
236 addJump(branch32(GreaterThan, regT0, regT2), target);
237 }
238
239 if (!supportsFloatingPoint()) {
240 addSlowCase(notInt32Op1);
241 addSlowCase(notInt32Op2);
242 return;
243 }
244 Jump end = jump();
245
246 // Double less.
247 emitBinaryDoubleOp(op_jnlesseq, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
248 end.link(this);
249 }
250
251 void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
252 {
253 unsigned op1 = currentInstruction[1].u.operand;
254 unsigned op2 = currentInstruction[2].u.operand;
255 unsigned target = currentInstruction[3].u.operand;
256
257 if (!supportsFloatingPoint()) {
258 if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
259 linkSlowCase(iter); // int32 check
260 linkSlowCase(iter); // int32 check
261 } else {
262 if (!isOperandConstantImmediateInt(op1)) {
263 linkSlowCase(iter); // double check
264 linkSlowCase(iter); // int32 check
265 }
266 if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
267 linkSlowCase(iter); // double check
268 }
269
270 JITStubCall stubCall(this, cti_op_jlesseq);
271 stubCall.addArgument(op1);
272 stubCall.addArgument(op2);
273 stubCall.call();
274 emitJumpSlowToHot(branchTest32(Zero, regT0), target);
275 }
276
277 // LeftShift (<<)
278
279 void JIT::emit_op_lshift(Instruction* currentInstruction)
280 {
281 unsigned dst = currentInstruction[1].u.operand;
282 unsigned op1 = currentInstruction[2].u.operand;
283 unsigned op2 = currentInstruction[3].u.operand;
284
285 if (isOperandConstantImmediateInt(op2)) {
286 emitLoad(op1, regT1, regT0);
287 addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
288 lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
289 emitStoreInt32(dst, regT0, dst == op1);
290 return;
291 }
292
293 emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
294 if (!isOperandConstantImmediateInt(op1))
295 addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
296 addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
297 lshift32(regT2, regT0);
298 emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
299 }
300
301 void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
302 {
303 unsigned dst = currentInstruction[1].u.operand;
304 unsigned op1 = currentInstruction[2].u.operand;
305 unsigned op2 = currentInstruction[3].u.operand;
306
307 if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
308 linkSlowCase(iter); // int32 check
309 linkSlowCase(iter); // int32 check
310
311 JITStubCall stubCall(this, cti_op_lshift);
312 stubCall.addArgument(op1);
313 stubCall.addArgument(op2);
314 stubCall.call(dst);
315 }
316
317 // RightShift (>>)
318
319 void JIT::emit_op_rshift(Instruction* currentInstruction)
320 {
321 unsigned dst = currentInstruction[1].u.operand;
322 unsigned op1 = currentInstruction[2].u.operand;
323 unsigned op2 = currentInstruction[3].u.operand;
324
325 if (isOperandConstantImmediateInt(op2)) {
326 emitLoad(op1, regT1, regT0);
327 addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
328 rshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
329 emitStoreInt32(dst, regT0, dst == op1);
330 return;
331 }
332
333 emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
334 if (!isOperandConstantImmediateInt(op1))
335 addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
336 addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
337 rshift32(regT2, regT0);
338 emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
339 }
340
341 void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
342 {
343 unsigned dst = currentInstruction[1].u.operand;
344 unsigned op1 = currentInstruction[2].u.operand;
345 unsigned op2 = currentInstruction[3].u.operand;
346
347 if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
348 linkSlowCase(iter); // int32 check
349 linkSlowCase(iter); // int32 check
350
351 JITStubCall stubCall(this, cti_op_rshift);
352 stubCall.addArgument(op1);
353 stubCall.addArgument(op2);
354 stubCall.call(dst);
355 }
356
357 // BitAnd (&)
358
359 void JIT::emit_op_bitand(Instruction* currentInstruction)
360 {
361 unsigned dst = currentInstruction[1].u.operand;
362 unsigned op1 = currentInstruction[2].u.operand;
363 unsigned op2 = currentInstruction[3].u.operand;
364
365 unsigned op;
366 int32_t constant;
367 if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
368 emitLoad(op, regT1, regT0);
369 addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
370 and32(Imm32(constant), regT0);
371 emitStoreInt32(dst, regT0, (op == dst));
372 return;
373 }
374
375 emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
376 addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
377 addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
378 and32(regT2, regT0);
379 emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
380 }
381
382 void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
383 {
384 unsigned dst = currentInstruction[1].u.operand;
385 unsigned op1 = currentInstruction[2].u.operand;
386 unsigned op2 = currentInstruction[3].u.operand;
387
388 if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
389 linkSlowCase(iter); // int32 check
390 linkSlowCase(iter); // int32 check
391
392 JITStubCall stubCall(this, cti_op_bitand);
393 stubCall.addArgument(op1);
394 stubCall.addArgument(op2);
395 stubCall.call(dst);
396 }
397
398 // BitOr (|)
399
400 void JIT::emit_op_bitor(Instruction* currentInstruction)
401 {
402 unsigned dst = currentInstruction[1].u.operand;
403 unsigned op1 = currentInstruction[2].u.operand;
404 unsigned op2 = currentInstruction[3].u.operand;
405
406 unsigned op;
407 int32_t constant;
408 if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
409 emitLoad(op, regT1, regT0);
410 addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
411 or32(Imm32(constant), regT0);
412 emitStoreInt32(dst, regT0, (op == dst));
413 return;
414 }
415
416 emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
417 addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
418 addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
419 or32(regT2, regT0);
420 emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
421 }
422
423 void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
424 {
425 unsigned dst = currentInstruction[1].u.operand;
426 unsigned op1 = currentInstruction[2].u.operand;
427 unsigned op2 = currentInstruction[3].u.operand;
428
429 if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
430 linkSlowCase(iter); // int32 check
431 linkSlowCase(iter); // int32 check
432
433 JITStubCall stubCall(this, cti_op_bitor);
434 stubCall.addArgument(op1);
435 stubCall.addArgument(op2);
436 stubCall.call(dst);
437 }
438
439 // BitXor (^)
440
441 void JIT::emit_op_bitxor(Instruction* currentInstruction)
442 {
443 unsigned dst = currentInstruction[1].u.operand;
444 unsigned op1 = currentInstruction[2].u.operand;
445 unsigned op2 = currentInstruction[3].u.operand;
446
447 unsigned op;
448 int32_t constant;
449 if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
450 emitLoad(op, regT1, regT0);
451 addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
452 xor32(Imm32(constant), regT0);
453 emitStoreInt32(dst, regT0, (op == dst));
454 return;
455 }
456
457 emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
458 addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
459 addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
460 xor32(regT2, regT0);
461 emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
462 }
463
464 void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
465 {
466 unsigned dst = currentInstruction[1].u.operand;
467 unsigned op1 = currentInstruction[2].u.operand;
468 unsigned op2 = currentInstruction[3].u.operand;
469
470 if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
471 linkSlowCase(iter); // int32 check
472 linkSlowCase(iter); // int32 check
473
474 JITStubCall stubCall(this, cti_op_bitxor);
475 stubCall.addArgument(op1);
476 stubCall.addArgument(op2);
477 stubCall.call(dst);
478 }
479
480 // BitNot (~)
481
482 void JIT::emit_op_bitnot(Instruction* currentInstruction)
483 {
484 unsigned dst = currentInstruction[1].u.operand;
485 unsigned src = currentInstruction[2].u.operand;
486
487 emitLoad(src, regT1, regT0);
488 addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
489
490 not32(regT0);
491 emitStoreInt32(dst, regT0, (dst == src));
492 }
493
494 void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
495 {
496 unsigned dst = currentInstruction[1].u.operand;
497
498 linkSlowCase(iter); // int32 check
499
500 JITStubCall stubCall(this, cti_op_bitnot);
501 stubCall.addArgument(regT1, regT0);
502 stubCall.call(dst);
503 }
504
505 // PostInc (i++)
506
507 void JIT::emit_op_post_inc(Instruction* currentInstruction)
508 {
509 unsigned dst = currentInstruction[1].u.operand;
510 unsigned srcDst = currentInstruction[2].u.operand;
511
512 emitLoad(srcDst, regT1, regT0);
513 addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
514
515 if (dst == srcDst) // x = x++ is a noop for ints.
516 return;
517
518 emitStoreInt32(dst, regT0);
519
520 addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
521 emitStoreInt32(srcDst, regT0, true);
522 }
523
524 void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
525 {
526 unsigned dst = currentInstruction[1].u.operand;
527 unsigned srcDst = currentInstruction[2].u.operand;
528
529 linkSlowCase(iter); // int32 check
530 if (dst != srcDst)
531 linkSlowCase(iter); // overflow check
532
533 JITStubCall stubCall(this, cti_op_post_inc);
534 stubCall.addArgument(srcDst);
535 stubCall.addArgument(Imm32(srcDst));
536 stubCall.call(dst);
537 }
538
539 // PostDec (i--)
540
541 void JIT::emit_op_post_dec(Instruction* currentInstruction)
542 {
543 unsigned dst = currentInstruction[1].u.operand;
544 unsigned srcDst = currentInstruction[2].u.operand;
545
546 emitLoad(srcDst, regT1, regT0);
547 addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
548
549 if (dst == srcDst) // x = x-- is a noop for ints.
550 return;
551
552 emitStoreInt32(dst, regT0);
553
554 addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
555 emitStoreInt32(srcDst, regT0, true);
556 }
557
558 void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
559 {
560 unsigned dst = currentInstruction[1].u.operand;
561 unsigned srcDst = currentInstruction[2].u.operand;
562
563 linkSlowCase(iter); // int32 check
564 if (dst != srcDst)
565 linkSlowCase(iter); // overflow check
566
567 JITStubCall stubCall(this, cti_op_post_dec);
568 stubCall.addArgument(srcDst);
569 stubCall.addArgument(Imm32(srcDst));
570 stubCall.call(dst);
571 }
572
573 // PreInc (++i)
574
575 void JIT::emit_op_pre_inc(Instruction* currentInstruction)
576 {
577 unsigned srcDst = currentInstruction[1].u.operand;
578
579 emitLoad(srcDst, regT1, regT0);
580
581 addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
582 addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
583 emitStoreInt32(srcDst, regT0, true);
584 }
585
586 void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
587 {
588 unsigned srcDst = currentInstruction[1].u.operand;
589
590 linkSlowCase(iter); // int32 check
591 linkSlowCase(iter); // overflow check
592
593 JITStubCall stubCall(this, cti_op_pre_inc);
594 stubCall.addArgument(srcDst);
595 stubCall.call(srcDst);
596 }
597
598 // PreDec (--i)
599
600 void JIT::emit_op_pre_dec(Instruction* currentInstruction)
601 {
602 unsigned srcDst = currentInstruction[1].u.operand;
603
604 emitLoad(srcDst, regT1, regT0);
605
606 addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
607 addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
608 emitStoreInt32(srcDst, regT0, true);
609 }
610
611 void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
612 {
613 unsigned srcDst = currentInstruction[1].u.operand;
614
615 linkSlowCase(iter); // int32 check
616 linkSlowCase(iter); // overflow check
617
618 JITStubCall stubCall(this, cti_op_pre_dec);
619 stubCall.addArgument(srcDst);
620 stubCall.call(srcDst);
621 }
622
623 // Addition (+)
624
625 void JIT::emit_op_add(Instruction* currentInstruction)
626 {
627 unsigned dst = currentInstruction[1].u.operand;
628 unsigned op1 = currentInstruction[2].u.operand;
629 unsigned op2 = currentInstruction[3].u.operand;
630 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
631
632 if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
633 JITStubCall stubCall(this, cti_op_add);
634 stubCall.addArgument(op1);
635 stubCall.addArgument(op2);
636 stubCall.call(dst);
637 return;
638 }
639
640 JumpList notInt32Op1;
641 JumpList notInt32Op2;
642
643 unsigned op;
644 int32_t constant;
645 if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
646 emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
647 return;
648 }
649
650 emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
651 notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
652 notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
653
654 // Int32 case.
655 addSlowCase(branchAdd32(Overflow, regT2, regT0));
656 emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
657
658 if (!supportsFloatingPoint()) {
659 addSlowCase(notInt32Op1);
660 addSlowCase(notInt32Op2);
661 return;
662 }
663 Jump end = jump();
664
665 // Double case.
666 emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
667 end.link(this);
668 }
669
670 void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
671 {
672 // Int32 case.
673 emitLoad(op, regT1, regT0);
674 Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
675 addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
676 emitStoreInt32(dst, regT0, (op == dst));
677
678 // Double case.
679 if (!supportsFloatingPoint()) {
680 addSlowCase(notInt32);
681 return;
682 }
683 Jump end = jump();
684
685 notInt32.link(this);
686 if (!opType.definitelyIsNumber())
687 addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
688 move(Imm32(constant), regT2);
689 convertInt32ToDouble(regT2, fpRegT0);
690 emitLoadDouble(op, fpRegT1);
691 addDouble(fpRegT1, fpRegT0);
692 emitStoreDouble(dst, fpRegT0);
693
694 end.link(this);
695 }
696
697 void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
698 {
699 unsigned dst = currentInstruction[1].u.operand;
700 unsigned op1 = currentInstruction[2].u.operand;
701 unsigned op2 = currentInstruction[3].u.operand;
702 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
703
704 if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
705 return;
706
707 unsigned op;
708 int32_t constant;
709 if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
710 linkSlowCase(iter); // overflow check
711
712 if (!supportsFloatingPoint())
713 linkSlowCase(iter); // non-sse case
714 else {
715 ResultType opType = op == op1 ? types.first() : types.second();
716 if (!opType.definitelyIsNumber())
717 linkSlowCase(iter); // double check
718 }
719 } else {
720 linkSlowCase(iter); // overflow check
721
722 if (!supportsFloatingPoint()) {
723 linkSlowCase(iter); // int32 check
724 linkSlowCase(iter); // int32 check
725 } else {
726 if (!types.first().definitelyIsNumber())
727 linkSlowCase(iter); // double check
728
729 if (!types.second().definitelyIsNumber()) {
730 linkSlowCase(iter); // int32 check
731 linkSlowCase(iter); // double check
732 }
733 }
734 }
735
736 JITStubCall stubCall(this, cti_op_add);
737 stubCall.addArgument(op1);
738 stubCall.addArgument(op2);
739 stubCall.call(dst);
740 }
741
742 // Subtraction (-)
743
744 void JIT::emit_op_sub(Instruction* currentInstruction)
745 {
746 unsigned dst = currentInstruction[1].u.operand;
747 unsigned op1 = currentInstruction[2].u.operand;
748 unsigned op2 = currentInstruction[3].u.operand;
749 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
750
751 JumpList notInt32Op1;
752 JumpList notInt32Op2;
753
754 if (isOperandConstantImmediateInt(op2)) {
755 emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
756 return;
757 }
758
759 emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
760 notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
761 notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
762
763 // Int32 case.
764 addSlowCase(branchSub32(Overflow, regT2, regT0));
765 emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
766
767 if (!supportsFloatingPoint()) {
768 addSlowCase(notInt32Op1);
769 addSlowCase(notInt32Op2);
770 return;
771 }
772 Jump end = jump();
773
774 // Double case.
775 emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
776 end.link(this);
777 }
778
779 void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
780 {
781 // Int32 case.
782 emitLoad(op, regT1, regT0);
783 Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
784 addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
785 emitStoreInt32(dst, regT0, (op == dst));
786
787 // Double case.
788 if (!supportsFloatingPoint()) {
789 addSlowCase(notInt32);
790 return;
791 }
792 Jump end = jump();
793
794 notInt32.link(this);
795 if (!opType.definitelyIsNumber())
796 addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
797 move(Imm32(constant), regT2);
798 convertInt32ToDouble(regT2, fpRegT0);
799 emitLoadDouble(op, fpRegT1);
800 subDouble(fpRegT0, fpRegT1);
801 emitStoreDouble(dst, fpRegT1);
802
803 end.link(this);
804 }
805
806 void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
807 {
808 unsigned dst = currentInstruction[1].u.operand;
809 unsigned op1 = currentInstruction[2].u.operand;
810 unsigned op2 = currentInstruction[3].u.operand;
811 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
812
813 if (isOperandConstantImmediateInt(op2)) {
814 linkSlowCase(iter); // overflow check
815
816 if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
817 linkSlowCase(iter); // int32 or double check
818 } else {
819 linkSlowCase(iter); // overflow check
820
821 if (!supportsFloatingPoint()) {
822 linkSlowCase(iter); // int32 check
823 linkSlowCase(iter); // int32 check
824 } else {
825 if (!types.first().definitelyIsNumber())
826 linkSlowCase(iter); // double check
827
828 if (!types.second().definitelyIsNumber()) {
829 linkSlowCase(iter); // int32 check
830 linkSlowCase(iter); // double check
831 }
832 }
833 }
834
835 JITStubCall stubCall(this, cti_op_sub);
836 stubCall.addArgument(op1);
837 stubCall.addArgument(op2);
838 stubCall.call(dst);
839 }
840
841 void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
842 {
843 JumpList end;
844
845 if (!notInt32Op1.empty()) {
846 // Double case 1: Op1 is not int32; Op2 is unknown.
847 notInt32Op1.link(this);
848
849 ASSERT(op1IsInRegisters);
850
851 // Verify Op1 is double.
852 if (!types.first().definitelyIsNumber())
853 addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
854
855 if (!op2IsInRegisters)
856 emitLoad(op2, regT3, regT2);
857
858 Jump doubleOp2 = branch32(Below, regT3, Imm32(JSValue::LowestTag));
859
860 if (!types.second().definitelyIsNumber())
861 addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
862
863 convertInt32ToDouble(regT2, fpRegT0);
864 Jump doTheMath = jump();
865
866 // Load Op2 as double into double register.
867 doubleOp2.link(this);
868 emitLoadDouble(op2, fpRegT0);
869
870 // Do the math.
871 doTheMath.link(this);
872 switch (opcodeID) {
873 case op_mul:
874 emitLoadDouble(op1, fpRegT2);
875 mulDouble(fpRegT2, fpRegT0);
876 emitStoreDouble(dst, fpRegT0);
877 break;
878 case op_add:
879 emitLoadDouble(op1, fpRegT2);
880 addDouble(fpRegT2, fpRegT0);
881 emitStoreDouble(dst, fpRegT0);
882 break;
883 case op_sub:
884 emitLoadDouble(op1, fpRegT1);
885 subDouble(fpRegT0, fpRegT1);
886 emitStoreDouble(dst, fpRegT1);
887 break;
888 case op_div:
889 emitLoadDouble(op1, fpRegT1);
890 divDouble(fpRegT0, fpRegT1);
891 emitStoreDouble(dst, fpRegT1);
892 break;
893 case op_jnless:
894 emitLoadDouble(op1, fpRegT2);
895 addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
896 break;
897 case op_jless:
898 emitLoadDouble(op1, fpRegT2);
899 addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
900 break;
901 case op_jnlesseq:
902 emitLoadDouble(op1, fpRegT2);
903 addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
904 break;
905 default:
906 ASSERT_NOT_REACHED();
907 }
908
909 if (!notInt32Op2.empty())
910 end.append(jump());
911 }
912
913 if (!notInt32Op2.empty()) {
914 // Double case 2: Op1 is int32; Op2 is not int32.
915 notInt32Op2.link(this);
916
917 ASSERT(op2IsInRegisters);
918
919 if (!op1IsInRegisters)
920 emitLoadPayload(op1, regT0);
921
922 convertInt32ToDouble(regT0, fpRegT0);
923
924 // Verify op2 is double.
925 if (!types.second().definitelyIsNumber())
926 addSlowCase(branch32(Above, regT3, Imm32(JSValue::LowestTag)));
927
928 // Do the math.
929 switch (opcodeID) {
930 case op_mul:
931 emitLoadDouble(op2, fpRegT2);
932 mulDouble(fpRegT2, fpRegT0);
933 emitStoreDouble(dst, fpRegT0);
934 break;
935 case op_add:
936 emitLoadDouble(op2, fpRegT2);
937 addDouble(fpRegT2, fpRegT0);
938 emitStoreDouble(dst, fpRegT0);
939 break;
940 case op_sub:
941 emitLoadDouble(op2, fpRegT2);
942 subDouble(fpRegT2, fpRegT0);
943 emitStoreDouble(dst, fpRegT0);
944 break;
945 case op_div:
946 emitLoadDouble(op2, fpRegT2);
947 divDouble(fpRegT2, fpRegT0);
948 emitStoreDouble(dst, fpRegT0);
949 break;
950 case op_jnless:
951 emitLoadDouble(op2, fpRegT1);
952 addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
953 break;
954 case op_jless:
955 emitLoadDouble(op2, fpRegT1);
956 addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
957 break;
958 case op_jnlesseq:
959 emitLoadDouble(op2, fpRegT1);
960 addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
961 break;
962 default:
963 ASSERT_NOT_REACHED();
964 }
965 }
966
967 end.link(this);
968 }
969
970 // Multiplication (*)
971
// Emits the fast path for op_mul (JSVALUE32_64): inline int32 * int32 with
// slow cases for overflow and for a zero product (which may need to be -0);
// non-int32 operands fall through to the shared double path.
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Load both operands: tags into regT1/regT3, payloads into regT0/regT2.
    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

    // Int32 case.
    // Save op1's payload in regT3 before the multiply clobbers regT0; the
    // slow path inspects regT2/regT3 signs to detect a negative-zero result.
    move(regT0, regT3);
    addSlowCase(branchMul32(Overflow, regT2, regT0));
    // A zero product is a slow case: it must become -0 if either operand was negative.
    addSlowCase(branchTest32(Zero, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        // No FPU: route non-int32 operands straight to the stub.
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
1003
1004 void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1005 {
1006 unsigned dst = currentInstruction[1].u.operand;
1007 unsigned op1 = currentInstruction[2].u.operand;
1008 unsigned op2 = currentInstruction[3].u.operand;
1009 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1010
1011 Jump overflow = getSlowCase(iter); // overflow check
1012 linkSlowCase(iter); // zero result check
1013
1014 Jump negZero = branchOr32(Signed, regT2, regT3);
1015 emitStoreInt32(dst, Imm32(0), (op1 == dst || op2 == dst));
1016
1017 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
1018
1019 negZero.link(this);
1020 overflow.link(this);
1021
1022 if (!supportsFloatingPoint()) {
1023 linkSlowCase(iter); // int32 check
1024 linkSlowCase(iter); // int32 check
1025 }
1026
1027 if (supportsFloatingPoint()) {
1028 if (!types.first().definitelyIsNumber())
1029 linkSlowCase(iter); // double check
1030
1031 if (!types.second().definitelyIsNumber()) {
1032 linkSlowCase(iter); // int32 check
1033 linkSlowCase(iter); // double check
1034 }
1035 }
1036
1037 Label jitStubCall(this);
1038 JITStubCall stubCall(this, cti_op_mul);
1039 stubCall.addArgument(op1);
1040 stubCall.addArgument(op2);
1041 stubCall.call(dst);
1042 }
1043
1044 // Division (/)
1045
// Emits op_div (JSVALUE32_64). Division is always done in floating point;
// int32 / int32 converts both operands to double, divides, and stores an
// int32 result when the quotient converts back exactly, otherwise a double.
// Without FP support the whole opcode is a slow case.
void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint()) {
        // Unconditional slow case: everything goes to the stub.
        addSlowCase(jump());
        return;
    }

    // Int32 divide.
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    JumpList end;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);

    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

    convertInt32ToDouble(regT0, fpRegT0);
    convertInt32ToDouble(regT2, fpRegT1);
    divDouble(fpRegT1, fpRegT0);

    // Jumps to doubleResult when the quotient is not exactly representable
    // as int32 (fractional result, overflow, or -0).
    JumpList doubleResult;
    branchConvertDoubleToInt32(fpRegT0, regT0, doubleResult, fpRegT1);

    // Int32 result.
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
    end.append(jump());

    // Double result.
    doubleResult.link(this);
    emitStoreDouble(dst, fpRegT0);
    end.append(jump());

    // Double divide.
    emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
1089
1090 void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1091 {
1092 unsigned dst = currentInstruction[1].u.operand;
1093 unsigned op1 = currentInstruction[2].u.operand;
1094 unsigned op2 = currentInstruction[3].u.operand;
1095 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1096
1097 if (!supportsFloatingPoint())
1098 linkSlowCase(iter);
1099 else {
1100 if (!types.first().definitelyIsNumber())
1101 linkSlowCase(iter); // double check
1102
1103 if (!types.second().definitelyIsNumber()) {
1104 linkSlowCase(iter); // int32 check
1105 linkSlowCase(iter); // double check
1106 }
1107 }
1108
1109 JITStubCall stubCall(this, cti_op_div);
1110 stubCall.addArgument(op1);
1111 stubCall.addArgument(op2);
1112 stubCall.call(dst);
1113 }
1114
1115 // Mod (%)
1116
1117 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
1118
1119 #if CPU(X86) || CPU(X86_64)
1120
// Emits op_mod (JSVALUE32_64) on x86/x86_64 using idiv, which requires the
// dividend in eax (sign-extended into edx by cdq) and leaves the remainder
// in edx — hence the hard-coded register assignments throughout.
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
        // Constant non-zero divisor: no divide-by-0 slow case needed, and the
        // INT_MIN / -1 trap check is only required when the divisor is -1.
        emitLoad(op1, X86Registers::edx, X86Registers::eax);
        move(Imm32(getConstantOperand(op2).asInt32()), X86Registers::ecx);
        addSlowCase(branch32(NotEqual, X86Registers::edx, Imm32(JSValue::Int32Tag)));
        if (getConstantOperand(op2).asInt32() == -1)
            addSlowCase(branch32(Equal, X86Registers::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
    } else {
        // General case: both operands must be int32, dividend must not be
        // INT_MIN (idiv would fault on INT_MIN / -1), divisor must not be 0.
        emitLoad2(op1, X86Registers::edx, X86Registers::eax, op2, X86Registers::ebx, X86Registers::ecx);
        addSlowCase(branch32(NotEqual, X86Registers::edx, Imm32(JSValue::Int32Tag)));
        addSlowCase(branch32(NotEqual, X86Registers::ebx, Imm32(JSValue::Int32Tag)));

        addSlowCase(branch32(Equal, X86Registers::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
        addSlowCase(branch32(Equal, X86Registers::ecx, Imm32(0))); // divide by 0
    }

    move(X86Registers::eax, X86Registers::ebx); // Save dividend payload, in case of 0.
    m_assembler.cdq();
    m_assembler.idivl_r(X86Registers::ecx);

    // If the remainder is zero and the dividend is negative, the result is -0.
    Jump storeResult1 = branchTest32(NonZero, X86Registers::edx);
    Jump storeResult2 = branchTest32(Zero, X86Registers::ebx, Imm32(0x80000000)); // not negative
    emitStore(dst, jsNumber(m_globalData, -0.0));
    Jump end = jump();

    storeResult1.link(this);
    storeResult2.link(this);
    emitStoreInt32(dst, X86Registers::edx, (op1 == dst || op2 == dst));
    end.link(this);
}
1157
// Slow path for the x86 op_mod: link the slow cases in the exact order the
// fast path emitted them, then call the cti_op_mod stub.
// NOTE(review): the original comments on the last two links were swapped
// relative to the emission order (0x80000000 is emitted before the 0 check);
// corrected below. All links target the same stub call, so only the
// commentary was wrong, not the behavior.
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
        linkSlowCase(iter); // int32 check
        if (getConstantOperand(op2).asInt32() == -1)
            linkSlowCase(iter); // 0x80000000 check
    } else {
        linkSlowCase(iter); // int32 check (op1 tag)
        linkSlowCase(iter); // int32 check (op2 tag)
        linkSlowCase(iter); // 0x80000000 check (emitted before the divide-by-0 check)
        linkSlowCase(iter); // divide-by-0 check
    }

    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}
1180
1181 #else // CPU(X86) || CPU(X86_64)
1182
1183 void JIT::emit_op_mod(Instruction* currentInstruction)
1184 {
1185 unsigned dst = currentInstruction[1].u.operand;
1186 unsigned op1 = currentInstruction[2].u.operand;
1187 unsigned op2 = currentInstruction[3].u.operand;
1188
1189 JITStubCall stubCall(this, cti_op_mod);
1190 stubCall.addArgument(op1);
1191 stubCall.addArgument(op2);
1192 stubCall.call(dst);
1193 }
1194
void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    // The generic emit_op_mod above always calls the stub and registers no
    // slow cases, so there is nothing to link here.
}
1198
1199 #endif // CPU(X86) || CPU(X86_64)
1200
1201 /* ------------------------------ END: OP_MOD ------------------------------ */
1202
1203 #else // USE(JSVALUE32_64)
1204
// Emits op_lshift (immediate-encoded JSValue builds): fast path handles
// int-immediate << int-immediate; non-integer operands become slow cases.
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    // Strip the immediate tags so we shift raw int32 values.
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
    lshift32(regT2, regT0);
#if USE(JSVALUE32)
    // JSVALUE32 immediates hold only 31-bit ints; this add appears to be part
    // of the retagging and bails to the slow case when the shifted result
    // doesn't fit — assumes emitFastArithReTagImmediate expects the doubled
    // value; confirm against its definition.
    addSlowCase(branchAdd32(Overflow, regT0, regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}
1225
// Slow path for op_lshift: link the fast path's slow cases (two on
// JSVALUE64, three otherwise — the overflow case requires reloading the
// operands because the fast path mutated them), then call the stub.
void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if USE(JSVALUE64)
    UNUSED_PARAM(op1);
    UNUSED_PARAM(op2);
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    // If we are limited to 32-bit immediates there is a third slow case, which required the operands to have been reloaded.
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter);
    // Reload the original (still-tagged) operands before joining the
    // not-immediate cases, which arrive with the operands untouched.
    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    notImm1.link(this);
    notImm2.link(this);
#endif
    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT2);
    stubCall.call(result);
}
1251
// Emits op_rshift (signed right shift). When op2 is a constant int the
// shift amount is baked in; otherwise, if FP truncation is supported, a
// double LHS is truncated to int32 inline before shifting.
void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        // isOperandConstantImmediateInt(op2) => 1 SlowCase
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (supportsFloatingPointTruncate()) {
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
#if USE(JSVALUE64)
            // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            // Unbox the double: undo the tagTypeNumber offset, then move to FP.
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
#else
            // supportsFloatingPoint() && !USE(JSVALUE64) => 5 SlowCases (of which 1 IfNotJSCell)
            emitJumpSlowCaseIfNotJSCell(regT0, op1);
            addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
            loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
            addSlowCase(branchAdd32(Overflow, regT0, regT0));
#endif
            lhsIsInt.link(this);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        } else {
            // !supportsFloatingPoint() => 2 SlowCases
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        }
        emitFastArithImmToInt(regT2);
        rshift32(regT2, regT0);
#if USE(JSVALUE32)
        signExtend32ToPtr(regT0, regT0);
#endif
    }
    // Retag the raw int32 result as an immediate JSValue.
#if USE(JSVALUE64)
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
#endif
    emitPutVirtualRegister(result);
}
1302
// Slow path for op_rshift: link the slow cases in the exact count/order the
// fast path registered for each configuration (see the "=> N SlowCases"
// comments there), then call the cti_op_rshift stub.
void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_rshift);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
    } else {
        if (supportsFloatingPointTruncate()) {
#if USE(JSVALUE64)
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
#else
            linkSlowCaseIfNotJSCell(iter, op1);
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
#endif
            // We're reloading op1 to regT0 as we can no longer guarantee that
            // we have not munged the operand. It may have already been shifted
            // correctly, but it still will not have been tagged.
            stubCall.addArgument(op1, regT0);
            stubCall.addArgument(regT2);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
            stubCall.addArgument(regT0);
            stubCall.addArgument(regT2);
        }
    }

    stubCall.call(result);
}
1343
// Emits the fast path for op_jnless: jump to target when op1 < op2 is
// FALSE. Each branch condition below is therefore the negation of "less".
void JIT::emit_op_jnless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // On JSVALUE32 the constant is compared in its raw tagged form, so
        // the LHS (also tagged) compares consistently.
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        // op2 <= op1  <=>  !(op1 < op2).
        addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(GreaterThanOrEqual, regT0, regT1), target);
    }
}
1381
1382 void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1383 {
1384 unsigned op1 = currentInstruction[1].u.operand;
1385 unsigned op2 = currentInstruction[2].u.operand;
1386 unsigned target = currentInstruction[3].u.operand;
1387
1388 // We generate inline code for the following cases in the slow path:
1389 // - floating-point number to constant int immediate
1390 // - constant int immediate to floating-point number
1391 // - floating-point number to floating-point number.
1392
1393 if (isOperandConstantImmediateInt(op2)) {
1394 linkSlowCase(iter);
1395
1396 if (supportsFloatingPoint()) {
1397 #if USE(JSVALUE64)
1398 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1399 addPtr(tagTypeNumberRegister, regT0);
1400 movePtrToDouble(regT0, fpRegT0);
1401 #else
1402 Jump fail1;
1403 if (!m_codeBlock->isKnownNotImmediate(op1))
1404 fail1 = emitJumpIfNotJSCell(regT0);
1405
1406 Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
1407 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1408 #endif
1409
1410 int32_t op2imm = getConstantOperand(op2).asInt32();;
1411
1412 move(Imm32(op2imm), regT1);
1413 convertInt32ToDouble(regT1, fpRegT1);
1414
1415 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
1416
1417 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1418
1419 #if USE(JSVALUE64)
1420 fail1.link(this);
1421 #else
1422 if (!m_codeBlock->isKnownNotImmediate(op1))
1423 fail1.link(this);
1424 fail2.link(this);
1425 #endif
1426 }
1427
1428 JITStubCall stubCall(this, cti_op_jless);
1429 stubCall.addArgument(regT0);
1430 stubCall.addArgument(op2, regT2);
1431 stubCall.call();
1432 emitJumpSlowToHot(branchTest32(Zero, regT0), target);
1433
1434 } else if (isOperandConstantImmediateInt(op1)) {
1435 linkSlowCase(iter);
1436
1437 if (supportsFloatingPoint()) {
1438 #if USE(JSVALUE64)
1439 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
1440 addPtr(tagTypeNumberRegister, regT1);
1441 movePtrToDouble(regT1, fpRegT1);
1442 #else
1443 Jump fail1;
1444 if (!m_codeBlock->isKnownNotImmediate(op2))
1445 fail1 = emitJumpIfNotJSCell(regT1);
1446
1447 Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
1448 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1449 #endif
1450
1451 int32_t op1imm = getConstantOperand(op1).asInt32();;
1452
1453 move(Imm32(op1imm), regT0);
1454 convertInt32ToDouble(regT0, fpRegT0);
1455
1456 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
1457
1458 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1459
1460 #if USE(JSVALUE64)
1461 fail1.link(this);
1462 #else
1463 if (!m_codeBlock->isKnownNotImmediate(op2))
1464 fail1.link(this);
1465 fail2.link(this);
1466 #endif
1467 }
1468
1469 JITStubCall stubCall(this, cti_op_jless);
1470 stubCall.addArgument(op1, regT2);
1471 stubCall.addArgument(regT1);
1472 stubCall.call();
1473 emitJumpSlowToHot(branchTest32(Zero, regT0), target);
1474
1475 } else {
1476 linkSlowCase(iter);
1477
1478 if (supportsFloatingPoint()) {
1479 #if USE(JSVALUE64)
1480 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1481 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
1482 Jump fail3 = emitJumpIfImmediateInteger(regT1);
1483 addPtr(tagTypeNumberRegister, regT0);
1484 addPtr(tagTypeNumberRegister, regT1);
1485 movePtrToDouble(regT0, fpRegT0);
1486 movePtrToDouble(regT1, fpRegT1);
1487 #else
1488 Jump fail1;
1489 if (!m_codeBlock->isKnownNotImmediate(op1))
1490 fail1 = emitJumpIfNotJSCell(regT0);
1491
1492 Jump fail2;
1493 if (!m_codeBlock->isKnownNotImmediate(op2))
1494 fail2 = emitJumpIfNotJSCell(regT1);
1495
1496 Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
1497 Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
1498 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1499 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1500 #endif
1501
1502 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
1503
1504 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1505
1506 #if USE(JSVALUE64)
1507 fail1.link(this);
1508 fail2.link(this);
1509 fail3.link(this);
1510 #else
1511 if (!m_codeBlock->isKnownNotImmediate(op1))
1512 fail1.link(this);
1513 if (!m_codeBlock->isKnownNotImmediate(op2))
1514 fail2.link(this);
1515 fail3.link(this);
1516 fail4.link(this);
1517 #endif
1518 }
1519
1520 linkSlowCase(iter);
1521 JITStubCall stubCall(this, cti_op_jless);
1522 stubCall.addArgument(regT0);
1523 stubCall.addArgument(regT1);
1524 stubCall.call();
1525 emitJumpSlowToHot(branchTest32(Zero, regT0), target);
1526 }
1527 }
1528
// Emits the fast path for op_jless: jump to target when op1 < op2 is TRUE.
void JIT::emit_op_jless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // On JSVALUE32 the constant is compared in its raw tagged form, so
        // the LHS (also tagged) compares consistently.
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(LessThan, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        // op2 > op1  <=>  op1 < op2.
        addJump(branch32(GreaterThan, regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(LessThan, regT0, regT1), target);
    }
}
1566
1567 void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1568 {
1569 unsigned op1 = currentInstruction[1].u.operand;
1570 unsigned op2 = currentInstruction[2].u.operand;
1571 unsigned target = currentInstruction[3].u.operand;
1572
1573 // We generate inline code for the following cases in the slow path:
1574 // - floating-point number to constant int immediate
1575 // - constant int immediate to floating-point number
1576 // - floating-point number to floating-point number.
1577
1578 if (isOperandConstantImmediateInt(op2)) {
1579 linkSlowCase(iter);
1580
1581 if (supportsFloatingPoint()) {
1582 #if USE(JSVALUE64)
1583 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1584 addPtr(tagTypeNumberRegister, regT0);
1585 movePtrToDouble(regT0, fpRegT0);
1586 #else
1587 Jump fail1;
1588 if (!m_codeBlock->isKnownNotImmediate(op1))
1589 fail1 = emitJumpIfNotJSCell(regT0);
1590
1591 Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
1592 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1593 #endif
1594
1595 int32_t op2imm = getConstantOperand(op2).asInt32();
1596
1597 move(Imm32(op2imm), regT1);
1598 convertInt32ToDouble(regT1, fpRegT1);
1599
1600 emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
1601
1602 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1603
1604 #if USE(JSVALUE64)
1605 fail1.link(this);
1606 #else
1607 if (!m_codeBlock->isKnownNotImmediate(op1))
1608 fail1.link(this);
1609 fail2.link(this);
1610 #endif
1611 }
1612
1613 JITStubCall stubCall(this, cti_op_jless);
1614 stubCall.addArgument(regT0);
1615 stubCall.addArgument(op2, regT2);
1616 stubCall.call();
1617 emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
1618
1619 } else if (isOperandConstantImmediateInt(op1)) {
1620 linkSlowCase(iter);
1621
1622 if (supportsFloatingPoint()) {
1623 #if USE(JSVALUE64)
1624 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
1625 addPtr(tagTypeNumberRegister, regT1);
1626 movePtrToDouble(regT1, fpRegT1);
1627 #else
1628 Jump fail1;
1629 if (!m_codeBlock->isKnownNotImmediate(op2))
1630 fail1 = emitJumpIfNotJSCell(regT1);
1631
1632 Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
1633 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1634 #endif
1635
1636 int32_t op1imm = getConstantOperand(op1).asInt32();
1637
1638 move(Imm32(op1imm), regT0);
1639 convertInt32ToDouble(regT0, fpRegT0);
1640
1641 emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
1642
1643 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1644
1645 #if USE(JSVALUE64)
1646 fail1.link(this);
1647 #else
1648 if (!m_codeBlock->isKnownNotImmediate(op2))
1649 fail1.link(this);
1650 fail2.link(this);
1651 #endif
1652 }
1653
1654 JITStubCall stubCall(this, cti_op_jless);
1655 stubCall.addArgument(op1, regT2);
1656 stubCall.addArgument(regT1);
1657 stubCall.call();
1658 emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
1659
1660 } else {
1661 linkSlowCase(iter);
1662
1663 if (supportsFloatingPoint()) {
1664 #if USE(JSVALUE64)
1665 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1666 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
1667 Jump fail3 = emitJumpIfImmediateInteger(regT1);
1668 addPtr(tagTypeNumberRegister, regT0);
1669 addPtr(tagTypeNumberRegister, regT1);
1670 movePtrToDouble(regT0, fpRegT0);
1671 movePtrToDouble(regT1, fpRegT1);
1672 #else
1673 Jump fail1;
1674 if (!m_codeBlock->isKnownNotImmediate(op1))
1675 fail1 = emitJumpIfNotJSCell(regT0);
1676
1677 Jump fail2;
1678 if (!m_codeBlock->isKnownNotImmediate(op2))
1679 fail2 = emitJumpIfNotJSCell(regT1);
1680
1681 Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
1682 Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
1683 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1684 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1685 #endif
1686
1687 emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
1688
1689 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
1690
1691 #if USE(JSVALUE64)
1692 fail1.link(this);
1693 fail2.link(this);
1694 fail3.link(this);
1695 #else
1696 if (!m_codeBlock->isKnownNotImmediate(op1))
1697 fail1.link(this);
1698 if (!m_codeBlock->isKnownNotImmediate(op2))
1699 fail2.link(this);
1700 fail3.link(this);
1701 fail4.link(this);
1702 #endif
1703 }
1704
1705 linkSlowCase(iter);
1706 JITStubCall stubCall(this, cti_op_jless);
1707 stubCall.addArgument(regT0);
1708 stubCall.addArgument(regT1);
1709 stubCall.call();
1710 emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
1711 }
1712 }
1713
// Emits the fast path for op_jnlesseq: jump to target when op1 <= op2 is
// FALSE. Each branch condition below is therefore the negation of "<=".
void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // On JSVALUE32 the constant is compared in its raw tagged form, so
        // the LHS (also tagged) compares consistently.
#if USE(JSVALUE64)
        int32_t op2imm = getConstantOperandImmediateInt(op2);
#else
        int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
#endif
        addJump(branch32(GreaterThan, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if USE(JSVALUE64)
        int32_t op1imm = getConstantOperandImmediateInt(op1);
#else
        int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
#endif
        // op2 < op1  <=>  !(op1 <= op2).
        addJump(branch32(LessThan, regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(GreaterThan, regT0, regT1), target);
    }
}
1751
1752 void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1753 {
1754 unsigned op1 = currentInstruction[1].u.operand;
1755 unsigned op2 = currentInstruction[2].u.operand;
1756 unsigned target = currentInstruction[3].u.operand;
1757
1758 // We generate inline code for the following cases in the slow path:
1759 // - floating-point number to constant int immediate
1760 // - constant int immediate to floating-point number
1761 // - floating-point number to floating-point number.
1762
1763 if (isOperandConstantImmediateInt(op2)) {
1764 linkSlowCase(iter);
1765
1766 if (supportsFloatingPoint()) {
1767 #if USE(JSVALUE64)
1768 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1769 addPtr(tagTypeNumberRegister, regT0);
1770 movePtrToDouble(regT0, fpRegT0);
1771 #else
1772 Jump fail1;
1773 if (!m_codeBlock->isKnownNotImmediate(op1))
1774 fail1 = emitJumpIfNotJSCell(regT0);
1775
1776 Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
1777 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1778 #endif
1779
1780 int32_t op2imm = getConstantOperand(op2).asInt32();;
1781
1782 move(Imm32(op2imm), regT1);
1783 convertInt32ToDouble(regT1, fpRegT1);
1784
1785 emitJumpSlowToHot(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
1786
1787 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
1788
1789 #if USE(JSVALUE64)
1790 fail1.link(this);
1791 #else
1792 if (!m_codeBlock->isKnownNotImmediate(op1))
1793 fail1.link(this);
1794 fail2.link(this);
1795 #endif
1796 }
1797
1798 JITStubCall stubCall(this, cti_op_jlesseq);
1799 stubCall.addArgument(regT0);
1800 stubCall.addArgument(op2, regT2);
1801 stubCall.call();
1802 emitJumpSlowToHot(branchTest32(Zero, regT0), target);
1803
1804 } else if (isOperandConstantImmediateInt(op1)) {
1805 linkSlowCase(iter);
1806
1807 if (supportsFloatingPoint()) {
1808 #if USE(JSVALUE64)
1809 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
1810 addPtr(tagTypeNumberRegister, regT1);
1811 movePtrToDouble(regT1, fpRegT1);
1812 #else
1813 Jump fail1;
1814 if (!m_codeBlock->isKnownNotImmediate(op2))
1815 fail1 = emitJumpIfNotJSCell(regT1);
1816
1817 Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
1818 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1819 #endif
1820
1821 int32_t op1imm = getConstantOperand(op1).asInt32();;
1822
1823 move(Imm32(op1imm), regT0);
1824 convertInt32ToDouble(regT0, fpRegT0);
1825
1826 emitJumpSlowToHot(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
1827
1828 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
1829
1830 #if USE(JSVALUE64)
1831 fail1.link(this);
1832 #else
1833 if (!m_codeBlock->isKnownNotImmediate(op2))
1834 fail1.link(this);
1835 fail2.link(this);
1836 #endif
1837 }
1838
1839 JITStubCall stubCall(this, cti_op_jlesseq);
1840 stubCall.addArgument(op1, regT2);
1841 stubCall.addArgument(regT1);
1842 stubCall.call();
1843 emitJumpSlowToHot(branchTest32(Zero, regT0), target);
1844
1845 } else {
1846 linkSlowCase(iter);
1847
1848 if (supportsFloatingPoint()) {
1849 #if USE(JSVALUE64)
1850 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
1851 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
1852 Jump fail3 = emitJumpIfImmediateInteger(regT1);
1853 addPtr(tagTypeNumberRegister, regT0);
1854 addPtr(tagTypeNumberRegister, regT1);
1855 movePtrToDouble(regT0, fpRegT0);
1856 movePtrToDouble(regT1, fpRegT1);
1857 #else
1858 Jump fail1;
1859 if (!m_codeBlock->isKnownNotImmediate(op1))
1860 fail1 = emitJumpIfNotJSCell(regT0);
1861
1862 Jump fail2;
1863 if (!m_codeBlock->isKnownNotImmediate(op2))
1864 fail2 = emitJumpIfNotJSCell(regT1);
1865
1866 Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
1867 Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
1868 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1869 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1870 #endif
1871
1872 emitJumpSlowToHot(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), target);
1873
1874 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
1875
1876 #if USE(JSVALUE64)
1877 fail1.link(this);
1878 fail2.link(this);
1879 fail3.link(this);
1880 #else
1881 if (!m_codeBlock->isKnownNotImmediate(op1))
1882 fail1.link(this);
1883 if (!m_codeBlock->isKnownNotImmediate(op2))
1884 fail2.link(this);
1885 fail3.link(this);
1886 fail4.link(this);
1887 #endif
1888 }
1889
1890 linkSlowCase(iter);
1891 JITStubCall stubCall(this, cti_op_jlesseq);
1892 stubCall.addArgument(regT0);
1893 stubCall.addArgument(regT1);
1894 stubCall.call();
1895 emitJumpSlowToHot(branchTest32(Zero, regT0), target);
1896 }
1897 }
1898
// op_bitand: result = op1 & op2. Fast path requires the variable operand(s)
// to be immediate integers; a constant-int operand is folded into the AND.
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t imm = getConstantOperandImmediateInt(op1);
        andPtr(Imm32(imm), regT0);
        // A negative immediate is sign-extended, so the AND preserves the tag
        // bits; a non-negative immediate clears them, so re-tag the result.
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        // JSVALUE32: AND the raw tagged words; the integer tag bit is set in
        // both operands, so it survives the AND and the result stays tagged.
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0);
#endif
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
        int32_t imm = getConstantOperandImmediateInt(op2);
        andPtr(Imm32(imm), regT0);
        // See above: only a non-negative immediate strips the tag bits.
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
#else
        andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0);
#endif
    } else {
        // Neither operand constant: AND the tagged words first, then test the
        // result. Each tag bit is set in the result iff it was set in both
        // inputs, so the result passes the immediate-integer test iff both
        // inputs would have.
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        andPtr(regT1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}
1934
// Slow path for op_bitand: a variable operand was not an immediate integer,
// so fall back to the cti_op_bitand stub.
void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    if (isOperandConstantImmediateInt(op1)) {
        // The slow jump fired before the AND, so regT0 still holds the
        // unmodified op2 value.
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT0);
        stubCall.call(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        // Likewise, regT0 still holds the unmodified op1 value.
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else {
        // Generic case: regT0 was clobbered by the AND, so reload op1 from
        // the register file; regT1 still holds the original op2 value.
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call(result);
    }
}
1959
// op_post_inc: result = srcDst; srcDst = srcDst + 1. regT0 keeps the
// original value (the expression result); regT1 receives the incremented one.
void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    // Add 1 to the low 32-bit payload, then re-tag; overflow goes slow.
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
#else
    // JSVALUE32: the payload is shifted, so add "1" in shifted form; adding
    // an even constant to a tagged value keeps the tag bit intact.
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
    signExtend32ToPtr(regT1, regT1);
#endif
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
1978
// Slow path for op_post_inc: linked for (1) value not an immediate integer
// and (2) increment overflow. regT0 still holds the original value in both
// cases, so it can be passed to the stub unchanged.
void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_post_inc);
    stubCall.addArgument(regT0);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(result);
}
1991
1992 void JIT::emit_op_post_dec(Instruction* currentInstruction)
1993 {
1994 unsigned result = currentInstruction[1].u.operand;
1995 unsigned srcDst = currentInstruction[2].u.operand;
1996
1997 emitGetVirtualRegister(srcDst, regT0);
1998 move(regT0, regT1);
1999 emitJumpSlowCaseIfNotImmediateInteger(regT0);
2000 #if USE(JSVALUE64)
2001 addSlowCase(branchSub32(Zero, Imm32(1), regT1));
2002 emitFastArithIntToImmNoCheck(regT1, regT1);
2003 #else
2004 addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
2005 signExtend32ToPtr(regT1, regT1);
2006 #endif
2007 emitPutVirtualRegister(srcDst, regT1);
2008 emitPutVirtualRegister(result);
2009 }
2010
// Slow path for op_post_dec: linked for (1) value not an immediate integer
// and (2) the decrement slow case. regT0 still holds the original value.
void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_post_dec);
    stubCall.addArgument(regT0);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(result);
}
2023
// op_pre_inc: srcDst = srcDst + 1 (in place; the increment is performed
// directly in regT0, so on overflow regT0 is clobbered with the wrapped sum —
// the slow path compensates by reloading srcDst).
void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
#if USE(JSVALUE64)
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
#else
    // JSVALUE32: add "1" in shifted form; tag bit is preserved by the even
    // constant.
    addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
    signExtend32ToPtr(regT0, regT0);
#endif
    emitPutVirtualRegister(srcDst);
}
2039
// Slow path for op_pre_inc. The overflow case clobbered regT0 with the
// wrapped sum, so the original value is reloaded from srcDst; the
// not-immediate case joins after the reload because regT0 is still intact.
void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_inc);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}
2052
2053 void JIT::emit_op_pre_dec(Instruction* currentInstruction)
2054 {
2055 unsigned srcDst = currentInstruction[1].u.operand;
2056
2057 emitGetVirtualRegister(srcDst, regT0);
2058 emitJumpSlowCaseIfNotImmediateInteger(regT0);
2059 #if USE(JSVALUE64)
2060 addSlowCase(branchSub32(Zero, Imm32(1), regT0));
2061 emitFastArithIntToImmNoCheck(regT0, regT0);
2062 #else
2063 addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
2064 signExtend32ToPtr(regT0, regT0);
2065 #endif
2066 emitPutVirtualRegister(srcDst);
2067 }
2068
// Slow path for op_pre_dec. The decrement slow case may have clobbered regT0,
// so the original value is reloaded from srcDst; the not-immediate case joins
// after the reload because regT0 is still intact.
void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}
2081
2082 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
2083
2084 #if CPU(X86) || CPU(X86_64)
2085
// op_mod fast path (x86 / x86_64 only): uses idiv, which requires the
// dividend in eax (sign-extended into edx by cdq) and leaves the remainder
// in edx.
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Operands are pinned to eax/ecx to satisfy idiv's register constraints.
    emitGetVirtualRegisters(op1, X86Registers::eax, op2, X86Registers::ecx);
    emitJumpSlowCaseIfNotImmediateInteger(X86Registers::eax);
    emitJumpSlowCaseIfNotImmediateInteger(X86Registers::ecx);
#if USE(JSVALUE64)
    // A zero divisor would fault in idiv, so it takes the slow path.
    // NOTE(review): INT_MIN % -1 overflows idiv's quotient and would raise
    // #DE here — verify whether these operands can reach this path.
    addSlowCase(branchPtr(Equal, X86Registers::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
    m_assembler.cdq();
    m_assembler.idivl_r(X86Registers::ecx);
#else
    // JSVALUE32: strip the tags (payloads stay left-shifted, which divides
    // out in the remainder); detagging the divisor doubles as the zero check.
    emitFastArithDeTagImmediate(X86Registers::eax);
    addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86Registers::ecx));
    m_assembler.cdq();
    m_assembler.idivl_r(X86Registers::ecx);
    signExtend32ToPtr(X86Registers::edx, X86Registers::edx);
#endif
    // The remainder is in edx; re-tag it into eax as the result value.
    emitFastArithReTagImmediate(X86Registers::edx, X86Registers::eax);
    emitPutVirtualRegister(result);
}
2109
// Slow path for op_mod (x86 / x86_64).
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;

#if USE(JSVALUE64)
    // Three slow cases: op1 not an int, op2 not an int, zero divisor.
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
#else
    // The zero-divisor case fires after both operands were detagged, so they
    // must be re-tagged before the stub call; the not-immediate cases join
    // afterwards since their register values were never detagged.
    Jump notImm1 = getSlowCase(iter);
    Jump notImm2 = getSlowCase(iter);
    linkSlowCase(iter);
    emitFastArithReTagImmediate(X86Registers::eax, X86Registers::eax);
    emitFastArithReTagImmediate(X86Registers::ecx, X86Registers::ecx);
    notImm1.link(this);
    notImm2.link(this);
#endif
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(X86Registers::eax);
    stubCall.addArgument(X86Registers::ecx);
    stubCall.call(result);
}
2132
2133 #else // CPU(X86) || CPU(X86_64)
2134
2135 void JIT::emit_op_mod(Instruction* currentInstruction)
2136 {
2137 unsigned result = currentInstruction[1].u.operand;
2138 unsigned op1 = currentInstruction[2].u.operand;
2139 unsigned op2 = currentInstruction[3].u.operand;
2140
2141 JITStubCall stubCall(this, cti_op_mod);
2142 stubCall.addArgument(op1, regT2);
2143 stubCall.addArgument(op2, regT2);
2144 stubCall.call(result);
2145 }
2146
// The non-x86 emit_op_mod registers no slow cases, so this must never run.
void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    ASSERT_NOT_REACHED();
}
2151
2152 #endif // CPU(X86) || CPU(X86_64)
2153
2154 /* ------------------------------ END: OP_MOD ------------------------------ */
2155
2156 #if USE(JSVALUE64)
2157
2158 /* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
2159
// Generic integer fast path for op_add / op_sub / op_mul (JSVALUE64).
// Slow cases are registered in this order: op1 not int, op2 not int,
// overflow, and (mul only) zero result; compileBinaryArithOpSlowCase must
// consume them in the same order.
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    if (opcodeID == op_add)
        addSlowCase(branchAdd32(Overflow, regT1, regT0));
    else if (opcodeID == op_sub)
        addSlowCase(branchSub32(Overflow, regT1, regT0));
    else {
        ASSERT(opcodeID == op_mul);
        addSlowCase(branchMul32(Overflow, regT1, regT0));
        // A zero product needs the slow path: 0 * negative must yield -0,
        // which is not representable as an immediate integer.
        addSlowCase(branchTest32(Zero, regT0));
    }
    // The 32-bit arithmetic left an untagged int32 in regT0; re-tag it.
    emitFastArithIntToImmNoCheck(regT0, regT0);
}
2176
// Shared slow path for op_add / op_sub / op_mul (JSVALUE64). Links the slow
// cases registered by the matching fast path (generic or constant-int) and
// retries the operation in double arithmetic, falling back to a stub call
// when an operand is not a number at all.
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
{
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);

    // A constant-int fast path registered one not-an-int slow case (for the
    // variable operand); the generic path registered one per operand.
    Jump notImm1;
    Jump notImm2;
    if (op1HasImmediateIntFastCase) {
        notImm2 = getSlowCase(iter);
    } else if (op2HasImmediateIntFastCase) {
        notImm1 = getSlowCase(iter);
    } else {
        notImm1 = getSlowCase(iter);
        notImm2 = getSlowCase(iter);
    }

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);
    // The overflow case left a wrapped/partial result in regT0; reload op1.
    emitGetVirtualRegister(op1, regT0);

    Label stubFunctionCall(this);
    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
    if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
        emitGetVirtualRegister(op1, regT0);
        emitGetVirtualRegister(op2, regT1);
    }
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(result);
    Jump end = jump();

    if (op1HasImmediateIntFastCase) {
        // op1 is a constant int; regT0 holds op2, which was not an int.
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op1, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        // Adding TagTypeNumber decodes the boxed double (per the
        // COMPILE_ASSERT above, it equals subtracting DoubleEncodeOffset).
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT2);
    } else if (op2HasImmediateIntFastCase) {
        // op2 is a constant int; regT0 holds op1, which was not an int.
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op2, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT2);
    } else {
        // if we get here, eax is not an int32, edx not yet checked.
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT1);
        Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT2);
        Jump op2wasInteger = jump();

        // if we get here, eax IS an int32, edx is not.
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        convertInt32ToDouble(regT0, fpRegT1);
        op2isDouble.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT2);
        op2wasInteger.link(this);
    }

    // Operands are now unboxed: lhs in fpRegT1, rhs in fpRegT2.
    if (opcodeID == op_add)
        addDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_sub)
        subDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_mul)
        mulDouble(fpRegT2, fpRegT1);
    else {
        // NOTE(review): current callers pass only add/sub/mul (op_div emits
        // its own code), so this branch looks unreachable — confirm.
        ASSERT(opcodeID == op_div);
        divDouble(fpRegT2, fpRegT1);
    }
    // Re-box the double result (subtracting TagTypeNumber encodes it).
    moveDoubleToPtr(fpRegT1, regT0);
    subPtr(tagTypeNumberRegister, regT0);
    emitPutVirtualRegister(result, regT0);

    end.link(this);
}
2265
// op_add: result = op1 + op2. Operands whose recorded types can never be
// numbers go straight to the stub; a constant-int operand gets an inline
// add; otherwise the generic integer fast path is used.
void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Add to the low 32-bit payload, then re-tag; overflow goes slow.
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
2296
2297 void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2298 {
2299 unsigned result = currentInstruction[1].u.operand;
2300 unsigned op1 = currentInstruction[2].u.operand;
2301 unsigned op2 = currentInstruction[3].u.operand;
2302 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2303
2304 if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
2305 return;
2306
2307 bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
2308 bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
2309 compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand), op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
2310 }
2311
// op_mul: result = op1 * op2 (JSVALUE64).
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater
    // than zero (a zero or negative constant could require a -0 double
    // result, which the generic path's extra zero check handles).
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
2336
2337 void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2338 {
2339 unsigned result = currentInstruction[1].u.operand;
2340 unsigned op1 = currentInstruction[2].u.operand;
2341 unsigned op2 = currentInstruction[3].u.operand;
2342 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2343
2344 bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
2345 bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
2346 compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand), op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
2347 }
2348
// op_div (JSVALUE64): always computes in double arithmetic. Each operand is
// loaded into an FP register: a constant double is unboxed directly, a
// constant int is converted at compile time, and a runtime value is either
// converted (immediate int) or unboxed (immediate double) — with a slow case
// only when the operand is not statically known to be a number.
void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateDouble(op1)) {
        emitGetVirtualRegister(op1, regT0);
        // Adding TagTypeNumber decodes the boxed double (see the
        // COMPILE_ASSERT in compileBinaryArithOpSlowCase).
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitLoadInt32ToDouble(op1, fpRegT0);
    } else {
        emitGetVirtualRegister(op1, regT0);
        if (!types.first().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT0);
        Jump notInt = emitJumpIfNotImmediateInteger(regT0);
        convertInt32ToDouble(regT0, fpRegT0);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
        skipDoubleLoad.link(this);
    }

    if (isOperandConstantImmediateDouble(op2)) {
        emitGetVirtualRegister(op2, regT1);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoadInt32ToDouble(op2, fpRegT1);
    } else {
        emitGetVirtualRegister(op2, regT1);
        if (!types.second().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT1);
        Jump notInt = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
        skipDoubleLoad.link(this);
    }
    divDouble(fpRegT1, fpRegT0);

    // Double result.
    // The quotient is always re-boxed as a double, even when it is integral.
    moveDoubleToPtr(fpRegT0, regT0);
    subPtr(tagTypeNumberRegister, regT0);

    emitPutVirtualRegister(dst, regT0);
}
2401
// Slow path for op_div: some operand was not a number at runtime.
void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
    // If both operands were statically known to be numbers, emit_op_div
    // registered no slow cases, so nothing should ever jump here.
    if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
#ifndef NDEBUG
        breakpoint();
#endif
        return;
    }
    // Link the not-a-number slow case only for operands that actually
    // performed the runtime check in emit_op_div (non-constant operands
    // whose type was not statically known).
    if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter);
    }
    if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
        if (!types.second().definitelyIsNumber())
            linkSlowCase(iter);
    }
    // Fall back to the stub to coerce the non-numeric operand(s).
    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
}
2428
2429 void JIT::emit_op_sub(Instruction* currentInstruction)
2430 {
2431 unsigned result = currentInstruction[1].u.operand;
2432 unsigned op1 = currentInstruction[2].u.operand;
2433 unsigned op2 = currentInstruction[3].u.operand;
2434 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2435
2436 compileBinaryArithOp(op_sub, result, op1, op2, types);
2437 emitPutVirtualRegister(result);
2438 }
2439
2440 void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2441 {
2442 unsigned result = currentInstruction[1].u.operand;
2443 unsigned op1 = currentInstruction[2].u.operand;
2444 unsigned op2 = currentInstruction[3].u.operand;
2445 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2446
2447 compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types, false, false);
2448 }
2449
2450 #else // USE(JSVALUE64)
2451
2452 /* ------------------------------ BEGIN: !USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
2453
2454 void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
2455 {
2456 Structure* numberStructure = m_globalData->numberStructure.get();
2457 Jump wasJSNumberCell1;
2458 Jump wasJSNumberCell2;
2459
2460 emitGetVirtualRegisters(src1, regT0, src2, regT1);
2461
2462 if (types.second().isReusable() && supportsFloatingPoint()) {
2463 ASSERT(types.second().mightBeNumber());
2464
2465 // Check op2 is a number
2466 Jump op2imm = emitJumpIfImmediateInteger(regT1);
2467 if (!types.second().definitelyIsNumber()) {
2468 emitJumpSlowCaseIfNotJSCell(regT1, src2);
2469 addSlowCase(checkStructure(regT1, numberStructure));
2470 }
2471
2472 // (1) In this case src2 is a reusable number cell.
2473 // Slow case if src1 is not a number type.
2474 Jump op1imm = emitJumpIfImmediateInteger(regT0);
2475 if (!types.first().definitelyIsNumber()) {
2476 emitJumpSlowCaseIfNotJSCell(regT0, src1);
2477 addSlowCase(checkStructure(regT0, numberStructure));
2478 }
2479
2480 // (1a) if we get here, src1 is also a number cell
2481 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2482 Jump loadedDouble = jump();
2483 // (1b) if we get here, src1 is an immediate
2484 op1imm.link(this);
2485 emitFastArithImmToInt(regT0);
2486 convertInt32ToDouble(regT0, fpRegT0);
2487 // (1c)
2488 loadedDouble.link(this);
2489 if (opcodeID == op_add)
2490 addDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2491 else if (opcodeID == op_sub)
2492 subDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2493 else {
2494 ASSERT(opcodeID == op_mul);
2495 mulDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2496 }
2497
2498 // Store the result to the JSNumberCell and jump.
2499 storeDouble(fpRegT0, Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)));
2500 move(regT1, regT0);
2501 emitPutVirtualRegister(dst);
2502 wasJSNumberCell2 = jump();
2503
2504 // (2) This handles cases where src2 is an immediate number.
2505 // Two slow cases - either src1 isn't an immediate, or the subtract overflows.
2506 op2imm.link(this);
2507 emitJumpSlowCaseIfNotImmediateInteger(regT0);
2508 } else if (types.first().isReusable() && supportsFloatingPoint()) {
2509 ASSERT(types.first().mightBeNumber());
2510
2511 // Check op1 is a number
2512 Jump op1imm = emitJumpIfImmediateInteger(regT0);
2513 if (!types.first().definitelyIsNumber()) {
2514 emitJumpSlowCaseIfNotJSCell(regT0, src1);
2515 addSlowCase(checkStructure(regT0, numberStructure));
2516 }
2517
2518 // (1) In this case src1 is a reusable number cell.
2519 // Slow case if src2 is not a number type.
2520 Jump op2imm = emitJumpIfImmediateInteger(regT1);
2521 if (!types.second().definitelyIsNumber()) {
2522 emitJumpSlowCaseIfNotJSCell(regT1, src2);
2523 addSlowCase(checkStructure(regT1, numberStructure));
2524 }
2525
2526 // (1a) if we get here, src2 is also a number cell
2527 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
2528 Jump loadedDouble = jump();
2529 // (1b) if we get here, src2 is an immediate
2530 op2imm.link(this);
2531 emitFastArithImmToInt(regT1);
2532 convertInt32ToDouble(regT1, fpRegT1);
2533 // (1c)
2534 loadedDouble.link(this);
2535 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
2536 if (opcodeID == op_add)
2537 addDouble(fpRegT1, fpRegT0);
2538 else if (opcodeID == op_sub)
2539 subDouble(fpRegT1, fpRegT0);
2540 else {
2541 ASSERT(opcodeID == op_mul);
2542 mulDouble(fpRegT1, fpRegT0);
2543 }
2544 storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
2545 emitPutVirtualRegister(dst);
2546
2547 // Store the result to the JSNumberCell and jump.
2548 storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
2549 emitPutVirtualRegister(dst);
2550 wasJSNumberCell1 = jump();
2551
2552 // (2) This handles cases where src1 is an immediate number.
2553 // Two slow cases - either src2 isn't an immediate, or the subtract overflows.
2554 op1imm.link(this);
2555 emitJumpSlowCaseIfNotImmediateInteger(regT1);
2556 } else
2557 emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
2558
2559 if (opcodeID == op_add) {
2560 emitFastArithDeTagImmediate(regT0);
2561 addSlowCase(branchAdd32(Overflow, regT1, regT0));
2562 } else if (opcodeID == op_sub) {
2563 addSlowCase(branchSub32(Overflow, regT1, regT0));
2564 signExtend32ToPtr(regT0, regT0);
2565 emitFastArithReTagImmediate(regT0, regT0);
2566 } else {
2567 ASSERT(opcodeID == op_mul);
2568 // convert eax & edx from JSImmediates to ints, and check if either are zero
2569 emitFastArithImmToInt(regT1);
2570 Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(regT0);
2571 Jump op2NonZero = branchTest32(NonZero, regT1);
2572 op1Zero.link(this);
2573 // if either input is zero, add the two together, and check if the result is < 0.
2574 // If it is, we have a problem (N < 0), (N * 0) == -0, not representatble as a JSImmediate.
2575 move(regT0, regT2);
2576 addSlowCase(branchAdd32(Signed, regT1, regT2));
2577 // Skip the above check if neither input is zero
2578 op2NonZero.link(this);
2579 addSlowCase(branchMul32(Overflow, regT1, regT0));
2580 signExtend32ToPtr(regT0, regT0);
2581 emitFastArithReTagImmediate(regT0, regT0);
2582 }
2583 emitPutVirtualRegister(dst);
2584
2585 if (types.second().isReusable() && supportsFloatingPoint())
2586 wasJSNumberCell2.link(this);
2587 else if (types.first().isReusable() && supportsFloatingPoint())
2588 wasJSNumberCell1.link(this);
2589 }
2590
// Shared slow path for op_add / op_sub / op_mul (JSVALUE32). All slow cases
// funnel into the single stub call below, so only the number of links — which
// must match the number of cases the fast path registered for this
// operand-type combination — matters, not their individual pairing.
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
{
    linkSlowCase(iter);
    if (types.second().isReusable() && supportsFloatingPoint()) {
        // Each operand not statically known to be a number contributed a
        // not-a-cell and a wrong-structure slow case on the fast path.
        if (!types.first().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src1);
            linkSlowCase(iter);
        }
        if (!types.second().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src2);
            linkSlowCase(iter);
        }
    } else if (types.first().isReusable() && supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src1);
            linkSlowCase(iter);
        }
        if (!types.second().definitelyIsNumber()) {
            linkSlowCaseIfNotJSCell(iter, src2);
            linkSlowCase(iter);
        }
    }
    linkSlowCase(iter);

    // additional entry point to handle -0 cases.
    if (opcodeID == op_mul)
        linkSlowCase(iter);

    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
    stubCall.addArgument(src1, regT2);
    stubCall.addArgument(src2, regT2);
    stubCall.call(dst);
}
2624
2625 void JIT::emit_op_add(Instruction* currentInstruction)
2626 {
2627 unsigned result = currentInstruction[1].u.operand;
2628 unsigned op1 = currentInstruction[2].u.operand;
2629 unsigned op2 = currentInstruction[3].u.operand;
2630 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2631
2632 if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
2633 JITStubCall stubCall(this, cti_op_add);
2634 stubCall.addArgument(op1, regT2);
2635 stubCall.addArgument(op2, regT2);
2636 stubCall.call(result);
2637 return;
2638 }
2639
2640 if (isOperandConstantImmediateInt(op1)) {
2641 emitGetVirtualRegister(op2, regT0);
2642 emitJumpSlowCaseIfNotImmediateInteger(regT0);
2643 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0));
2644 signExtend32ToPtr(regT0, regT0);
2645 emitPutVirtualRegister(result);
2646 } else if (isOperandConstantImmediateInt(op2)) {
2647 emitGetVirtualRegister(op1, regT0);
2648 emitJumpSlowCaseIfNotImmediateInteger(regT0);
2649 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0));
2650 signExtend32ToPtr(regT0, regT0);
2651 emitPutVirtualRegister(result);
2652 } else {
2653 compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
2654 }
2655 }
2656
2657 void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2658 {
2659 unsigned result = currentInstruction[1].u.operand;
2660 unsigned op1 = currentInstruction[2].u.operand;
2661 unsigned op2 = currentInstruction[3].u.operand;
2662
2663 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2664 if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
2665 return;
2666
2667 if (isOperandConstantImmediateInt(op1)) {
2668 Jump notImm = getSlowCase(iter);
2669 linkSlowCase(iter);
2670 sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0);
2671 notImm.link(this);
2672 JITStubCall stubCall(this, cti_op_add);
2673 stubCall.addArgument(op1, regT2);
2674 stubCall.addArgument(regT0);
2675 stubCall.call(result);
2676 } else if (isOperandConstantImmediateInt(op2)) {
2677 Jump notImm = getSlowCase(iter);
2678 linkSlowCase(iter);
2679 sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0);
2680 notImm.link(this);
2681 JITStubCall stubCall(this, cti_op_add);
2682 stubCall.addArgument(regT0);
2683 stubCall.addArgument(op2, regT2);
2684 stubCall.call(result);
2685 } else {
2686 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
2687 ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
2688 compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
2689 }
2690 }
2691
// op_mul (JSVALUE32): constant-int fast path when one operand is a positive
// constant; otherwise the generic binary-arith path.
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // For now, only plant a fast int case if the constant operand is greater
    // than zero (a zero or negative constant could require a -0 result, which
    // is not representable as an immediate — see emitSlow_op_mul).
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Strip the tag so the multiply operates on the shifted payload.
        emitFastArithDeTagImmediate(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(result);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithDeTagImmediate(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        signExtend32ToPtr(regT0, regT0);
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(result);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
2719
// Emits the slow path for op_mul. The branch structure here must mirror
// emit_op_mul exactly: each linkSlowCase(iter) consumes one addSlowCase
// planted by the corresponding fast path, in the same order.
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;  // dst virtual register
    unsigned op1 = currentInstruction[2].u.operand;     // lhs virtual register
    unsigned op2 = currentInstruction[3].u.operand;     // rhs virtual register

    // Same predicate as the fast path: a positive constant operand means the
    // inline multiply was planted, so two slow-case entries must be linked
    // (not-an-int check and overflow check).
    if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
        || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
        JITStubCall stubCall(this, cti_op_mul);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else
        // Generic path: let the shared slow-case emitter link its own cases.
        compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
}
2738
2739 void JIT::emit_op_sub(Instruction* currentInstruction)
2740 {
2741 compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
2742 }
2743
2744 void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
2745 {
2746 compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
2747 }
2748
2749 #endif // USE(JSVALUE64)
2750
2751 /* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
2752
2753 #endif // USE(JSVALUE32_64)
2754
2755 } // namespace JSC
2756
2757 #endif // ENABLE(JIT)