/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)
#include "JIT.h"

#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JITStubs.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {

void JIT::emit_op_negate(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
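    // Negating 0 yields -0 (a double, not an int32) and negating INT_MIN
    // overflows, so payloads of 0 and 0x80000000 take the slow case: the
    // mask test below jumps when (payload & 0x7fffffff) == 0.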
    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
    neg32(regT0);
    emitStoreInt32(dst, regT0, (dst == src));

    Jump end = jump();

    srcNotInt.link(this);
    addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

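    // For a boxed double the tag register holds the high 32 bits of the
    // value, so flipping bit 31 of the tag negates the double in place.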
    xor32(TrustedImm32(1 << 31), regT1);
    store32(regT1, tagFor(dst));
    if (dst != src)
        store32(regT0, payloadFor(dst));

    end.link(this);
}

void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    JITStubCall stubCall(this, cti_op_negate);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}

void JIT::emit_compareAndJump(OpcodeID opcode, unsigned op1, unsigned op2, unsigned target, RelationalCondition condition)
{
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Character less.
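    // Fast path for comparing against a constant single-character string:
    // the other operand must be a cell holding a single-character string
    // (anything else falls into a slow case), and the char codes are then
    // compared directly. commute() flips the condition when the constant
    // sits on the left-hand side.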
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less.
    emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}

void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
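        // The constant-character fast path registered four slow cases: the
        // cell-tag check plus the failure jumps from emitLoadCharacterString
        // (which, by this count, apparently contributes three).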
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }
    JITStubCall stubCall(this, stub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
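    // The stub leaves a boolean in regT0; take the jump when it matches the
    // (possibly inverted) condition.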
    emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
}

// LeftShift (<<)

void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_lshift));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    lshift32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, dst == op1 || dst == op2, OPCODE_LENGTH(op_lshift));
}

void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// RightShift (>>) and UnsignedRightShift (>>>) helper

void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Slow case of rshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        if (shift) {
            if (isUnsigned)
                urshift32(Imm32(shift), regT0);
            else
                rshift32(Imm32(shift), regT0);
        } else if (isUnsigned) // signed right shift by zero is simply toInt conversion
            addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        if (!isOperandConstantImmediateInt(op1))
            addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        if (isUnsigned) {
            urshift32(regT2, regT0);
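            // A zero-fill shift can leave the sign bit set; such results
            // exceed INT_MAX and cannot be boxed as an int32 payload.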
            addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        } else
            rshift32(regT2, regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
    }
}

void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        // op1 = regT1:regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
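            // Before giving up, retry the shift with op1 as a double when the
            // hardware supports truncating doubles to int32.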
            JumpList failures;
            failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
            emitLoadDouble(op1, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift) {
                if (isUnsigned)
                    urshift32(Imm32(shift), regT0);
                else
                    rshift32(Imm32(shift), regT0);
            } else if (isUnsigned) // signed right shift by zero is simply toInt conversion
                failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
            move(TrustedImm32(JSValue::Int32Tag), regT1);
            emitStoreInt32(dst, regT0, false);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
        if (isUnsigned && !shift)
            linkSlowCase(iter); // failed to box in hot path
    } else {
        // op1 = regT1:regT0
        // op2 = regT3:regT2
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag))); // op1 is not a double
                emitLoadDouble(op1, fpRegT0);
                failures.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag))); // op2 is not an int
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                if (isUnsigned) {
                    urshift32(regT2, regT0);
                    failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
                } else
                    rshift32(regT2, regT0);
                move(TrustedImm32(JSValue::Int32Tag), regT1);
                emitStoreInt32(dst, regT0, false);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
        if (isUnsigned)
            linkSlowCase(iter); // Can't represent unsigned result as an immediate
    }

    JITStubCall stubCall(this, isUnsigned ? cti_op_urshift : cti_op_rshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// RightShift (>>)

void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, false);
}

void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, false);
}

// UnsignedRightShift (>>>)

void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, true);
}

void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, true);
}

// BitAnd (&)

void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        and32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op, OPCODE_LENGTH(op_bitand));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    and32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitand));
}

void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitand);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// BitOr (|)

void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        or32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitor));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    or32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitor));
}

void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// BitXor (^)

void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        xor32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitxor));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    xor32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitxor));
}

void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitxor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// PostInc (i++)

void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitLoad(srcDst, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));

    if (dst == srcDst) // x = x++ is a noop for ints.
        return;

    move(regT0, regT2);
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT2));
    emitStoreInt32(srcDst, regT2, true);

    emitStoreAndMapInt32(dst, regT1, regT0, false, OPCODE_LENGTH(op_post_inc));
}

void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter); // int32 check
    if (dst != srcDst)
        linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_post_inc);
    stubCall.addArgument(srcDst);
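    // The second argument is the register index itself; presumably the stub
    // uses it to write the incremented value back into srcDst.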
    stubCall.addArgument(TrustedImm32(srcDst));
    stubCall.call(dst);
}

// PostDec (i--)

void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitLoad(srcDst, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));

    if (dst == srcDst) // x = x-- is a noop for ints.
        return;

    move(regT0, regT2);
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT2));
    emitStoreInt32(srcDst, regT2, true);

    emitStoreAndMapInt32(dst, regT1, regT0, false, OPCODE_LENGTH(op_post_dec));
}

void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter); // int32 check
    if (dst != srcDst)
        linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_post_dec);
    stubCall.addArgument(srcDst);
    stubCall.addArgument(TrustedImm32(srcDst));
    stubCall.call(dst);
}

// PreInc (++i)

void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_pre_inc));
}

void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_pre_inc);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}

// PreDec (--i)

void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_pre_dec));
}

void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}

// Addition (+)

void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        addSlowCase();
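        // Dummy slow case: no branch ever targets it, but it keeps the
        // recorded slow-case entries in step with emitSlow_op_add, which
        // consumes it via linkDummySlowCase(iter).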
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1);
        stubCall.addArgument(op2);
        stubCall.call(dst);
        return;
    }

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchAdd32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT2);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchAdd32(Overflow, regT2, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    addDouble(fpRegT1, fpRegT0);
    emitStoreDouble(dst, fpRegT0);

    end.link(this);
}

void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        linkDummySlowCase(iter);
        return;
    }

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint())
            linkSlowCase(iter); // non-sse case
        else {
            ResultType opType = op == op1 ? types.first() : types.second();
            if (!opType.definitelyIsNumber())
                linkSlowCase(iter); // double check
        }
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_add);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// Subtraction (-)

void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    if (isOperandConstantImmediateInt(op2)) {
        emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchSub32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
#if ENABLE(JIT_CONSTANT_BLINDING)
    addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2, regT3));
#else
    addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2));
#endif

    emitStoreInt32(dst, regT2, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    subDouble(fpRegT0, fpRegT1);
    emitStoreDouble(dst, fpRegT1);

    end.link(this);
}

void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
            linkSlowCase(iter); // int32 or double check
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_sub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
    JumpList end;

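    // Two entry points follow: case 1 handles "op1 is not int32, op2 unknown",
    // case 2 handles "op1 is int32, op2 is not int32". Both store a double
    // result unless the opcode is a fused compare-and-jump, in which case
    // dst is the jump target.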
    if (!notInt32Op1.empty()) {
        // Double case 1: Op1 is not int32; Op2 is unknown.
        notInt32Op1.link(this);

        ASSERT(op1IsInRegisters);

        // Verify Op1 is double.
        if (!types.first().definitelyIsNumber())
            addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

        if (!op2IsInRegisters)
            emitLoad(op2, regT3, regT2);

        Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));

        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

        convertInt32ToDouble(regT2, fpRegT0);
        Jump doTheMath = jump();

        // Load Op2 as double into double register.
        doubleOp2.link(this);
        emitLoadDouble(op2, fpRegT0);

        // Do the math.
        doTheMath.link(this);
        switch (opcodeID) {
            case op_mul:
                emitLoadDouble(op1, fpRegT2);
                mulDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_add:
                emitLoadDouble(op1, fpRegT2);
                addDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_sub:
                emitLoadDouble(op1, fpRegT1);
                subDouble(fpRegT0, fpRegT1);
                emitStoreDouble(dst, fpRegT1);
                break;
            case op_div: {
                emitLoadDouble(op1, fpRegT1);
                divDouble(fpRegT0, fpRegT1);

#if ENABLE(VALUE_PROFILER)
                // Is the result actually an integer? The DFG JIT would really like to know. If it's
                // not an integer, we increment a count. If this count, together with the slow case
                // counter, is below threshold, the DFG JIT will compile this division with a
                // speculation that the remainder is zero.

                // In addition, there are cases where a double result here would cause an important field
                // in the heap to sometimes have doubles in it, resulting in double predictions getting
                // propagated to a use site where it might cause damage (such as the index to an array
                // access). So if we are DFG compiling anything in the program, we want this code to
                // ensure that it produces integers whenever possible.

                JumpList notInteger;
                branchConvertDoubleToInt32(fpRegT1, regT2, notInteger, fpRegT0);
                // If we've got an integer, we might as well make that the result of the division.
                emitStoreInt32(dst, regT2);
                Jump isInteger = jump();
                notInteger.link(this);
                add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
                emitStoreDouble(dst, fpRegT1);
                isInteger.link(this);
#else
                emitStoreDouble(dst, fpRegT1);
#endif
                break;
            }
            case op_jless:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
                break;
            case op_jlesseq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
                break;
            case op_jgreater:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleGreaterThan, fpRegT2, fpRegT0), dst);
                break;
            case op_jgreatereq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT2, fpRegT0), dst);
                break;
            case op_jnless:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            case op_jnlesseq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            case op_jngreater:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            case op_jngreatereq:
                emitLoadDouble(op1, fpRegT2);
                addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT0, fpRegT2), dst);
                break;
            default:
                ASSERT_NOT_REACHED();
        }

        if (!notInt32Op2.empty())
            end.append(jump());
    }

    if (!notInt32Op2.empty()) {
        // Double case 2: Op1 is int32; Op2 is not int32.
        notInt32Op2.link(this);

        ASSERT(op2IsInRegisters);

        if (!op1IsInRegisters)
            emitLoadPayload(op1, regT0);

        convertInt32ToDouble(regT0, fpRegT0);

        // Verify op2 is double.
        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));

        // Do the math.
        switch (opcodeID) {
            case op_mul:
                emitLoadDouble(op2, fpRegT2);
                mulDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_add:
                emitLoadDouble(op2, fpRegT2);
                addDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_sub:
                emitLoadDouble(op2, fpRegT2);
                subDouble(fpRegT2, fpRegT0);
                emitStoreDouble(dst, fpRegT0);
                break;
            case op_div: {
                emitLoadDouble(op2, fpRegT2);
                divDouble(fpRegT2, fpRegT0);
#if ENABLE(VALUE_PROFILER)
                // Is the result actually an integer? The DFG JIT would really like to know. If it's
                // not an integer, we increment a count. If this count, together with the slow case
                // counter, is below threshold, the DFG JIT will compile this division with a
                // speculation that the remainder is zero.

                // In addition, there are cases where a double result here would cause an important field
                // in the heap to sometimes have doubles in it, resulting in double predictions getting
                // propagated to a use site where it might cause damage (such as the index to an array
                // access). So if we are DFG compiling anything in the program, we want this code to
                // ensure that it produces integers whenever possible.

                JumpList notInteger;
                branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
                // If we've got an integer, we might as well make that the result of the division.
                emitStoreInt32(dst, regT2);
                Jump isInteger = jump();
                notInteger.link(this);
                add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
                emitStoreDouble(dst, fpRegT0);
                isInteger.link(this);
#else
                emitStoreDouble(dst, fpRegT0);
#endif
                break;
            }
            case op_jless:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
                break;
            case op_jlesseq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
                break;
            case op_jgreater:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleGreaterThan, fpRegT0, fpRegT1), dst);
                break;
            case op_jgreatereq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT0, fpRegT1), dst);
                break;
            case op_jnless:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            case op_jnlesseq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            case op_jngreater:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            case op_jngreatereq:
                emitLoadDouble(op2, fpRegT1);
                addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT1, fpRegT0), dst);
                break;
            default:
                ASSERT_NOT_REACHED();
        }
    }

    end.link(this);
}

// Multiplication (*)

void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

#if ENABLE(VALUE_PROFILER)
    m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    move(regT0, regT3);
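    // regT3 keeps a copy of op1's payload: if the product below is zero, the
    // slow path ORs the operand signs to tell a genuine 0 from -0 (e.g. -1 * 0).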
    addSlowCase(branchMul32(Overflow, regT2, regT0));
    addSlowCase(branchTest32(Zero, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    Jump overflow = getSlowCase(iter); // overflow check
    linkSlowCase(iter); // zero result check

    Jump negZero = branchOr32(Signed, regT2, regT3);
    emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));

    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));

    negZero.link(this);
#if ENABLE(VALUE_PROFILER)
    // We only get here if we have a genuine negative zero. Record this,
    // so that the speculative JIT knows that we failed speculation
    // because of a negative zero.
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
#endif
    overflow.link(this);

    if (!supportsFloatingPoint()) {
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
    }

    if (supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    Label jitStubCall(this);
    JITStubCall stubCall(this, cti_op_mul);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// Division (/)

void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

#if ENABLE(VALUE_PROFILER)
    m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif

    if (!supportsFloatingPoint()) {
        addSlowCase(jump());
        return;
    }

    // Int32 divide.
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    JumpList end;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);

    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

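    // Even the int32/int32 case divides in double arithmetic; the quotient is
    // converted back below when it happens to be an exact int32.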
    convertInt32ToDouble(regT0, fpRegT0);
    convertInt32ToDouble(regT2, fpRegT1);
    divDouble(fpRegT1, fpRegT0);
#if ENABLE(VALUE_PROFILER)
    // Is the result actually an integer? The DFG JIT would really like to know. If it's
    // not an integer, we increment a count. If this count, together with the slow case
    // counter, is below threshold, the DFG JIT will compile this division with a
    // speculation that the remainder is zero.

    // In addition, there are cases where a double result here would cause an important field
    // in the heap to sometimes have doubles in it, resulting in double predictions getting
    // propagated to a use site where it might cause damage (such as the index to an array
    // access). So if we are DFG compiling anything in the program, we want this code to
    // ensure that it produces integers whenever possible.

    // FIXME: This will fail to convert to integer if the result is zero. We should
    // distinguish between positive zero and negative zero here.

    JumpList notInteger;
    branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
    // If we've got an integer, we might as well make that the result of the division.
    emitStoreInt32(dst, regT2);
    end.append(jump());
    notInteger.link(this);
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
    emitStoreDouble(dst, fpRegT0);
#else
    emitStoreDouble(dst, fpRegT0);
#endif
    end.append(jump());

    // Double divide.
    emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint())
        linkSlowCase(iter);
    else {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// Mod (%)

/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if CPU(X86) || CPU(X86_64)
    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);
    ASSERT(regT3 == X86Registers::ebx);

    emitLoad2(op1, regT0, regT3, op2, regT1, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT0, TrustedImm32(JSValue::Int32Tag)));

    move(regT3, regT0);
    addSlowCase(branchTest32(Zero, regT2));
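    // idiv faults on INT_MIN / -1 (the quotient overflows), so that operand
    // pair is also routed to the slow case.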
    Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
    denominatorNotNeg1.link(this);
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
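    // In JS, a zero remainder with a negative numerator is -0, which cannot
    // be represented as an int32, so it is handed to the stub.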
    addSlowCase(branchTest32(Zero, regT1));
    numeratorPositive.link(this);
    emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
#else
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
#endif
}

void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if CPU(X86) || CPU(X86_64)
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(result);
#else
    UNUSED_PARAM(currentInstruction);
    UNUSED_PARAM(iter);
    // We would have really useful assertions here if it wasn't for the compiler's
    // insistence on attribute noreturn.
    // ASSERT_NOT_REACHED();
#endif
}

/* ------------------------------ END: OP_MOD ------------------------------ */

} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)