// jit/JITArithmetic32_64.cpp (from the JavaScriptCore-1218.35 source tree)
/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)
#include "JIT.h"

#include "CodeBlock.h"
#include "JITInlines.h"
#include "JITStubCall.h"
#include "JITStubs.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "Operations.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {

void JIT::emit_op_negate(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
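    // Payloads of 0 and 0x80000000 take the slow case: negating 0 yields -0,
    // which is not an int32, and negating INT_MIN overflows.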
    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
    neg32(regT0);
    emitStoreInt32(dst, regT0, (dst == src));

    Jump end = jump();

    srcNotInt.link(this);
    addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

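    // The operand is a double: negate it by flipping the sign bit, which lives
    // in the high word (regT1).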
    xor32(TrustedImm32(1 << 31), regT1);
    store32(regT1, tagFor(dst));
    if (dst != src)
        store32(regT0, payloadFor(dst));

    end.link(this);
}

void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    JITStubCall stubCall(this, cti_op_negate);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}

void JIT::emit_compareAndJump(OpcodeID opcode, unsigned op1, unsigned op2, unsigned target, RelationalCondition condition)
{
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Character comparison: one operand is a constant single-character string.
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
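        // The constant is the left-hand operand but sits on the right of the
        // branch, so the condition must be commuted.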
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double comparison.
    emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}

void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
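        // One slow case for the cell-tag check, plus the failure jumps that
        // emitLoadCharacterString appended in the fast path.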
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }
    JITStubCall stubCall(this, stub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
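    // The stub returns a boolean in regT0; for the inverted opcodes
    // (op_jnless and friends) the jump is taken when the result is false.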
    emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
}

// LeftShift (<<)

void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_lshift));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    lshift32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, dst == op1 || dst == op2, OPCODE_LENGTH(op_lshift));
}

void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// RightShift (>>) and UnsignedRightShift (>>>) helper

void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // The slow case of rshift makes assumptions about which registers hold the
    // shift arguments, so any register changes here must be mirrored there.
    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
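        // ECMAScript only uses the low five bits of the shift count.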
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        if (shift) {
            if (isUnsigned)
                urshift32(Imm32(shift), regT0);
            else
                rshift32(Imm32(shift), regT0);
        } else if (isUnsigned) // >>> by zero of a negative int32 yields a uint32 too big for an int32 payload; signed >> by zero needs nothing.
            addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        if (!isOperandConstantImmediateInt(op1))
            addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        if (isUnsigned) {
            urshift32(regT2, regT0);
            addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        } else
            rshift32(regT2, regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
    }
}

void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        // op1 = regT1:regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
            emitLoadDouble(op1, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift) {
                if (isUnsigned)
                    urshift32(Imm32(shift), regT0);
                else
                    rshift32(Imm32(shift), regT0);
            } else if (isUnsigned) // >>> by zero of a negative int32 yields a uint32 too big for an int32 payload; signed >> by zero needs nothing.
                failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
            move(TrustedImm32(JSValue::Int32Tag), regT1);
            emitStoreInt32(dst, regT0, false);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
        if (isUnsigned && !shift)
            linkSlowCase(iter); // failed to box in hot path
    } else {
        // op1 = regT1:regT0
        // op2 = regT3:regT2
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag))); // op1 is not a double
                emitLoadDouble(op1, fpRegT0);
                failures.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag))); // op2 is not an int
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                if (isUnsigned) {
                    urshift32(regT2, regT0);
                    failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
                } else
                    rshift32(regT2, regT0);
                move(TrustedImm32(JSValue::Int32Tag), regT1);
                emitStoreInt32(dst, regT0, false);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
        if (isUnsigned)
            linkSlowCase(iter); // Can't represent unsigned result as an immediate
    }

    JITStubCall stubCall(this, isUnsigned ? cti_op_urshift : cti_op_rshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// RightShift (>>)

void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, false);
}

void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, false);
}

// UnsignedRightShift (>>>)

void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, true);
}

void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, true);
}

// BitAnd (&)

void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        and32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, dst == op, OPCODE_LENGTH(op_bitand));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    and32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitand));
}

void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitand);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// BitOr (|)

void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        or32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitor));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    or32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitor));
}

void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// BitXor (^)

void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        xor32(Imm32(constant), regT0);
        emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitxor));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    xor32(regT2, regT0);
    emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitxor));
}

void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitxor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

void JIT::emit_op_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_inc));
}

void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_inc);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}

void JIT::emit_op_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_dec));
}

void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_dec);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}

// Addition (+)

void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

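    // If either operand is statically known not to be a number, always call the
    // stub. The argument-less addSlowCase() records a dummy entry so the slow
    // path iterator stays in sync (see linkDummySlowCase below).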
    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        addSlowCase();
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1);
        stubCall.addArgument(op2);
        stubCall.call(dst);
        return;
    }

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchAdd32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT2);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchAdd32(Overflow, regT2, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    addDouble(fpRegT1, fpRegT0);
    emitStoreDouble(dst, fpRegT0);

    end.link(this);
}

void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        linkDummySlowCase(iter);
        return;
    }

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint())
            linkSlowCase(iter); // non-sse case
        else {
            ResultType opType = op == op1 ? types.first() : types.second();
            if (!opType.definitelyIsNumber())
                linkSlowCase(iter); // double check
        }
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_add);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// Subtraction (-)

void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    if (isOperandConstantImmediateInt(op2)) {
        emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchSub32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
#if ENABLE(JIT_CONSTANT_BLINDING)
    addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2, regT3));
#else
    addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2));
#endif

    emitStoreInt32(dst, regT2, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    subDouble(fpRegT0, fpRegT1);
    emitStoreDouble(dst, fpRegT1);

    end.link(this);
}

void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
            linkSlowCase(iter); // int32 or double check
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_sub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
    JumpList end;

    if (!notInt32Op1.empty()) {
        // Double case 1: Op1 is not int32; Op2 is unknown.
        notInt32Op1.link(this);

        ASSERT(op1IsInRegisters);

        // Verify Op1 is double.
        if (!types.first().definitelyIsNumber())
            addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

        if (!op2IsInRegisters)
            emitLoad(op2, regT3, regT2);

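        // Tag values numerically below LowestTag are the high word of a double.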
        Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));

        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

        convertInt32ToDouble(regT2, fpRegT0);
        Jump doTheMath = jump();

        // Load Op2 as double into double register.
        doubleOp2.link(this);
        emitLoadDouble(op2, fpRegT0);

        // Do the math.
        doTheMath.link(this);
        switch (opcodeID) {
        case op_mul:
            emitLoadDouble(op1, fpRegT2);
            mulDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_add:
            emitLoadDouble(op1, fpRegT2);
            addDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_sub:
            emitLoadDouble(op1, fpRegT1);
            subDouble(fpRegT0, fpRegT1);
            emitStoreDouble(dst, fpRegT1);
            break;
        case op_div: {
            emitLoadDouble(op1, fpRegT1);
            divDouble(fpRegT0, fpRegT1);

#if ENABLE(VALUE_PROFILER)
            // Is the result actually an integer? The DFG JIT would really like to know. If it's
            // not an integer, we increment a count. If this count together with the slow case
            // counter is below threshold then the DFG JIT will compile this division with a
            // speculation that the remainder is zero.

            // As well, there are cases where a double result here would cause an important field
            // in the heap to sometimes have doubles in it, resulting in double predictions getting
            // propagated to a use site where it might cause damage (such as the index to an array
            // access). So if we are DFG compiling anything in the program, we want this code to
            // ensure that it produces integers whenever possible.

            // FIXME: This will fail to convert to integer if the result is zero. We should
            // distinguish between positive zero and negative zero here.

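            // For example, (6 / 2) is stored as int32 3, while (1 / 2) is
            // stored as double 0.5 and bumps the special fast case counter.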
            JumpList notInteger;
            branchConvertDoubleToInt32(fpRegT1, regT2, notInteger, fpRegT0);
            // If we've got an integer, we might as well make that the result of the division.
            emitStoreInt32(dst, regT2);
            Jump isInteger = jump();
            notInteger.link(this);
            add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
            emitStoreDouble(dst, fpRegT1);
            isInteger.link(this);
#else
            emitStoreDouble(dst, fpRegT1);
#endif
            break;
        }
        case op_jless:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
            break;
        case op_jlesseq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
            break;
        case op_jgreater:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThan, fpRegT2, fpRegT0), dst);
            break;
        case op_jgreatereq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT2, fpRegT0), dst);
            break;
        case op_jnless:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        case op_jnlesseq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        case op_jngreater:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        case op_jngreatereq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (!notInt32Op2.empty())
            end.append(jump());
    }

    if (!notInt32Op2.empty()) {
        // Double case 2: Op1 is int32; Op2 is not int32.
        notInt32Op2.link(this);

        ASSERT(op2IsInRegisters);

        if (!op1IsInRegisters)
            emitLoadPayload(op1, regT0);

        convertInt32ToDouble(regT0, fpRegT0);

        // Verify op2 is double.
        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));

        // Do the math.
        switch (opcodeID) {
        case op_mul:
            emitLoadDouble(op2, fpRegT2);
            mulDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_add:
            emitLoadDouble(op2, fpRegT2);
            addDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_sub:
            emitLoadDouble(op2, fpRegT2);
            subDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_div: {
            emitLoadDouble(op2, fpRegT2);
            divDouble(fpRegT2, fpRegT0);
#if ENABLE(VALUE_PROFILER)
            // Is the result actually an integer? The DFG JIT would really like to know. If it's
            // not an integer, we increment a count. If this count together with the slow case
            // counter is below threshold then the DFG JIT will compile this division with a
            // speculation that the remainder is zero.

            // As well, there are cases where a double result here would cause an important field
            // in the heap to sometimes have doubles in it, resulting in double predictions getting
            // propagated to a use site where it might cause damage (such as the index to an array
            // access). So if we are DFG compiling anything in the program, we want this code to
            // ensure that it produces integers whenever possible.

            // FIXME: This will fail to convert to integer if the result is zero. We should
            // distinguish between positive zero and negative zero here.

            JumpList notInteger;
            branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
            // If we've got an integer, we might as well make that the result of the division.
            emitStoreInt32(dst, regT2);
            Jump isInteger = jump();
            notInteger.link(this);
            add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
            emitStoreDouble(dst, fpRegT0);
            isInteger.link(this);
#else
            emitStoreDouble(dst, fpRegT0);
#endif
            break;
        }
        case op_jless:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
            break;
        case op_jlesseq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
            break;
        case op_jgreater:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThan, fpRegT0, fpRegT1), dst);
            break;
        case op_jgreatereq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT0, fpRegT1), dst);
            break;
        case op_jnless:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        case op_jnlesseq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        case op_jngreater:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        case op_jngreatereq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    end.link(this);
}

// Multiplication (*)

void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

#if ENABLE(VALUE_PROFILER)
    m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    move(regT0, regT3);
    addSlowCase(branchMul32(Overflow, regT2, regT0));
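    // A zero result is suspect: if either operand was negative it is really -0,
    // which must be stored as a double. Sort that out in the slow case.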
    addSlowCase(branchTest32(Zero, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    Jump overflow = getSlowCase(iter); // overflow check
    linkSlowCase(iter); // zero result check

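    // regT3 still holds op1's payload and regT2 holds op2's; if neither is
    // negative, the zero result really is +0 and can stay an int32.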
    Jump negZero = branchOr32(Signed, regT2, regT3);
    emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));

    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));

    negZero.link(this);
#if ENABLE(VALUE_PROFILER)
    // We only get here if we have a genuine negative zero. Record this,
    // so that the speculative JIT knows that we failed speculation
    // because of a negative zero.
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
#endif
    overflow.link(this);

    if (!supportsFloatingPoint()) {
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
    }

    if (supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    Label jitStubCall(this);
    JITStubCall stubCall(this, cti_op_mul);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// Division (/)

void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

#if ENABLE(VALUE_PROFILER)
    m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif

    if (!supportsFloatingPoint()) {
        addSlowCase(jump());
        return;
    }

    // Int32 divide.
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    JumpList end;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);

    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    convertInt32ToDouble(regT0, fpRegT0);
    convertInt32ToDouble(regT2, fpRegT1);
    divDouble(fpRegT1, fpRegT0);
#if ENABLE(VALUE_PROFILER)
    // Is the result actually an integer? The DFG JIT would really like to know. If it's
    // not an integer, we increment a count. If this count together with the slow case
    // counter is below threshold then the DFG JIT will compile this division with a
    // speculation that the remainder is zero.

    // As well, there are cases where a double result here would cause an important field
    // in the heap to sometimes have doubles in it, resulting in double predictions getting
    // propagated to a use site where it might cause damage (such as the index to an array
    // access). So if we are DFG compiling anything in the program, we want this code to
    // ensure that it produces integers whenever possible.

    // FIXME: This will fail to convert to integer if the result is zero. We should
    // distinguish between positive zero and negative zero here.

    JumpList notInteger;
    branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
    // If we've got an integer, we might as well make that the result of the division.
    emitStoreInt32(dst, regT2);
    end.append(jump());
    notInteger.link(this);
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
    emitStoreDouble(dst, fpRegT0);
#else
    emitStoreDouble(dst, fpRegT0);
#endif
    end.append(jump());

    // Double divide.
    emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint())
        linkSlowCase(iter);
    else {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// Mod (%)

/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if CPU(X86) || CPU(X86_64)
    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);
    ASSERT(regT3 == X86Registers::ebx);

    emitLoad2(op1, regT0, regT3, op2, regT1, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT0, TrustedImm32(JSValue::Int32Tag)));

    move(regT3, regT0);
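    // A zero denominator produces NaN, so it goes to the slow case.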
    addSlowCase(branchTest32(Zero, regT2));
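    // INT_MIN % -1 would fault in idiv (the quotient overflows), so it also
    // takes the slow case.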
    Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
    denominatorNotNeg1.link(this);
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
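    // A zero remainder with a negative numerator is -0, which cannot be
    // represented as an int32.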
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
    addSlowCase(branchTest32(Zero, regT1));
    numeratorPositive.link(this);
    emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
#else
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
#endif
}

void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if CPU(X86) || CPU(X86_64)
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(result);
#else
    UNUSED_PARAM(currentInstruction);
    UNUSED_PARAM(iter);
    // We would have really useful assertions here if it weren't for the compiler's
    // insistence on attribute noreturn.
    // RELEASE_ASSERT_NOT_REACHED();
#endif
}

/* ------------------------------ END: OP_MOD ------------------------------ */

} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)