/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(JIT)
#if USE(JSVALUE32_64)
#include "JIT.h"

#include "CodeBlock.h"
#include "JITInlines.h"
#include "JITStubs.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "JSCInlines.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SlowPathCall.h"


namespace JSC {

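// Under the JSVALUE32_64 encoding a JSValue occupies two 32-bit words: a tag
// in the high word and a payload in the low word. Tags at or above
// JSValue::LowestTag denote non-double values (Int32Tag, CellTag, and so on);
// a smaller high word can only be the top half of an IEEE double. Throughout
// this file operand 1 is loaded into the regT1:regT0 (tag:payload) pair and
// operand 2 into regT3:regT2; the slow cases below rely on that convention.
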
void JIT::emit_op_negate(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
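    // (payload & 0x7fffffff) == 0 means the payload is 0 or 0x80000000:
    // negating 0 yields -0, which is not an int32, and negating INT32_MIN
    // overflows, so both take the slow case.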
    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
    neg32(regT0);
    emitStoreInt32(dst, regT0, (dst == src));

    Jump end = jump();

    srcNotInt.link(this);
    addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

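    // Negate the double in place by flipping its sign bit, which lives in
    // bit 31 of the tag (high) word.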
    xor32(TrustedImm32(1 << 31), regT1);
    store32(regT1, tagFor(dst));
    if (dst != src)
        store32(regT0, payloadFor(dst));

    end.link(this);
}

void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_negate);
    slowPathCall.call();
}

void JIT::emit_compareAndJump(OpcodeID opcode, int op1, int op2, unsigned target, RelationalCondition condition)
{
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Fast path: comparing against a constant single-character string.
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double comparison.
    emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}

void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
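    // The linkSlowCase() calls below must mirror, in order, the addSlowCase()
    // calls emitted by emit_compareAndJump above.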
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }
    emitLoad(op1, regT1, regT0);
    emitLoad(op2, regT3, regT2);
    callOperation(operation, regT1, regT0, regT3, regT2);
    emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
}

// LeftShift (<<)

void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
        emitStoreInt32(dst, regT0, dst == op1);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    lshift32(regT2, regT0);
    emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
}

void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_lshift);
    slowPathCall.call();
}

// RightShift (>>) and UnsignedRightShift (>>>) helper

void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    // The slow case of rshift makes assumptions about which registers hold the
    // shift arguments, so any changes here must be reflected there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
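        // ECMAScript masks shift counts to their low five bits.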
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        if (shift) {
            if (isUnsigned)
                urshift32(Imm32(shift), regT0);
            else
                rshift32(Imm32(shift), regT0);
        }
        emitStoreInt32(dst, regT0, dst == op1);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        if (!isOperandConstantImmediateInt(op1))
            addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        if (isUnsigned)
            urshift32(regT2, regT0);
        else
            rshift32(regT2, regT0);
        emitStoreInt32(dst, regT0, dst == op1);
    }
}

void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        // op1 = regT1:regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
            emitLoadDouble(op1, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift) {
                if (isUnsigned)
                    urshift32(Imm32(shift), regT0);
                else
                    rshift32(Imm32(shift), regT0);
            }
            move(TrustedImm32(JSValue::Int32Tag), regT1);
            emitStoreInt32(dst, regT0, false);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
    } else {
        // op1 = regT1:regT0
        // op2 = regT3:regT2
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag))); // op1 is not a double
                emitLoadDouble(op1, fpRegT0);
                failures.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag))); // op2 is not an int
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                if (isUnsigned)
                    urshift32(regT2, regT0);
                else
                    rshift32(regT2, regT0);
                move(TrustedImm32(JSValue::Int32Tag), regT1);
                emitStoreInt32(dst, regT0, false);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, isUnsigned ? slow_path_urshift : slow_path_rshift);
    slowPathCall.call();
}

// RightShift (>>)

void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, false);
}

void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, false);
}

// UnsignedRightShift (>>>)

void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, true);
}

void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, true);
}

void JIT::emit_op_unsigned(Instruction* currentInstruction)
{
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;

    emitLoad(op1, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
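    // op_unsigned reinterprets an int32 as a uint32; a negative payload would
    // have to be boxed as a double, so it takes the slow path.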
    addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
    emitStoreInt32(result, regT0, result == op1);
}

void JIT::emitSlow_op_unsigned(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_unsigned);
    slowPathCall.call();
}

// BitAnd (&)

void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    int op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        and32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, dst == op);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    and32(regT2, regT0);
    emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
}

void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitand);
    slowPathCall.call();
}

// BitOr (|)

void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    int op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        or32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, op == dst);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    or32(regT2, regT0);
    emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
}

void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitor);
    slowPathCall.call();
}

// BitXor (^)

void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    int op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        xor32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, op == dst);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    xor32(regT2, regT0);
    emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
}

void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitxor);
    slowPathCall.call();
}

void JIT::emit_op_inc(Instruction* currentInstruction)
{
    int srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}

void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_inc);
    slowPathCall.call();
}

void JIT::emit_op_dec(Instruction* currentInstruction)
{
    int srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}

void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_dec);
    slowPathCall.call();
}

// Addition (+)

void JIT::emit_op_add(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

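    // If profiling says either operand might not be a number, skip the inline
    // fast path: register a dummy slow case (keeping the slow-case iterator in
    // emitSlow_op_add aligned) and call straight into the slow path.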
    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        addSlowCase();
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
        slowPathCall.call();
        return;
    }

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    int op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchAdd32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitAdd32Constant(int dst, int op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT2);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchAdd32(Overflow, regT2, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    addDouble(fpRegT1, fpRegT0);
    emitStoreDouble(dst, fpRegT0);

    end.link(this);
}

void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        linkDummySlowCase(iter);
        return;
    }

    int op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint())
            linkSlowCase(iter); // non-sse case
        else {
            ResultType opType = op == op1 ? types.first() : types.second();
            if (!opType.definitelyIsNumber())
                linkSlowCase(iter); // double check
        }
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
    slowPathCall.call();
}

// Subtraction (-)

void JIT::emit_op_sub(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    if (isOperandConstantImmediateInt(op2)) {
        emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchSub32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSub32Constant(int dst, int op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2, regT3));
    emitStoreInt32(dst, regT2, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    subDouble(fpRegT0, fpRegT1);
    emitStoreDouble(dst, fpRegT1);

    end.link(this);
}

void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
            linkSlowCase(iter); // int32 or double check
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_sub);
    slowPathCall.call();
}

void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
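    // Two entry points: notInt32Op1 handles "op1 is not an int32, op2 is
    // unknown"; notInt32Op2 handles "op1 was an int32, op2 is not". The
    // op1IsInRegisters / op2IsInRegisters flags say whether the caller has
    // already loaded the operands into the regT1:regT0 / regT3:regT2 pairs.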
    JumpList end;

    if (!notInt32Op1.empty()) {
        // Double case 1: Op1 is not int32; Op2 is unknown.
        notInt32Op1.link(this);

        ASSERT(op1IsInRegisters);

        // Verify Op1 is double.
        if (!types.first().definitelyIsNumber())
            addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

        if (!op2IsInRegisters)
            emitLoad(op2, regT3, regT2);

        Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));

        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

        convertInt32ToDouble(regT2, fpRegT0);
        Jump doTheMath = jump();

        // Load Op2 as double into double register.
        doubleOp2.link(this);
        emitLoadDouble(op2, fpRegT0);

        // Do the math.
        doTheMath.link(this);
        switch (opcodeID) {
        case op_mul:
            emitLoadDouble(op1, fpRegT2);
            mulDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_add:
            emitLoadDouble(op1, fpRegT2);
            addDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_sub:
            emitLoadDouble(op1, fpRegT1);
            subDouble(fpRegT0, fpRegT1);
            emitStoreDouble(dst, fpRegT1);
            break;
        case op_div: {
            emitLoadDouble(op1, fpRegT1);
            divDouble(fpRegT0, fpRegT1);

            // Is the result actually an integer? The DFG JIT would really like
            // to know. If it's not an integer, we increment a count. If this
            // count, together with the slow case counter, is below threshold,
            // the DFG JIT will compile this division with a speculation that
            // the remainder is zero.

            // Also, there are cases where a double result here would cause an
            // important field in the heap to sometimes have doubles in it,
            // resulting in double predictions getting propagated to a use site
            // where they might cause damage (such as the index of an array
            // access). So if we are DFG compiling anything in the program, we
            // want this code to ensure that it produces integers whenever
            // possible.

            // FIXME: This will fail to convert to integer if the result is
            // zero. We should distinguish between positive zero and negative
            // zero here.

            JumpList notInteger;
            branchConvertDoubleToInt32(fpRegT1, regT2, notInteger, fpRegT0);
            // If we've got an integer, we might as well make that the result of the division.
            emitStoreInt32(dst, regT2);
            Jump isInteger = jump();
            notInteger.link(this);
            add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
            emitStoreDouble(dst, fpRegT1);
            isInteger.link(this);
            break;
        }
        case op_jless:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
            break;
        case op_jlesseq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
            break;
        case op_jgreater:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThan, fpRegT2, fpRegT0), dst);
            break;
        case op_jgreatereq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT2, fpRegT0), dst);
            break;
        case op_jnless:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        case op_jnlesseq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        case op_jngreater:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        case op_jngreatereq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }

        if (!notInt32Op2.empty())
            end.append(jump());
    }

    if (!notInt32Op2.empty()) {
        // Double case 2: Op1 is int32; Op2 is not int32.
        notInt32Op2.link(this);

        ASSERT(op2IsInRegisters);

        if (!op1IsInRegisters)
            emitLoadPayload(op1, regT0);

        convertInt32ToDouble(regT0, fpRegT0);

        // Verify op2 is double.
        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));

        // Do the math.
        switch (opcodeID) {
        case op_mul:
            emitLoadDouble(op2, fpRegT2);
            mulDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_add:
            emitLoadDouble(op2, fpRegT2);
            addDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_sub:
            emitLoadDouble(op2, fpRegT2);
            subDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_div: {
            emitLoadDouble(op2, fpRegT2);
            divDouble(fpRegT2, fpRegT0);
            // Is the result actually an integer? The DFG JIT would really like
            // to know. If it's not an integer, we increment a count. If this
            // count, together with the slow case counter, is below threshold,
            // the DFG JIT will compile this division with a speculation that
            // the remainder is zero.

            // Also, there are cases where a double result here would cause an
            // important field in the heap to sometimes have doubles in it,
            // resulting in double predictions getting propagated to a use site
            // where they might cause damage (such as the index of an array
            // access). So if we are DFG compiling anything in the program, we
            // want this code to ensure that it produces integers whenever
            // possible.

            // FIXME: This will fail to convert to integer if the result is
            // zero. We should distinguish between positive zero and negative
            // zero here.

            JumpList notInteger;
            branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
            // If we've got an integer, we might as well make that the result of the division.
            emitStoreInt32(dst, regT2);
            Jump isInteger = jump();
            notInteger.link(this);
            add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
            emitStoreDouble(dst, fpRegT0);
            isInteger.link(this);
            break;
        }
        case op_jless:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
            break;
        case op_jlesseq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
            break;
        case op_jgreater:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThan, fpRegT0, fpRegT1), dst);
            break;
        case op_jgreatereq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT0, fpRegT1), dst);
            break;
        case op_jnless:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        case op_jnlesseq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        case op_jngreater:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        case op_jngreatereq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
        }
    }

    end.link(this);
}

// Multiplication (*)

void JIT::emit_op_mul(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
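    // Save op1's payload in regT3 so the slow path can tell a genuine zero
    // product from negative zero; a zero result takes the slow case because it
    // might really be -0 (e.g. -1 * 0).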
    move(regT0, regT3);
    addSlowCase(branchMul32(Overflow, regT2, regT0));
    addSlowCase(branchTest32(Zero, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    Jump overflow = getSlowCase(iter); // overflow check
    linkSlowCase(iter); // zero result check

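    // The product was zero. It is truly 0 only if neither operand was
    // negative; otherwise the correct result is -0, which an int32 cannot
    // represent.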
    Jump negZero = branchOr32(Signed, regT2, regT3);
    emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));

    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));

    negZero.link(this);
    // We only get here if we have a genuine negative zero. Record this,
    // so that the speculative JIT knows that we failed speculation
    // because of a negative zero.
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
    overflow.link(this);

    if (!supportsFloatingPoint()) {
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
    }

    if (supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mul);
    slowPathCall.call();
}

// Division (/)

void JIT::emit_op_div(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);

    if (!supportsFloatingPoint()) {
        addSlowCase(jump());
        return;
    }

    // Int32 divide.
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    JumpList end;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);

    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    convertInt32ToDouble(regT0, fpRegT0);
    convertInt32ToDouble(regT2, fpRegT1);
    divDouble(fpRegT1, fpRegT0);
    // Is the result actually an integer? The DFG JIT would really like to
    // know. If it's not an integer, we increment a count. If this count,
    // together with the slow case counter, is below threshold, the DFG JIT
    // will compile this division with a speculation that the remainder is
    // zero.

    // Also, there are cases where a double result here would cause an
    // important field in the heap to sometimes have doubles in it, resulting
    // in double predictions getting propagated to a use site where they might
    // cause damage (such as the index of an array access). So if we are DFG
    // compiling anything in the program, we want this code to ensure that it
    // produces integers whenever possible.

    // FIXME: This will fail to convert to integer if the result is zero. We
    // should distinguish between positive zero and negative zero here.

    JumpList notInteger;
    branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
    // If we've got an integer, we might as well make that the result of the division.
    emitStoreInt32(dst, regT2);
    end.append(jump());
    notInteger.link(this);
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
    emitStoreDouble(dst, fpRegT0);
    end.append(jump());

    // Double divide.
    emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint())
        linkSlowCase(iter);
    else {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
    slowPathCall.call();
}

// Mod (%)

/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

void JIT::emit_op_mod(Instruction* currentInstruction)
{
#if CPU(X86) || CPU(X86_64)
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);
    ASSERT(regT3 == X86Registers::ebx);

    emitLoad2(op1, regT0, regT3, op2, regT1, regT2);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT0, TrustedImm32(JSValue::Int32Tag)));

    move(regT3, regT0);
    addSlowCase(branchTest32(Zero, regT2));
    Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
    denominatorNotNeg1.link(this);
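    // cdq sign-extends eax into edx:eax; idivl leaves the quotient in eax and
    // the remainder in edx. The guards above reject a zero divisor and
    // INT32_MIN / -1, the cases where idivl would fault.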
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
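    // ECMAScript % takes the sign of the dividend, so a zero remainder from a
    // negative numerator is really -0, which cannot be stored as an int32.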
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
    addSlowCase(branchTest32(Zero, regT1));
    numeratorPositive.link(this);
    emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
#else
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
    slowPathCall.call();
#endif
}

void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if CPU(X86) || CPU(X86_64)
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
    slowPathCall.call();
#else
    UNUSED_PARAM(currentInstruction);
    UNUSED_PARAM(iter);
    // We would have really useful assertions here if it weren't for the
    // compiler's insistence on attribute noreturn.
    // RELEASE_ASSERT_NOT_REACHED();
#endif
}

/* ------------------------------ END: OP_MOD ------------------------------ */

} // namespace JSC

#endif // USE(JSVALUE32_64)
#endif // ENABLE(JIT)