]> git.saurik.com Git - apple/javascriptcore.git/blob - jit/JITArithmetic.cpp
JavaScriptCore-7601.1.46.3.tar.gz
[apple/javascriptcore.git] / jit / JITArithmetic.cpp
1 /*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include "config.h"
27
28 #if ENABLE(JIT)
29 #include "JIT.h"
30
31 #include "CodeBlock.h"
32 #include "JITInlines.h"
33 #include "JITOperations.h"
34 #include "JITStubs.h"
35 #include "JSArray.h"
36 #include "JSFunction.h"
37 #include "Interpreter.h"
38 #include "JSCInlines.h"
39 #include "ResultType.h"
40 #include "SamplingTool.h"
41 #include "SlowPathCall.h"
42
43
44 namespace JSC {
45
46 void JIT::emit_op_jless(Instruction* currentInstruction)
47 {
48 int op1 = currentInstruction[1].u.operand;
49 int op2 = currentInstruction[2].u.operand;
50 unsigned target = currentInstruction[3].u.operand;
51
52 emit_compareAndJump(op_jless, op1, op2, target, LessThan);
53 }
54
55 void JIT::emit_op_jlesseq(Instruction* currentInstruction)
56 {
57 int op1 = currentInstruction[1].u.operand;
58 int op2 = currentInstruction[2].u.operand;
59 unsigned target = currentInstruction[3].u.operand;
60
61 emit_compareAndJump(op_jlesseq, op1, op2, target, LessThanOrEqual);
62 }
63
64 void JIT::emit_op_jgreater(Instruction* currentInstruction)
65 {
66 int op1 = currentInstruction[1].u.operand;
67 int op2 = currentInstruction[2].u.operand;
68 unsigned target = currentInstruction[3].u.operand;
69
70 emit_compareAndJump(op_jgreater, op1, op2, target, GreaterThan);
71 }
72
73 void JIT::emit_op_jgreatereq(Instruction* currentInstruction)
74 {
75 int op1 = currentInstruction[1].u.operand;
76 int op2 = currentInstruction[2].u.operand;
77 unsigned target = currentInstruction[3].u.operand;
78
79 emit_compareAndJump(op_jgreatereq, op1, op2, target, GreaterThanOrEqual);
80 }
81
82 void JIT::emit_op_jnless(Instruction* currentInstruction)
83 {
84 int op1 = currentInstruction[1].u.operand;
85 int op2 = currentInstruction[2].u.operand;
86 unsigned target = currentInstruction[3].u.operand;
87
88 emit_compareAndJump(op_jnless, op1, op2, target, GreaterThanOrEqual);
89 }
90
91 void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
92 {
93 int op1 = currentInstruction[1].u.operand;
94 int op2 = currentInstruction[2].u.operand;
95 unsigned target = currentInstruction[3].u.operand;
96
97 emit_compareAndJump(op_jnlesseq, op1, op2, target, GreaterThan);
98 }
99
100 void JIT::emit_op_jngreater(Instruction* currentInstruction)
101 {
102 int op1 = currentInstruction[1].u.operand;
103 int op2 = currentInstruction[2].u.operand;
104 unsigned target = currentInstruction[3].u.operand;
105
106 emit_compareAndJump(op_jngreater, op1, op2, target, LessThanOrEqual);
107 }
108
109 void JIT::emit_op_jngreatereq(Instruction* currentInstruction)
110 {
111 int op1 = currentInstruction[1].u.operand;
112 int op2 = currentInstruction[2].u.operand;
113 unsigned target = currentInstruction[3].u.operand;
114
115 emit_compareAndJump(op_jngreatereq, op1, op2, target, LessThan);
116 }
117
118 void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
119 {
120 int op1 = currentInstruction[1].u.operand;
121 int op2 = currentInstruction[2].u.operand;
122 unsigned target = currentInstruction[3].u.operand;
123
124 emit_compareAndJumpSlow(op1, op2, target, DoubleLessThan, operationCompareLess, false, iter);
125 }
126
127 void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
128 {
129 int op1 = currentInstruction[1].u.operand;
130 int op2 = currentInstruction[2].u.operand;
131 unsigned target = currentInstruction[3].u.operand;
132
133 emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqual, operationCompareLessEq, false, iter);
134 }
135
136 void JIT::emitSlow_op_jgreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
137 {
138 int op1 = currentInstruction[1].u.operand;
139 int op2 = currentInstruction[2].u.operand;
140 unsigned target = currentInstruction[3].u.operand;
141
142 emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThan, operationCompareGreater, false, iter);
143 }
144
145 void JIT::emitSlow_op_jgreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
146 {
147 int op1 = currentInstruction[1].u.operand;
148 int op2 = currentInstruction[2].u.operand;
149 unsigned target = currentInstruction[3].u.operand;
150
151 emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqual, operationCompareGreaterEq, false, iter);
152 }
153
154 void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
155 {
156 int op1 = currentInstruction[1].u.operand;
157 int op2 = currentInstruction[2].u.operand;
158 unsigned target = currentInstruction[3].u.operand;
159
160 emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqualOrUnordered, operationCompareLess, true, iter);
161 }
162
163 void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
164 {
165 int op1 = currentInstruction[1].u.operand;
166 int op2 = currentInstruction[2].u.operand;
167 unsigned target = currentInstruction[3].u.operand;
168
169 emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrUnordered, operationCompareLessEq, true, iter);
170 }
171
172 void JIT::emitSlow_op_jngreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
173 {
174 int op1 = currentInstruction[1].u.operand;
175 int op2 = currentInstruction[2].u.operand;
176 unsigned target = currentInstruction[3].u.operand;
177
178 emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqualOrUnordered, operationCompareGreater, true, iter);
179 }
180
181 void JIT::emitSlow_op_jngreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
182 {
183 int op1 = currentInstruction[1].u.operand;
184 int op2 = currentInstruction[2].u.operand;
185 unsigned target = currentInstruction[3].u.operand;
186
187 emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrUnordered, operationCompareGreaterEq, true, iter);
188 }
189
190 #if USE(JSVALUE64)
191
void JIT::emit_op_negate(Instruction* currentInstruction)
{
    // op_negate fast path: negate an int32 or a boxed double in place.
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src, regT0);

    Jump srcNotInt = emitJumpIfNotImmediateInteger(regT0);
    // Bail out for 0 and INT_MIN: -0 is not an int32 and -INT_MIN overflows.
    // Both values have all of the low 31 bits clear, which this mask detects.
    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
    neg32(regT0);
    emitFastArithReTagImmediate(regT0, regT0);

    Jump end = jump();

    srcNotInt.link(this);
    // Not an int: must be a boxed double, otherwise take the slow case.
    emitJumpSlowCaseIfNotImmediateNumber(regT0);

    // Negate the double by flipping the sign bit of its 64-bit encoding.
    move(TrustedImm64((int64_t)0x8000000000000000ull), regT1);
    xor64(regT1, regT0);

    end.link(this);
    emitPutVirtualRegister(dst);
}
215
void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Link the two slow cases registered by emit_op_negate, in emission order.
    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_negate);
    slowPathCall.call();
}
224
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    // op_lshift fast path: int32 << int32. Non-int operands take the slow
    // path registered below (handled by slow_path_lshift).
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    // Strip the tags, shift, then re-box the int32 result.
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
    lshift32(regT2, regT0);
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}
241
void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Link the two slow cases (one per non-int operand check) in order.
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_lshift);
    slowPathCall.call();
}
249
void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    // op_rshift fast path: signed right shift. The number of slow cases
    // registered per shape is relied upon by emitSlow_op_rshift; the inline
    // comments below record that contract.
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        // isOperandConstantImmediateInt(op2) => 1 SlowCase
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (supportsFloatingPointTruncate()) {
            // A double lhs is unboxed and truncated to int32 inline; failure
            // to truncate exactly falls through to the slow path.
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
            // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            add64(tagTypeNumberRegister, regT0);
            move64ToDouble(regT0, fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
            lhsIsInt.link(this);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        } else {
            // !supportsFloatingPoint() => 2 SlowCases
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        }
        emitFastArithImmToInt(regT2);
        rshift32(regT2, regT0);
    }
    // A signed right shift always yields an int32, so no overflow check.
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(result);
}
284
void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op2 = currentInstruction[3].u.operand;

    // The linkSlowCase count for each shape must match the addSlowCase count
    // in emit_op_rshift (1 / 3 / 2 respectively).
    if (isOperandConstantImmediateInt(op2))
        linkSlowCase(iter);

    else {
        if (supportsFloatingPointTruncate()) {
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
        }
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_rshift);
    slowPathCall.call();
}
306
void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    // op_urshift fast path: unsigned right shift. Mirrors emit_op_rshift;
    // the slow-case counts per shape are consumed by emitSlow_op_urshift.
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        // isOperandConstantImmediateInt(op2) => 1 SlowCase
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        urshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (supportsFloatingPointTruncate()) {
            // Unbox and truncate a double lhs inline when possible.
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
            // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            add64(tagTypeNumberRegister, regT0);
            move64ToDouble(regT0, fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
            lhsIsInt.link(this);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        } else {
            // !supportsFloatingPoint() => 2 SlowCases
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        }
        emitFastArithImmToInt(regT2);
        urshift32(regT2, regT0);
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(result);
}
341
void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op2 = currentInstruction[3].u.operand;

    // The linkSlowCase count for each shape must match the addSlowCase count
    // in emit_op_urshift (1 / 3 / 2 respectively).
    if (isOperandConstantImmediateInt(op2))
        linkSlowCase(iter);

    else {
        if (supportsFloatingPointTruncate()) {
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
        }
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_urshift);
    slowPathCall.call();
}
363
void JIT::emit_op_unsigned(Instruction* currentInstruction)
{
    // op_unsigned: reinterpret an int32 as uint32. The fast path succeeds
    // only for non-negative int32s, which are representable unchanged.
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;

    emitGetVirtualRegister(op1, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    // Negative values need a double result; defer to the slow path.
    addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result, regT0);
}
375
void JIT::emitSlow_op_unsigned(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Link the two slow cases from emit_op_unsigned: not-int, then negative.
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_unsigned);
    slowPathCall.call();
}
384
void JIT::emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, RelationalCondition condition)
{
    // Shared fast-path emitter for all relational compare-and-branch opcodes.
    // The number of slow cases registered per shape must match what
    // emit_compareAndJumpSlow links for the same operand shapes.
    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateChar(op1)) {
        // Constant single-character string vs. a string cell: compare the
        // first character directly. Non-cells and load failures go slow.
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        // regT0 holds the *right* operand, so the condition is commuted.
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op2)) {
        // int vs. constant int: 1 slow case (lhs not an int).
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t op2imm = getConstantOperandImmediateInt(op2);
        addJump(branch32(condition, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        // Constant int vs. int: load rhs into regT1 and commute.
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
        int32_t op1imm = getConstantOperandImmediateInt(op1);
        addJump(branch32(commute(condition), regT1, Imm32(op1imm)), target);
    } else {
        // General int vs. int: 2 slow cases, one per operand.
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(condition, regT0, regT1), target);
    }
}
428
void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition condition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    // Shared slow-path emitter for the relational compare-and-branch opcodes.
    // All of these opcodes must have the same length because the
    // emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless)) calls below skip a
    // fixed-size instruction regardless of which opcode is being compiled.
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jlesseq), OPCODE_LENGTH_op_jlesseq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnless), OPCODE_LENGTH_op_jnless_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnlesseq), OPCODE_LENGTH_op_jnlesseq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreater), OPCODE_LENGTH_op_jgreater_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreatereq), OPCODE_LENGTH_op_jgreatereq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreater), OPCODE_LENGTH_op_jngreater_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreatereq), OPCODE_LENGTH_op_jngreatereq_equals_op_jless);

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        // Char-constant fast path registered 4 slow cases; link them all and
        // fall back to the generic compare operation.
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);

        emitGetVirtualRegister(op1, argumentGPR0);
        emitGetVirtualRegister(op2, argumentGPR1);
        callOperation(operation, argumentGPR0, argumentGPR1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            // Inline double-vs-constant-int compare: unbox regT0 (still
            // holding op1 from the fast path), convert the constant.
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            add64(tagTypeNumberRegister, regT0);
            move64ToDouble(regT0, fpRegT0);

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            // Condition false: skip over the branch instruction in hot code.
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
        }

        emitGetVirtualRegister(op2, regT1);
        callOperation(operation, regT0, regT1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            // regT1 still holds op2 from the fast path.
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            add64(tagTypeNumberRegister, regT1);
            move64ToDouble(regT1, fpRegT1);

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
        }

        emitGetVirtualRegister(op1, regT2);
        callOperation(operation, regT2, regT1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
    } else {
        // General case: two slow cases were registered; the first is linked
        // here, the second after the inline double-double compare.
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            Jump fail3 = emitJumpIfImmediateInteger(regT1);
            add64(tagTypeNumberRegister, regT0);
            add64(tagTypeNumberRegister, regT1);
            move64ToDouble(regT0, fpRegT0);
            move64ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
        }

        linkSlowCase(iter);
        callOperation(operation, regT0, regT1);
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
    }
}
528
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    // op_bitand fast path. The and64 with an Imm32 is sign-extended, so a
    // negative constant preserves the int tag bits and no re-tag is needed;
    // a non-negative constant clears them, hence the re-tag below.
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t imm = getConstantOperandImmediateInt(op1);
        and64(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t imm = getConstantOperandImmediateInt(op2);
        and64(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
    } else {
        // Variable case: and the encodings first. The int tag survives only
        // if both inputs carried it, so a single check after the and
        // apparently validates both operands at once — note the check comes
        // after the and deliberately.
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        and64(regT1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}
556
void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Every shape in emit_op_bitand registers exactly one slow case.
    linkSlowCase(iter);

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitand);
    slowPathCall.call();
}
564
void JIT::emit_op_inc(Instruction* currentInstruction)
{
    // op_inc: in-place increment of a virtual register holding an int32.
    int srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    // Overflow (INT_MAX + 1) takes the slow path (slow_path_inc).
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(srcDst);
}
575
void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Link the two slow cases from emit_op_inc: not-int, then overflow.
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_inc);
    slowPathCall.call();
}
583
void JIT::emit_op_dec(Instruction* currentInstruction)
{
    // op_dec: in-place decrement of a virtual register holding an int32.
    int srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    // Overflow (INT_MIN - 1) takes the slow path (slow_path_dec).
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(srcDst);
}
594
void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Link the two slow cases from emit_op_dec: not-int, then overflow.
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_dec);
    slowPathCall.call();
}
602
603 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
604
605 #if CPU(X86) || CPU(X86_64)
606
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    // op_mod fast path (x86/x86_64 only): int32 % int32 via IDIV. IDIV takes
    // its dividend in EDX:EAX and leaves the remainder in EDX.
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);

    emitGetVirtualRegisters(op1, regT3, op2, regT2);
    emitJumpSlowCaseIfNotImmediateInteger(regT3);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);

    move(regT3, regT0);
    // x % 0 is NaN: slow path.
    addSlowCase(branchTest32(Zero, regT2));
    // INT_MIN % -1 would raise #DE in IDIV: slow path.
    Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
    denominatorNotNeg1.link(this);
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
    // A zero remainder with a negative numerator means the result is -0,
    // which is not representable as an int32: slow path.
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
    addSlowCase(branchTest32(Zero, regT1));
    numeratorPositive.link(this);
    emitFastArithReTagImmediate(regT1, regT0);
    emitPutVirtualRegister(result);
}
635
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // Link the five slow cases registered by emit_op_mod, in emission order:
    // op1 not int, op2 not int, zero denominator, INT_MIN / -1, and the
    // negative-zero remainder case.
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
    slowPathCall.call();
}
646
647 #else // CPU(X86) || CPU(X86_64)
648
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    // Non-x86 targets have no fast path for op_mod: always call the slow
    // path, which also registers no slow cases (see emitSlow_op_mod below).
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
    slowPathCall.call();
}
654
void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
    // emit_op_mod registers no slow cases on this path, so this must never
    // be reached.
    UNREACHABLE_FOR_PLATFORM();
}
659
660 #endif // CPU(X86) || CPU(X86_64)
661
662 /* ------------------------------ END: OP_MOD ------------------------------ */
663
664 /* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
665
void JIT::compileBinaryArithOp(OpcodeID opcodeID, int, int op1, int op2, OperandTypes)
{
    // Shared int32 fast path for op_add / op_sub / op_mul. Registers slow
    // cases (linked by compileBinaryArithOpSlowCase): two not-int checks,
    // one overflow check, plus an extra one for op_mul's negative-zero case.
    // The result is left re-tagged in regT0; the caller stores it.
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    RareCaseProfile* profile = m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
    if (opcodeID == op_add)
        addSlowCase(branchAdd32(Overflow, regT1, regT0));
    else if (opcodeID == op_sub)
        addSlowCase(branchSub32(Overflow, regT1, regT0));
    else {
        ASSERT(opcodeID == op_mul);
        if (shouldEmitProfiling()) {
            // We want to be able to measure if this is taking the slow case just
            // because of negative zero. If this produces positive zero, then we
            // don't want the slow case to be taken because that will throw off
            // speculative compilation.
            move(regT0, regT2);
            addSlowCase(branchMul32(Overflow, regT1, regT2));
            JumpList done;
            done.append(branchTest32(NonZero, regT2));
            // Zero product: it is -0 only if exactly one operand is negative.
            Jump negativeZero = branch32(LessThan, regT0, TrustedImm32(0));
            done.append(branch32(GreaterThanOrEqual, regT1, TrustedImm32(0)));
            negativeZero.link(this);
            // We only get here if we have a genuine negative zero. Record this,
            // so that the speculative JIT knows that we failed speculation
            // because of a negative zero.
            add32(TrustedImm32(1), AbsoluteAddress(&profile->m_counter));
            addSlowCase(jump());
            done.link(this);
            move(regT2, regT0);
        } else {
            // Without profiling, any zero product goes to the slow case
            // (conservatively covers -0).
            addSlowCase(branchMul32(Overflow, regT1, regT0));
            addSlowCase(branchTest32(Zero, regT0));
        }
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
}
704
void JIT::compileBinaryArithOpSlowCase(Instruction* currentInstruction, OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, int result, int op1, int op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
{
    // Shared slow path for op_add / op_sub / op_mul. Handles double
    // arithmetic inline where the operands turn out to be numbers, and
    // otherwise dispatches to the generic slow-path call. The getSlowCase /
    // linkSlowCase consumption order below must match the registration order
    // of the corresponding fast-path emitter exactly.
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((TagTypeNumber + DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);

    Jump notImm1;
    Jump notImm2;
    if (op1HasImmediateIntFastCase) {
        // Constant-op1 fast case registered a single not-int check on op2.
        notImm2 = getSlowCase(iter);
    } else if (op2HasImmediateIntFastCase) {
        notImm1 = getSlowCase(iter);
    } else {
        notImm1 = getSlowCase(iter);
        notImm2 = getSlowCase(iter);
    }

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);

    // Fallback for non-number operands: generic slow-path call.
    Label stubFunctionCall(this);

    JITSlowPathCall slowPathCall(this, currentInstruction, opcodeID == op_add ? slow_path_add : opcodeID == op_sub ? slow_path_sub : slow_path_mul);
    slowPathCall.call();
    Jump end = jump();

    if (op1HasImmediateIntFastCase) {
        // op1 is a constant int; regT0 holds op2, which must be a double.
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op1, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        add64(tagTypeNumberRegister, regT0);
        move64ToDouble(regT0, fpRegT2);
    } else if (op2HasImmediateIntFastCase) {
        // op2 is a constant int; regT0 holds op1, which must be a double.
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op2, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        add64(tagTypeNumberRegister, regT0);
        move64ToDouble(regT0, fpRegT2);
    } else {
        // if we get here, eax is not an int32, edx not yet checked.
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        add64(tagTypeNumberRegister, regT0);
        move64ToDouble(regT0, fpRegT1);
        Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT2);
        Jump op2wasInteger = jump();

        // if we get here, eax IS an int32, edx is not.
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        convertInt32ToDouble(regT0, fpRegT1);
        op2isDouble.link(this);
        add64(tagTypeNumberRegister, regT1);
        move64ToDouble(regT1, fpRegT2);
        op2wasInteger.link(this);
    }

    // Operands are now in fpRegT1 (lhs) and fpRegT2 (rhs); do the double op.
    if (opcodeID == op_add)
        addDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_sub)
        subDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_mul)
        mulDouble(fpRegT2, fpRegT1);
    else {
        // NOTE(review): only add/sub/mul reach this helper in this file; the
        // op_div branch looks vestigial — confirm against other callers.
        ASSERT(opcodeID == op_div);
        divDouble(fpRegT2, fpRegT1);
    }
    // Re-box the double result (sub TagTypeNumber == add DoubleEncodeOffset).
    moveDoubleTo64(fpRegT1, regT0);
    sub64(tagTypeNumberRegister, regT0);
    emitPutVirtualRegister(result, regT0);

    end.link(this);
}
787
void JIT::emit_op_add(Instruction* currentInstruction)
{
    // op_add fast path: int32 + int32 (with constant-operand variants).
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        // A statically non-numeric operand: always take the slow path call.
        // NOTE(review): the argument-less addSlowCase() presumably registers
        // a dummy entry consumed by linkDummySlowCase() in emitSlow_op_add,
        // keeping the slow-case iterator in sync — confirm against JIT.h.
        addSlowCase();
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
        slowPathCall.call();
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Add into regT1 so regT0 still holds the unmodified operand for the
        // slow path.
        addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op1)), regT1));
        emitFastArithIntToImmNoCheck(regT1, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op2)), regT1));
        emitFastArithIntToImmNoCheck(regT1, regT0);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
817
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    // op_add slow path. Must mirror emit_op_add's shape decisions exactly so
    // the slow-case iterator stays aligned.
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        // Fast path already emitted the slow-path call; just consume the
        // dummy slow-case entry it registered.
        linkDummySlowCase(iter);
        return;
    }

    bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
    bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
    compileBinaryArithOpSlowCase(currentInstruction, op_add, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
}
834
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    // op_mul fast path. A constant operand is only exploited when it is
    // strictly positive, because then the product can never be -0.
    int result = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        // Add a special fast case profile because the DFG JIT will expect one.
        m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Multiply into regT1 so regT0 keeps the operand for the slow path.
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
        emitFastArithReTagImmediate(regT1, regT0);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        // Add a special fast case profile because the DFG JIT will expect one.
        m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
        emitFastArithReTagImmediate(regT1, regT0);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
863
864 void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
865 {
866 int result = currentInstruction[1].u.operand;
867 int op1 = currentInstruction[2].u.operand;
868 int op2 = currentInstruction[3].u.operand;
869 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
870
871 bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
872 bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
873 compileBinaryArithOpSlowCase(currentInstruction, op_mul, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
874 }
875
// Compile op_div. Loads both operands as doubles (converting inline when an
// operand is an immediate int), performs the divide in fpRegT0, then tries to
// re-tag the quotient as an int32 so downstream code sees integers whenever
// possible. Slow cases are registered only for operands not statically known
// to be numbers, in operand order — emitSlow_op_div must link them in the
// same order.
void JIT::emit_op_div(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateDouble(op1)) {
        // Constant double: unbox by adding the tag offset, then move to FPR.
        emitGetVirtualRegister(op1, regT0);
        add64(tagTypeNumberRegister, regT0);
        move64ToDouble(regT0, fpRegT0);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitLoadInt32ToDouble(op1, fpRegT0);
    } else {
        emitGetVirtualRegister(op1, regT0);
        if (!types.first().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT0);
        // Dynamic dispatch: immediate int converts via the FPU; anything else
        // here is a boxed double, unboxed by adding the tag offset.
        Jump notInt = emitJumpIfNotImmediateInteger(regT0);
        convertInt32ToDouble(regT0, fpRegT0);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        add64(tagTypeNumberRegister, regT0);
        move64ToDouble(regT0, fpRegT0);
        skipDoubleLoad.link(this);
    }

    // Same three-way load for the divisor, into regT1/fpRegT1.
    if (isOperandConstantImmediateDouble(op2)) {
        emitGetVirtualRegister(op2, regT1);
        add64(tagTypeNumberRegister, regT1);
        move64ToDouble(regT1, fpRegT1);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoadInt32ToDouble(op2, fpRegT1);
    } else {
        emitGetVirtualRegister(op2, regT1);
        if (!types.second().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT1);
        Jump notInt = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        add64(tagTypeNumberRegister, regT1);
        move64ToDouble(regT1, fpRegT1);
        skipDoubleLoad.link(this);
    }
    divDouble(fpRegT1, fpRegT0);

    // Is the result actually an integer? The DFG JIT would really like to know. If it's
    // not an integer, we increment a count. If this together with the slow case counter
    // are below threshold then the DFG JIT will compile this division with a speculation
    // that the remainder is zero.

    // As well, there are cases where a double result here would cause an important field
    // in the heap to sometimes have doubles in it, resulting in double predictions getting
    // propagated to a use site where it might cause damage (such as the index to an array
    // access). So if we are DFG compiling anything in the program, we want this code to
    // ensure that it produces integers whenever possible.

    JumpList notInteger;
    branchConvertDoubleToInt32(fpRegT0, regT0, notInteger, fpRegT1);
    // If we've got an integer, we might as well make that the result of the division.
    emitFastArithReTagImmediate(regT0, regT0);
    Jump isInteger = jump();
    notInteger.link(this);
    moveDoubleTo64(fpRegT0, regT0);
    // All-zero bits are handled separately below (presumably the +0.0 case,
    // which the int32 conversion rejected — TODO confirm against
    // branchConvertDoubleToInt32's failure conditions).
    Jump doubleZero = branchTest64(Zero, regT0);
    // Genuinely non-integer result: bump the special fast case counter so the
    // DFG knows this division produces doubles, then box the double by
    // subtracting the tag offset.
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));
    sub64(tagTypeNumberRegister, regT0);
    Jump trueDouble = jump();
    doubleZero.link(this);
    // Zero-bit double: materialize the tagged result directly from the tag
    // register (NOTE(review): this appears to encode integer zero — confirm
    // against the JSValue64 encoding) without counting it as a double result.
    move(tagTypeNumberRegister, regT0);
    trueDouble.link(this);
    isInteger.link(this);

    emitPutVirtualRegister(dst, regT0);
}
951
// Slow path for op_div. Links the not-a-number slow cases in exactly the
// order emit_op_div registered them (op1 first, then op2, each only when the
// operand was neither a constant nor statically known to be a number), then
// calls out to the generic slow_path_div operation.
void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
    // If both operands are statically numbers, emit_op_div registered no slow
    // cases, so this function should never be reached for this instruction.
    if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
        if (!ASSERT_DISABLED)
            abortWithReason(JITDivOperandsAreNotNumbers);
        return;
    }
    if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter);
    }
    if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
        if (!types.second().definitelyIsNumber())
            linkSlowCase(iter);
    }
    // All slow cases funnel into the common C++ division slow path.
    // (NOTE(review): the previous comment here about "(op1 * -N)" and an
    // extra -0 slow case described the multiply fast case, not division —
    // no extra slow case is linked in this function.)
    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
    slowPathCall.call();
}
974
975 void JIT::emit_op_sub(Instruction* currentInstruction)
976 {
977 int result = currentInstruction[1].u.operand;
978 int op1 = currentInstruction[2].u.operand;
979 int op2 = currentInstruction[3].u.operand;
980 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
981
982 compileBinaryArithOp(op_sub, result, op1, op2, types);
983 emitPutVirtualRegister(result);
984 }
985
986 void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
987 {
988 int result = currentInstruction[1].u.operand;
989 int op1 = currentInstruction[2].u.operand;
990 int op2 = currentInstruction[3].u.operand;
991 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
992
993 compileBinaryArithOpSlowCase(currentInstruction, op_sub, iter, result, op1, op2, types, false, false);
994 }
995
996 /* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
997
998 #endif // USE(JSVALUE64)
999
1000 } // namespace JSC
1001
1002 #endif // ENABLE(JIT)