// jit/JITArithmetic32_64.cpp (from the JavaScriptCore-621.1 source drop)
/*
 * Copyright (C) 2008 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"
#include "JIT.h"

#if ENABLE(JIT)

#include "CodeBlock.h"
#include "JITInlineMethods.h"
#include "JITStubCall.h"
#include "JITStubs.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
#include "ResultType.h"
#include "SamplingTool.h"

#ifndef NDEBUG
#include <stdio.h>
#endif

using namespace std;

namespace JSC {

#if USE(JSVALUE32_64)

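// A note on the JSVALUE32_64 encoding as it is used in this file (summarized
// from the tag checks below rather than from an authoritative description):
// every JSValue is a pair of 32-bit words, a tag and a payload. Int32s, cells,
// booleans, etc. use dedicated tag constants (JSValue::Int32Tag, CellTag, ...)
// at the top of the 32-bit range, while any tag below JSValue::LowestTag means
// the two words hold the raw bits of a double, the tag word being the high
// (sign/exponent) word. Operands are conventionally loaded with the tag in
// regT1 and the payload in regT0 (regT3/regT2 for a second operand). One
// consequence, used by emit_op_negate below: flipping bit 31 of a boxed
// double's tag word flips the double's sign.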
void JIT::emit_op_negate(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump srcNotInt = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    addSlowCase(branchTest32(Zero, regT0, Imm32(0x7fffffff)));
    neg32(regT0);
    emitStoreInt32(dst, regT0, (dst == src));

    Jump end = jump();

    srcNotInt.link(this);
    addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));

    xor32(Imm32(1 << 31), regT1);
    store32(regT1, tagFor(dst));
    if (dst != src)
        store32(regT0, payloadFor(dst));

    end.link(this);
}

void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    JITStubCall stubCall(this, cti_op_negate);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}

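// The jnless / jless / jlesseq / jnlesseq emitters below all follow the same
// shape: a fast path when one operand is a constant single-character string,
// a fast path for int32 operands (with a constant folded into the branch
// immediate), and a shared double fallback through emitBinaryDoubleOp(). When
// the constant is the left-hand operand the branch condition is mirrored, so
// e.g. "10 < x" is compiled as a GreaterThan test of x against Imm32(10).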
void JIT::emit_op_jnless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Character less.
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op1)) {
        // Int32 less.
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
        addJump(branch32(LessThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThanOrEqual, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less.
    emitBinaryDoubleOp(op_jnless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}

void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, cti_op_jless);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(Zero, regT0), target);
}

void JIT::emit_op_jless(Instruction* currentInstruction)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Character less.
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(GreaterThan, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(LessThan, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
        addJump(branch32(GreaterThan, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(LessThan, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
        addJump(branch32(LessThan, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less.
    emitBinaryDoubleOp(op_jless, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}

void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }
    JITStubCall stubCall(this, cti_op_jless);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
}

void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Character less.
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
        addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less.
    emitBinaryDoubleOp(invert ? op_jnlesseq : op_jlesseq, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}

void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, cti_op_jlesseq);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call();
    emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
}

void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
{
    emit_op_jlesseq(currentInstruction, true);
}

void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitSlow_op_jlesseq(currentInstruction, iter, true);
}

// LeftShift (<<)

void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
        emitStoreInt32(dst, regT0, dst == op1);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    lshift32(regT2, regT0);
    emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
}

void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// RightShift (>>) and UnsignedRightShift (>>>) helper

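// ECMAScript takes shift counts modulo 32, hence the "shift & 0x1f" masking
// below. The unsigned case needs one extra check: a result with the sign bit
// set (e.g. (-1) >>> 0, which is 4294967295) cannot be stored as an int32
// payload and has to be boxed as a double on the slow path.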
void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // The slow case of rshift makes assumptions about which registers hold the
    // shift arguments, so any changes here must be mirrored in
    // emitRightShiftSlowCase() as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        int shift = getConstantOperand(op2).asInt32();
        if (isUnsigned) {
            if (shift)
                urshift32(Imm32(shift & 0x1f), regT0);
            // A negative constant shift, or a shift that is 0 (mod 32), makes
            // the unsigned shift behave (essentially) as a ToUint32 conversion,
            // whose result may not be representable as an immediate int32, so a
            // negative (i.e. too-large unsigned) result takes the slow path.
            if (shift < 0 || !(shift & 31))
                addSlowCase(branch32(LessThan, regT0, Imm32(0)));
        } else if (shift) { // signed right shift by zero is simply a toInt conversion
            rshift32(Imm32(shift & 0x1f), regT0);
        }
        emitStoreInt32(dst, regT0, dst == op1);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    if (isUnsigned) {
        urshift32(regT2, regT0);
        addSlowCase(branch32(LessThan, regT0, Imm32(0)));
    } else
        rshift32(regT2, regT0);
    emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
}

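// Each slow-path emitter must call linkSlowCase() once per addSlowCase() its
// fast path planted, in the same order, which is why the code below re-derives
// the same operand-constant and floating-point conditions instead of linking a
// fixed number of cases.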
void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32();
        // op1 = regT1:regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(branch32(AboveOrEqual, regT1, Imm32(JSValue::LowestTag)));
            emitLoadDouble(op1, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (isUnsigned) {
                if (shift)
                    urshift32(Imm32(shift & 0x1f), regT0);
                if (shift < 0 || !(shift & 31))
                    failures.append(branch32(LessThan, regT0, Imm32(0)));
            } else if (shift)
                rshift32(Imm32(shift & 0x1f), regT0);
            emitStoreInt32(dst, regT0, false);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
        if (isUnsigned && (shift < 0 || !(shift & 31)))
            linkSlowCase(iter); // failed to box in hot path
    } else {
        // op1 = regT1:regT0
        // op2 = regT3:regT2
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                Jump notDouble = branch32(Above, regT1, Imm32(JSValue::LowestTag)); // op1 is not a double
                emitLoadDouble(op1, fpRegT0);
                Jump notInt = branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)); // op2 is not an int
                Jump cantTruncate = branchTruncateDoubleToInt32(fpRegT0, regT0);
                if (isUnsigned)
                    urshift32(regT2, regT0);
                else
                    rshift32(regT2, regT0);
                emitStoreInt32(dst, regT0, false);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                notDouble.link(this);
                notInt.link(this);
                cantTruncate.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
        if (isUnsigned)
            linkSlowCase(iter); // Can't represent unsigned result as an immediate
    }

    JITStubCall stubCall(this, isUnsigned ? cti_op_urshift : cti_op_rshift);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// RightShift (>>)

void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, false);
}

void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, false);
}

// UnsignedRightShift (>>>)

void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    emitRightShift(currentInstruction, true);
}

void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    emitRightShiftSlowCase(currentInstruction, iter, true);
}

// BitAnd (&)

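// getOperandConstantImmediateInt(op1, op2, op, constant), as used by the
// bitwise and add emitters below, is read here (from its call sites, not from
// its definition) as: return true if either operand is a constant int32,
// storing that constant's value in 'constant' and the index of the other,
// non-constant operand in 'op'.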
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        and32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, (op == dst));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    and32(regT2, regT0);
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}

void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitand);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// BitOr (|)

void JIT::emit_op_bitor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        or32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, (op == dst));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    or32(regT2, regT0);
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}

void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// BitXor (^)

void JIT::emit_op_bitxor(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitLoad(op, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
        xor32(Imm32(constant), regT0);
        emitStoreInt32(dst, regT0, (op == dst));
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));
    xor32(regT2, regT0);
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
}

void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitxor);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// BitNot (~)

void JIT::emit_op_bitnot(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));

    not32(regT0);
    emitStoreInt32(dst, regT0, (dst == src));
}

void JIT::emitSlow_op_bitnot(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check

    JITStubCall stubCall(this, cti_op_bitnot);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}

// PostInc (i++)

void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitLoad(srcDst, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));

    if (dst == srcDst) // x = x++ is a noop for ints.
        return;

    emitStoreInt32(dst, regT0);

    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}

void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter); // int32 check
    if (dst != srcDst)
        linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_post_inc);
    stubCall.addArgument(srcDst);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(dst);
}

// PostDec (i--)

void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitLoad(srcDst, regT1, regT0);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));

    if (dst == srcDst) // x = x-- is a noop for ints.
        return;

    emitStoreInt32(dst, regT0);

    addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}

void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter); // int32 check
    if (dst != srcDst)
        linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_post_dec);
    stubCall.addArgument(srcDst);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(dst);
}

// PreInc (++i)

void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}

void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_pre_inc);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}

// PreDec (--i)

void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitLoad(srcDst, regT1, regT0);

    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branchSub32(Overflow, Imm32(1), regT0));
    emitStoreInt32(srcDst, regT0, true);
}

void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(srcDst);
    stubCall.call(srcDst);
}

// Addition (+)

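// The arithmetic emitters below consult the OperandTypes packed into the
// fourth bytecode operand; these appear to be static result-type predictions
// recorded when the bytecode was generated, and they let provably unnecessary
// number checks be skipped on both the fast path and the matching slow path.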
void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1);
        stubCall.addArgument(op2);
        stubCall.call(dst);
        return;
    }

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchAdd32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    addSlowCase(branchAdd32(Overflow, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    addDouble(fpRegT1, fpRegT0);
    emitStoreDouble(dst, fpRegT0);

    end.link(this);
}

void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
        return;

    unsigned op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint())
            linkSlowCase(iter); // non-sse case
        else {
            ResultType opType = op == op1 ? types.first() : types.second();
            if (!opType.definitelyIsNumber())
                linkSlowCase(iter); // double check
        }
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_add);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// Subtraction (-)

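// Reminder on the two-operand MacroAssembler forms used here: the destination
// is the second argument, so subDouble(fpRegT0, fpRegT1) computes
// fpRegT1 -= fpRegT0 (this reading follows from which register is stored
// afterwards).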
void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    if (isOperandConstantImmediateInt(op2)) {
        emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchSub32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag));
    addSlowCase(branchSub32(Overflow, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    subDouble(fpRegT0, fpRegT1);
    emitStoreDouble(dst, fpRegT1);

    end.link(this);
}

void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
            linkSlowCase(iter); // int32 or double check
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITStubCall stubCall(this, cti_op_sub);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

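// emitBinaryDoubleOp() is the shared double fallback for the binary arithmetic
// ops and the fused compare-and-jump ops. It is entered through the two
// JumpLists the int32 fast path accumulated: notInt32Op1 covers "op1 is not an
// int32, op2 is unknown", notInt32Op2 covers "op1 is an int32, op2 is not".
// For the arithmetic opcodes 'dst' names the virtual register to store into;
// for the jump opcodes it is the branch target.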
void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
    JumpList end;

    if (!notInt32Op1.empty()) {
        // Double case 1: Op1 is not int32; Op2 is unknown.
        notInt32Op1.link(this);

        ASSERT(op1IsInRegisters);

        // Verify Op1 is double.
        if (!types.first().definitelyIsNumber())
            addSlowCase(branch32(Above, regT1, Imm32(JSValue::LowestTag)));

        if (!op2IsInRegisters)
            emitLoad(op2, regT3, regT2);

        Jump doubleOp2 = branch32(Below, regT3, Imm32(JSValue::LowestTag));

        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

        convertInt32ToDouble(regT2, fpRegT0);
        Jump doTheMath = jump();

        // Load Op2 as double into double register.
        doubleOp2.link(this);
        emitLoadDouble(op2, fpRegT0);

        // Do the math.
        doTheMath.link(this);
        switch (opcodeID) {
        case op_mul:
            emitLoadDouble(op1, fpRegT2);
            mulDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_add:
            emitLoadDouble(op1, fpRegT2);
            addDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_sub:
            emitLoadDouble(op1, fpRegT1);
            subDouble(fpRegT0, fpRegT1);
            emitStoreDouble(dst, fpRegT1);
            break;
        case op_div:
            emitLoadDouble(op1, fpRegT1);
            divDouble(fpRegT0, fpRegT1);
            emitStoreDouble(dst, fpRegT1);
            break;
        case op_jnless:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        case op_jless:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
            break;
        case op_jlesseq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
            break;
        case op_jnlesseq:
            emitLoadDouble(op1, fpRegT2);
            addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
            break;
        default:
            ASSERT_NOT_REACHED();
        }

        if (!notInt32Op2.empty())
            end.append(jump());
    }

    if (!notInt32Op2.empty()) {
        // Double case 2: Op1 is int32; Op2 is not int32.
        notInt32Op2.link(this);

        ASSERT(op2IsInRegisters);

        if (!op1IsInRegisters)
            emitLoadPayload(op1, regT0);

        convertInt32ToDouble(regT0, fpRegT0);

        // Verify op2 is double.
        if (!types.second().definitelyIsNumber())
            addSlowCase(branch32(Above, regT3, Imm32(JSValue::LowestTag)));

        // Do the math.
        switch (opcodeID) {
        case op_mul:
            emitLoadDouble(op2, fpRegT2);
            mulDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_add:
            emitLoadDouble(op2, fpRegT2);
            addDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_sub:
            emitLoadDouble(op2, fpRegT2);
            subDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_div:
            emitLoadDouble(op2, fpRegT2);
            divDouble(fpRegT2, fpRegT0);
            emitStoreDouble(dst, fpRegT0);
            break;
        case op_jnless:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        case op_jless:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
            break;
        case op_jnlesseq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
            break;
        case op_jlesseq:
            emitLoadDouble(op2, fpRegT1);
            addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
            break;
        default:
            ASSERT_NOT_REACHED();
        }
    }

    end.link(this);
}

// Multiplication (*)

void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

    // Int32 case.
    move(regT0, regT3);
    addSlowCase(branchMul32(Overflow, regT2, regT0));
    addSlowCase(branchTest32(Zero, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

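// A zero int32 product needs care because ECMAScript multiplication produces
// -0 when a zero operand is multiplied by a negative one (e.g. (-1) * 0 is
// -0), and -0 has no int32 representation. The fast path therefore sends every
// zero result to the slow path below: if neither operand had its sign bit set
// a plain int32 0 is stored, otherwise the stub computes the double result.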
void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    Jump overflow = getSlowCase(iter); // overflow check
    linkSlowCase(iter); // zero result check

    Jump negZero = branchOr32(Signed, regT2, regT3);
    emitStoreInt32(dst, Imm32(0), (op1 == dst || op2 == dst));

    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));

    negZero.link(this);
    overflow.link(this);

    if (!supportsFloatingPoint()) {
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
    }

    if (supportsFloatingPoint()) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    Label jitStubCall(this);
    JITStubCall stubCall(this, cti_op_mul);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// Division (/)

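// Integer division is not attempted directly: both int32 operands are
// converted to doubles and divided, then branchConvertDoubleToInt32() tries to
// turn the quotient back into an int32. If that fails (a fractional result
// such as 1/2, or -0) the quotient is stored as a double instead.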
void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint()) {
        addSlowCase(jump());
        return;
    }

    // Int32 divide.
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    JumpList end;

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);

    notInt32Op1.append(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

    convertInt32ToDouble(regT0, fpRegT0);
    convertInt32ToDouble(regT2, fpRegT1);
    divDouble(fpRegT1, fpRegT0);

    JumpList doubleResult;
    branchConvertDoubleToInt32(fpRegT0, regT0, doubleResult, fpRegT1);

    // Int32 result.
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
    end.append(jump());

    // Double result.
    doubleResult.link(this);
    emitStoreDouble(dst, fpRegT0);
    end.append(jump());

    // Double divide.
    emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}

void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!supportsFloatingPoint())
        linkSlowCase(iter);
    else {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter); // double check

        if (!types.second().definitelyIsNumber()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // double check
        }
    }

    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

// Mod (%)

/* ------------------------------ BEGIN: OP_MOD ------------------------------ */

#if CPU(X86) || CPU(X86_64)

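// The x86 fast path leans on the hardware divider: cdq sign-extends eax into
// edx:eax and idivl leaves the quotient in eax and the remainder in edx. The
// addSlowCase guards cover the two operand combinations idivl faults on, a
// zero divisor and INT_MIN / -1, and a zero remainder with a negative dividend
// is fixed up afterwards so that the result is -0.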
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
        emitLoad(op1, X86Registers::edx, X86Registers::eax);
        move(Imm32(getConstantOperand(op2).asInt32()), X86Registers::ecx);
        addSlowCase(branch32(NotEqual, X86Registers::edx, Imm32(JSValue::Int32Tag)));
        if (getConstantOperand(op2).asInt32() == -1)
            addSlowCase(branch32(Equal, X86Registers::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
    } else {
        emitLoad2(op1, X86Registers::edx, X86Registers::eax, op2, X86Registers::ebx, X86Registers::ecx);
        addSlowCase(branch32(NotEqual, X86Registers::edx, Imm32(JSValue::Int32Tag)));
        addSlowCase(branch32(NotEqual, X86Registers::ebx, Imm32(JSValue::Int32Tag)));

        addSlowCase(branch32(Equal, X86Registers::eax, Imm32(0x80000000))); // -2147483648 / -1 => EXC_ARITHMETIC
        addSlowCase(branch32(Equal, X86Registers::ecx, Imm32(0))); // divide by 0
    }

    move(X86Registers::eax, X86Registers::ebx); // Save dividend payload, to check its sign if the remainder is 0.
    m_assembler.cdq();
    m_assembler.idivl_r(X86Registers::ecx);

    // If the remainder is zero and the dividend is negative, the result is -0.
    Jump storeResult1 = branchTest32(NonZero, X86Registers::edx);
    Jump storeResult2 = branchTest32(Zero, X86Registers::ebx, Imm32(0x80000000)); // not negative
    emitStore(dst, jsNumber(m_globalData, -0.0));
    Jump end = jump();

    storeResult1.link(this);
    storeResult2.link(this);
    emitStoreInt32(dst, X86Registers::edx, (op1 == dst || op2 == dst));
    end.link(this);
}

void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2) && getConstantOperand(op2).asInt32() != 0) {
        linkSlowCase(iter); // int32 check
        if (getConstantOperand(op2).asInt32() == -1)
            linkSlowCase(iter); // 0x80000000 check
    } else {
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // int32 check
        linkSlowCase(iter); // 0x80000000 check
        linkSlowCase(iter); // 0 check
    }

    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
}

#else // CPU(X86) || CPU(X86_64)

void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if ENABLE(JIT_OPTIMIZE_MOD)
    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    addSlowCase(branch32(NotEqual, regT1, Imm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, Imm32(JSValue::Int32Tag)));

    addSlowCase(branch32(Equal, regT2, Imm32(0)));

    emitNakedCall(m_globalData->jitStubs.ctiSoftModulo());

    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
#else
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(dst);
#endif
}

void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if ENABLE(JIT_OPTIMIZE_MOD)
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1);
    stubCall.addArgument(op2);
    stubCall.call(result);
#else
    UNUSED_PARAM(currentInstruction);
    UNUSED_PARAM(iter);
    ASSERT_NOT_REACHED();
#endif
}

#endif // CPU(X86) || CPU(X86_64)

/* ------------------------------ END: OP_MOD ------------------------------ */

#endif // USE(JSVALUE32_64)

}

#endif // ENABLE(JIT)