]> git.saurik.com Git - apple/javascriptcore.git/blame - jit/JITArithmetic32_64.cpp
JavaScriptCore-7600.1.4.16.1.tar.gz
[apple/javascriptcore.git] / jit / JITArithmetic32_64.cpp
CommitLineData
4e4e5a6f
A
1/*
2* Copyright (C) 2008 Apple Inc. All rights reserved.
3*
4* Redistribution and use in source and binary forms, with or without
5* modification, are permitted provided that the following conditions
6* are met:
7* 1. Redistributions of source code must retain the above copyright
8* notice, this list of conditions and the following disclaimer.
9* 2. Redistributions in binary form must reproduce the above copyright
10* notice, this list of conditions and the following disclaimer in the
11* documentation and/or other materials provided with the distribution.
12*
13* THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24*/
25
26#include "config.h"
4e4e5a6f
A
27
28#if ENABLE(JIT)
14957cd0
A
29#if USE(JSVALUE32_64)
30#include "JIT.h"
4e4e5a6f
A
31
32#include "CodeBlock.h"
93a37866 33#include "JITInlines.h"
4e4e5a6f
A
34#include "JITStubs.h"
35#include "JSArray.h"
36#include "JSFunction.h"
37#include "Interpreter.h"
81345200 38#include "JSCInlines.h"
4e4e5a6f
A
39#include "ResultType.h"
40#include "SamplingTool.h"
81345200 41#include "SlowPathCall.h"
4e4e5a6f 42
4e4e5a6f
A
43
44namespace JSC {
45
4e4e5a6f
A
// op_negate: arithmetic negation (unary minus).
// Fast path 1 (int32): negate the payload, but only when it is neither 0
// (negating 0 must produce -0, a double) nor 0x80000000 (INT_MIN, whose
// negation overflows int32). Both excluded values have all of their low
// 31 bits clear, which is exactly what the 0x7fffffff mask test detects.
// Fast path 2 (double): flip the sign bit held in the tag (high) word.
// Anything else falls through to the slow path.
void JIT::emit_op_negate(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int src = currentInstruction[2].u.operand;

    emitLoad(src, regT1, regT0);

    Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    // Slow case when (payload & 0x7fffffff) == 0, i.e. payload is 0 or INT_MIN.
    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
    neg32(regT0);
    emitStoreInt32(dst, regT0, (dst == src));

    Jump end = jump();

    srcNotInt.link(this);
    // Tags strictly above LowestTag are not doubles: take the slow path.
    addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));

    // Negate the double by toggling the IEEE-754 sign bit in the high word.
    xor32(TrustedImm32(1 << 31), regT1);
    store32(regT1, tagFor(dst));
    if (dst != src)
        store32(regT0, payloadFor(dst));

    end.link(this);
}
70
// Slow path for op_negate. Links exactly the two slow cases recorded by
// emit_op_negate, in emission order, then falls back to the generic C++
// slow path (which handles -0, INT_MIN, and non-number operands).
void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_negate);
    slowPathCall.call();
}
79
// Shared emitter for the fused compare-and-branch opcodes (jless, jlesseq,
// jgreater, ...). Fast paths:
//   - one operand is a constant single-character string: compare the other
//     operand's first character directly (cell + string-load checks go to
//     the slow path);
//   - int32 vs int32 (with constant-immediate specializations; when the
//     constant is on the left, the condition is commute()d so the variable
//     operand can stay on the left of branch32);
//   - double vs double, handled by emitBinaryDoubleOp when the hardware
//     supports floating point.
// The addSlowCase order here must be mirrored by emit_compareAndJumpSlow.
void JIT::emit_compareAndJump(OpcodeID opcode, int op1, int op2, unsigned target, RelationalCondition condition)
{
    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Character less.
    if (isOperandConstantImmediateChar(op1)) {
        emitLoad(op2, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        // Constant is on the left: commute the condition.
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op1)) {
        emitLoad(op2, regT3, regT2);
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        addJump(branch32(condition, regT0, regT2), target);
    }

    if (!supportsFloatingPoint()) {
        // No FPU: any non-int32 operand goes to the slow path.
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double less.
    emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
    end.link(this);
}
130
// Slow path for the fused compare-and-branch opcodes. The number and order
// of linkSlowCase calls must exactly mirror the addSlowCase calls made by
// emit_compareAndJump for the same operand configuration. After linking,
// reload both operands, call the C++ comparison helper, and branch back to
// the hot path on its boolean result (inverted for the jn* opcodes).
void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        // Char fast path: the cell-tag check plus the failures recorded by
        // emitLoadCharacterString — four slow cases in total.
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
    } else {
        if (!supportsFloatingPoint()) {
            if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!isOperandConstantImmediateInt(op1)) {
                linkSlowCase(iter); // double check
                linkSlowCase(iter); // int32 check
            }
            if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
                linkSlowCase(iter); // double check
        }
    }
    emitLoad(op1, regT1, regT0);
    emitLoad(op2, regT3, regT2);
    callOperation(operation, regT1, regT0, regT3, regT2);
    emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
}
157
4e4e5a6f
A
158// LeftShift (<<)
159
// op_lshift (<<). Fast path requires both operands to be int32. When the
// shift amount is a constant it is folded into the instruction and only
// op1 needs a tag check.
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        // Constant shift amount: only op1 needs to be loaded and checked.
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
        emitStoreInt32(dst, regT0, dst == op1);
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    // A constant-int op1 needs no tag check; op2 always does here.
    if (!isOperandConstantImmediateInt(op1))
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
    lshift32(regT2, regT0);
    emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
}
181
// Slow path for op_lshift. Mirrors the conditional addSlowCase emission in
// emit_op_lshift: the first (op1) int32 check exists only when neither
// operand is a constant int; the second check is always present.
void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_lshift);
    slowPathCall.call();
}
194
195// RightShift (>>) and UnsignedRightShift (>>>) helper
196
// Shared emitter for op_rshift (>>) and op_urshift (>>>), selected by
// isUnsigned. Fast path requires int32 operands. A constant shift amount
// is pre-masked to 5 bits, matching ECMAScript's "shift count mod 32"
// semantics, and a zero shift emits no shift instruction at all.
void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    // Slow case of rshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitLoad(op1, regT1, regT0);
        addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        if (shift) {
            if (isUnsigned)
                urshift32(Imm32(shift), regT0);
            else
                rshift32(Imm32(shift), regT0);
        }
        emitStoreInt32(dst, regT0, dst == op1);
    } else {
        emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
        if (!isOperandConstantImmediateInt(op1))
            addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
        addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
        if (isUnsigned)
            urshift32(regT2, regT0);
        else
            rshift32(regT2, regT0);
        emitStoreInt32(dst, regT0, dst == op1);
    }
}
228
// Slow path for op_rshift / op_urshift. Before falling back to the generic
// C++ helper, tries an intermediate fast path: if op1 is a double that
// truncates cleanly to int32 (and op2 is an int), do the shift inline and
// jump back to the hot path. Register assumptions (op1 in regT1:regT0,
// op2 in regT3:regT2) come from emitRightShift and must stay in sync.
void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32() & 0x1f;
        // op1 = regT1:regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            // NOTE(review): AboveOrEqual also rejects tag == LowestTag (a
            // valid double tag) — conservative vs. the Above used in the
            // non-constant branch below; such values take the C++ slow path.
            failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
            emitLoadDouble(op1, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift) {
                if (isUnsigned)
                    urshift32(Imm32(shift), regT0);
                else
                    rshift32(Imm32(shift), regT0);
            }
            move(TrustedImm32(JSValue::Int32Tag), regT1);
            emitStoreInt32(dst, regT0, false);
            // Uses OPCODE_LENGTH(op_rshift) even for urshift — presumably
            // both opcodes have the same length; confirm if lengths diverge.
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
    } else {
        // op1 = regT1:regT0
        // op2 = regT3:regT2
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag))); // op1 is not a double
                emitLoadDouble(op1, fpRegT0);
                failures.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag))); // op2 is not an int
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                if (isUnsigned)
                    urshift32(regT2, regT0);
                else
                    rshift32(regT2, regT0);
                move(TrustedImm32(JSValue::Int32Tag), regT1);
                emitStoreInt32(dst, regT0, false);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, isUnsigned ? slow_path_urshift : slow_path_rshift);
    slowPathCall.call();
}
282
283// RightShift (>>)
284
285void JIT::emit_op_rshift(Instruction* currentInstruction)
286{
287 emitRightShift(currentInstruction, false);
288}
289
290void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
291{
292 emitRightShiftSlowCase(currentInstruction, iter, false);
293}
294
295// UnsignedRightShift (>>>)
296
297void JIT::emit_op_urshift(Instruction* currentInstruction)
298{
299 emitRightShift(currentInstruction, true);
300}
301
302void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
303{
304 emitRightShiftSlowCase(currentInstruction, iter, true);
305}
306
81345200
A
307void JIT::emit_op_unsigned(Instruction* currentInstruction)
308{
309 int result = currentInstruction[1].u.operand;
310 int op1 = currentInstruction[2].u.operand;
311
312 emitLoad(op1, regT1, regT0);
313
314 addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
315 addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
316 emitStoreInt32(result, regT0, result == op1);
317}
318
// Slow path for op_unsigned: links the tag check and the negative-payload
// check recorded by emit_op_unsigned, then calls the C++ slow path.
void JIT::emitSlow_op_unsigned(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // negative payload check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_unsigned);
    slowPathCall.call();
}
327
4e4e5a6f
A
328// BitAnd (&)
329
330void JIT::emit_op_bitand(Instruction* currentInstruction)
331{
81345200
A
332 int dst = currentInstruction[1].u.operand;
333 int op1 = currentInstruction[2].u.operand;
334 int op2 = currentInstruction[3].u.operand;
4e4e5a6f 335
81345200 336 int op;
4e4e5a6f
A
337 int32_t constant;
338 if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
339 emitLoad(op, regT1, regT0);
14957cd0 340 addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
4e4e5a6f 341 and32(Imm32(constant), regT0);
81345200 342 emitStoreInt32(dst, regT0, dst == op);
4e4e5a6f
A
343 return;
344 }
345
346 emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
14957cd0
A
347 addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
348 addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
4e4e5a6f 349 and32(regT2, regT0);
81345200 350 emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
4e4e5a6f
A
351}
352
// Slow path for op_bitand. Mirrors emit_op_bitand: two int32 checks in the
// general case, only one when either operand was a constant int.
void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitand);
    slowPathCall.call();
}
365
366// BitOr (|)
367
368void JIT::emit_op_bitor(Instruction* currentInstruction)
369{
81345200
A
370 int dst = currentInstruction[1].u.operand;
371 int op1 = currentInstruction[2].u.operand;
372 int op2 = currentInstruction[3].u.operand;
4e4e5a6f 373
81345200 374 int op;
4e4e5a6f
A
375 int32_t constant;
376 if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
377 emitLoad(op, regT1, regT0);
14957cd0 378 addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
4e4e5a6f 379 or32(Imm32(constant), regT0);
81345200 380 emitStoreInt32(dst, regT0, op == dst);
4e4e5a6f
A
381 return;
382 }
383
384 emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
14957cd0
A
385 addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
386 addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
4e4e5a6f 387 or32(regT2, regT0);
81345200 388 emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
4e4e5a6f
A
389}
390
// Slow path for op_bitor. Mirrors emit_op_bitor: two int32 checks in the
// general case, only one when either operand was a constant int.
void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitor);
    slowPathCall.call();
}
403
404// BitXor (^)
405
406void JIT::emit_op_bitxor(Instruction* currentInstruction)
407{
81345200
A
408 int dst = currentInstruction[1].u.operand;
409 int op1 = currentInstruction[2].u.operand;
410 int op2 = currentInstruction[3].u.operand;
4e4e5a6f 411
81345200 412 int op;
4e4e5a6f
A
413 int32_t constant;
414 if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
415 emitLoad(op, regT1, regT0);
14957cd0 416 addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
4e4e5a6f 417 xor32(Imm32(constant), regT0);
81345200 418 emitStoreInt32(dst, regT0, op == dst);
4e4e5a6f
A
419 return;
420 }
421
422 emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
14957cd0
A
423 addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
424 addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
4e4e5a6f 425 xor32(regT2, regT0);
81345200 426 emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
4e4e5a6f
A
427}
428
// Slow path for op_bitxor. Mirrors emit_op_bitxor: two int32 checks in the
// general case, only one when either operand was a constant int.
void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;

    if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
        linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // int32 check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitxor);
    slowPathCall.call();
}
441
93a37866 442void JIT::emit_op_inc(Instruction* currentInstruction)
4e4e5a6f 443{
81345200 444 int srcDst = currentInstruction[1].u.operand;
4e4e5a6f
A
445
446 emitLoad(srcDst, regT1, regT0);
447
14957cd0
A
448 addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
449 addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
81345200 450 emitStoreInt32(srcDst, regT0, true);
4e4e5a6f
A
451}
452
// Slow path for op_inc: links the tag check and the overflow check recorded
// by emit_op_inc, then calls the C++ slow path.
void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_inc);
    slowPathCall.call();
}
461
93a37866 462void JIT::emit_op_dec(Instruction* currentInstruction)
4e4e5a6f 463{
81345200 464 int srcDst = currentInstruction[1].u.operand;
4e4e5a6f
A
465
466 emitLoad(srcDst, regT1, regT0);
467
14957cd0
A
468 addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
469 addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
81345200 470 emitStoreInt32(srcDst, regT0, true);
4e4e5a6f
A
471}
472
// Slow path for op_dec: links the tag check and the overflow check recorded
// by emit_op_dec, then calls the C++ slow path.
void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter); // int32 check
    linkSlowCase(iter); // overflow check

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_dec);
    slowPathCall.call();
}
481
482// Addition (+)
483
// op_add (+). Uses the profiled OperandTypes in operand 4:
//  - if either side might not be a number, emit an unconditional call to the
//    C++ slow path (the no-arg addSlowCase() records a dummy entry, matched
//    by linkDummySlowCase in emitSlow_op_add);
//  - constant-int operand: folded via emitAdd32Constant;
//  - otherwise int32 fast path with overflow check, and a double path via
//    emitBinaryDoubleOp when the hardware supports floating point.
void JIT::emit_op_add(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        addSlowCase();
        JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
        slowPathCall.call();
        return;
    }

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    int op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        // op is the non-constant operand; pick the matching profiled type.
        emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchAdd32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
527
// Emits dst = op + constant where 'constant' is a known int32.
// Int32 fast path adds into regT0 with an overflow slow case; the double
// path converts the constant and adds it to the operand's double value.
void JIT::emitAdd32Constant(int dst, int op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT2);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    // Three-operand form: regT0 = regT2 + constant, so regT2 keeps the
    // original payload for the slow path.
    addSlowCase(branchAdd32(Overflow, regT2, Imm32(constant), regT0));
    emitStoreInt32(dst, regT0, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    // Only need the is-double check when profiling says op may be non-number.
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    addDouble(fpRegT1, fpRegT0);
    emitStoreDouble(dst, fpRegT0);

    end.link(this);
}
554
// Slow path for op_add. The linkSlowCase sequence must exactly mirror the
// addSlowCase emission in emit_op_add / emitAdd32Constant /
// emitBinaryDoubleOp for each operand-type configuration.
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        // Fast path already called the slow path unconditionally and
        // recorded only a dummy slow case; nothing more to emit.
        linkDummySlowCase(iter);
        return;
    }

    int op;
    int32_t constant;
    if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint())
            linkSlowCase(iter); // non-sse case
        else {
            ResultType opType = op == op1 ? types.first() : types.second();
            if (!opType.definitelyIsNumber())
                linkSlowCase(iter); // double check
        }
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
    slowPathCall.call();
}
598
599// Subtraction (-)
600
// op_sub (-). A constant-int right operand is folded via emitSub32Constant;
// otherwise int32 fast path with overflow check, plus a double path via
// emitBinaryDoubleOp when the hardware supports floating point.
void JIT::emit_op_sub(Instruction* currentInstruction)
{
    int dst = currentInstruction[1].u.operand;
    int op1 = currentInstruction[2].u.operand;
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    JumpList notInt32Op1;
    JumpList notInt32Op2;

    // Subtraction is not commutative, so only a constant op2 can be folded.
    if (isOperandConstantImmediateInt(op2)) {
        emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
        return;
    }

    emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
    notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
    notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));

    // Int32 case.
    addSlowCase(branchSub32(Overflow, regT2, regT0));
    emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));

    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32Op1);
        addSlowCase(notInt32Op2);
        return;
    }
    Jump end = jump();

    // Double case.
    emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
    end.link(this);
}
635
// Emits dst = op - constant where 'constant' is a known int32.
// Int32 fast path computes into regT2 (three-operand branchSub32, with
// regT3 as scratch) so regT0 keeps the original payload for the slow path.
// The double path computes fpRegT1 = op - constant and stores fpRegT1.
void JIT::emitSub32Constant(int dst, int op, int32_t constant, ResultType opType)
{
    // Int32 case.
    emitLoad(op, regT1, regT0);
    Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
    addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2, regT3));
    emitStoreInt32(dst, regT2, (op == dst));

    // Double case.
    if (!supportsFloatingPoint()) {
        addSlowCase(notInt32);
        return;
    }
    Jump end = jump();

    notInt32.link(this);
    // Only need the is-double check when profiling says op may be non-number.
    if (!opType.definitelyIsNumber())
        addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
    move(Imm32(constant), regT2);
    convertInt32ToDouble(regT2, fpRegT0);
    emitLoadDouble(op, fpRegT1);
    // subDouble(a, b) computes b -= a, so fpRegT1 = op - constant.
    subDouble(fpRegT0, fpRegT1);
    emitStoreDouble(dst, fpRegT1);

    end.link(this);
}
662
// Slow path for op_sub. The linkSlowCase sequence must exactly mirror the
// addSlowCase emission in emit_op_sub / emitSub32Constant /
// emitBinaryDoubleOp for each operand-type configuration.
void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
            linkSlowCase(iter); // int32 or double check
    } else {
        linkSlowCase(iter); // overflow check

        if (!supportsFloatingPoint()) {
            linkSlowCase(iter); // int32 check
            linkSlowCase(iter); // int32 check
        } else {
            if (!types.first().definitelyIsNumber())
                linkSlowCase(iter); // double check

            if (!types.second().definitelyIsNumber()) {
                linkSlowCase(iter); // int32 check
                linkSlowCase(iter); // double check
            }
        }
    }

    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_sub);
    slowPathCall.call();
}
693
81345200 694void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
4e4e5a6f
A
695{
696 JumpList end;
697
698 if (!notInt32Op1.empty()) {
699 // Double case 1: Op1 is not int32; Op2 is unknown.
700 notInt32Op1.link(this);
701
702 ASSERT(op1IsInRegisters);
703
704 // Verify Op1 is double.
705 if (!types.first().definitelyIsNumber())
14957cd0 706 addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
4e4e5a6f
A
707
708 if (!op2IsInRegisters)
709 emitLoad(op2, regT3, regT2);
710
14957cd0 711 Jump doubleOp2 = branch32(Below, regT3, TrustedImm32(JSValue::LowestTag));
4e4e5a6f
A
712
713 if (!types.second().definitelyIsNumber())
14957cd0 714 addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
4e4e5a6f
A
715
716 convertInt32ToDouble(regT2, fpRegT0);
717 Jump doTheMath = jump();
718
719 // Load Op2 as double into double register.
720 doubleOp2.link(this);
721 emitLoadDouble(op2, fpRegT0);
722
723 // Do the math.
724 doTheMath.link(this);
725 switch (opcodeID) {
726 case op_mul:
727 emitLoadDouble(op1, fpRegT2);
728 mulDouble(fpRegT2, fpRegT0);
729 emitStoreDouble(dst, fpRegT0);
730 break;
731 case op_add:
732 emitLoadDouble(op1, fpRegT2);
733 addDouble(fpRegT2, fpRegT0);
734 emitStoreDouble(dst, fpRegT0);
735 break;
736 case op_sub:
737 emitLoadDouble(op1, fpRegT1);
738 subDouble(fpRegT0, fpRegT1);
739 emitStoreDouble(dst, fpRegT1);
740 break;
6fe7ccc8 741 case op_div: {
4e4e5a6f
A
742 emitLoadDouble(op1, fpRegT1);
743 divDouble(fpRegT0, fpRegT1);
6fe7ccc8 744
6fe7ccc8
A
745 // Is the result actually an integer? The DFG JIT would really like to know. If it's
746 // not an integer, we increment a count. If this together with the slow case counter
747 // are below threshold then the DFG JIT will compile this division with a speculation
748 // that the remainder is zero.
749
750 // As well, there are cases where a double result here would cause an important field
751 // in the heap to sometimes have doubles in it, resulting in double predictions getting
752 // propagated to a use site where it might cause damage (such as the index to an array
753 // access). So if we are DFG compiling anything in the program, we want this code to
754 // ensure that it produces integers whenever possible.
755
756 // FIXME: This will fail to convert to integer if the result is zero. We should
757 // distinguish between positive zero and negative zero here.
758
759 JumpList notInteger;
760 branchConvertDoubleToInt32(fpRegT1, regT2, notInteger, fpRegT0);
761 // If we've got an integer, we might as well make that the result of the division.
762 emitStoreInt32(dst, regT2);
763 Jump isInteger = jump();
764 notInteger.link(this);
765 add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
4e4e5a6f 766 emitStoreDouble(dst, fpRegT1);
6fe7ccc8 767 isInteger.link(this);
4e4e5a6f 768 break;
6fe7ccc8 769 }
4e4e5a6f
A
770 case op_jless:
771 emitLoadDouble(op1, fpRegT2);
772 addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
773 break;
774 case op_jlesseq:
775 emitLoadDouble(op1, fpRegT2);
776 addJump(branchDouble(DoubleLessThanOrEqual, fpRegT2, fpRegT0), dst);
777 break;
6fe7ccc8
A
778 case op_jgreater:
779 emitLoadDouble(op1, fpRegT2);
780 addJump(branchDouble(DoubleGreaterThan, fpRegT2, fpRegT0), dst);
781 break;
782 case op_jgreatereq:
783 emitLoadDouble(op1, fpRegT2);
784 addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT2, fpRegT0), dst);
785 break;
786 case op_jnless:
787 emitLoadDouble(op1, fpRegT2);
788 addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
789 break;
4e4e5a6f
A
790 case op_jnlesseq:
791 emitLoadDouble(op1, fpRegT2);
792 addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT0, fpRegT2), dst);
793 break;
6fe7ccc8
A
794 case op_jngreater:
795 emitLoadDouble(op1, fpRegT2);
796 addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT0, fpRegT2), dst);
797 break;
798 case op_jngreatereq:
799 emitLoadDouble(op1, fpRegT2);
800 addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT0, fpRegT2), dst);
801 break;
4e4e5a6f 802 default:
93a37866 803 RELEASE_ASSERT_NOT_REACHED();
4e4e5a6f
A
804 }
805
806 if (!notInt32Op2.empty())
807 end.append(jump());
808 }
809
810 if (!notInt32Op2.empty()) {
811 // Double case 2: Op1 is int32; Op2 is not int32.
812 notInt32Op2.link(this);
813
814 ASSERT(op2IsInRegisters);
815
816 if (!op1IsInRegisters)
817 emitLoadPayload(op1, regT0);
818
819 convertInt32ToDouble(regT0, fpRegT0);
820
821 // Verify op2 is double.
822 if (!types.second().definitelyIsNumber())
14957cd0 823 addSlowCase(branch32(Above, regT3, TrustedImm32(JSValue::LowestTag)));
4e4e5a6f
A
824
825 // Do the math.
826 switch (opcodeID) {
827 case op_mul:
828 emitLoadDouble(op2, fpRegT2);
829 mulDouble(fpRegT2, fpRegT0);
830 emitStoreDouble(dst, fpRegT0);
831 break;
832 case op_add:
833 emitLoadDouble(op2, fpRegT2);
834 addDouble(fpRegT2, fpRegT0);
835 emitStoreDouble(dst, fpRegT0);
836 break;
837 case op_sub:
838 emitLoadDouble(op2, fpRegT2);
839 subDouble(fpRegT2, fpRegT0);
840 emitStoreDouble(dst, fpRegT0);
841 break;
6fe7ccc8 842 case op_div: {
4e4e5a6f
A
843 emitLoadDouble(op2, fpRegT2);
844 divDouble(fpRegT2, fpRegT0);
6fe7ccc8
A
845 // Is the result actually an integer? The DFG JIT would really like to know. If it's
846 // not an integer, we increment a count. If this together with the slow case counter
847 // are below threshold then the DFG JIT will compile this division with a speculation
848 // that the remainder is zero.
849
850 // As well, there are cases where a double result here would cause an important field
851 // in the heap to sometimes have doubles in it, resulting in double predictions getting
852 // propagated to a use site where it might cause damage (such as the index to an array
853 // access). So if we are DFG compiling anything in the program, we want this code to
854 // ensure that it produces integers whenever possible.
855
856 // FIXME: This will fail to convert to integer if the result is zero. We should
857 // distinguish between positive zero and negative zero here.
858
859 JumpList notInteger;
860 branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
861 // If we've got an integer, we might as well make that the result of the division.
862 emitStoreInt32(dst, regT2);
863 Jump isInteger = jump();
864 notInteger.link(this);
865 add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
4e4e5a6f 866 emitStoreDouble(dst, fpRegT0);
6fe7ccc8 867 isInteger.link(this);
4e4e5a6f 868 break;
6fe7ccc8 869 }
4e4e5a6f
A
870 case op_jless:
871 emitLoadDouble(op2, fpRegT1);
872 addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
873 break;
6fe7ccc8
A
874 case op_jlesseq:
875 emitLoadDouble(op2, fpRegT1);
876 addJump(branchDouble(DoubleLessThanOrEqual, fpRegT0, fpRegT1), dst);
877 break;
878 case op_jgreater:
879 emitLoadDouble(op2, fpRegT1);
880 addJump(branchDouble(DoubleGreaterThan, fpRegT0, fpRegT1), dst);
881 break;
882 case op_jgreatereq:
883 emitLoadDouble(op2, fpRegT1);
884 addJump(branchDouble(DoubleGreaterThanOrEqual, fpRegT0, fpRegT1), dst);
885 break;
886 case op_jnless:
887 emitLoadDouble(op2, fpRegT1);
888 addJump(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
889 break;
4e4e5a6f
A
890 case op_jnlesseq:
891 emitLoadDouble(op2, fpRegT1);
892 addJump(branchDouble(DoubleLessThanOrUnordered, fpRegT1, fpRegT0), dst);
893 break;
6fe7ccc8 894 case op_jngreater:
4e4e5a6f 895 emitLoadDouble(op2, fpRegT1);
6fe7ccc8
A
896 addJump(branchDouble(DoubleGreaterThanOrEqualOrUnordered, fpRegT1, fpRegT0), dst);
897 break;
898 case op_jngreatereq:
899 emitLoadDouble(op2, fpRegT1);
900 addJump(branchDouble(DoubleGreaterThanOrUnordered, fpRegT1, fpRegT0), dst);
4e4e5a6f
A
901 break;
902 default:
93a37866 903 RELEASE_ASSERT_NOT_REACHED();
4e4e5a6f
A
904 }
905 }
906
907 end.link(this);
908}
909
910// Multiplication (*)
911
912void JIT::emit_op_mul(Instruction* currentInstruction)
913{
81345200
A
914 int dst = currentInstruction[1].u.operand;
915 int op1 = currentInstruction[2].u.operand;
916 int op2 = currentInstruction[3].u.operand;
4e4e5a6f
A
917 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
918
6fe7ccc8 919 m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
6fe7ccc8 920
4e4e5a6f
A
921 JumpList notInt32Op1;
922 JumpList notInt32Op2;
923
924 emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
14957cd0
A
925 notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
926 notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
4e4e5a6f
A
927
928 // Int32 case.
929 move(regT0, regT3);
930 addSlowCase(branchMul32(Overflow, regT2, regT0));
931 addSlowCase(branchTest32(Zero, regT0));
932 emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
933
934 if (!supportsFloatingPoint()) {
935 addSlowCase(notInt32Op1);
936 addSlowCase(notInt32Op2);
937 return;
938 }
939 Jump end = jump();
940
941 // Double case.
942 emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
943 end.link(this);
944}
945
946void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
947{
81345200
A
948 int dst = currentInstruction[1].u.operand;
949 int op1 = currentInstruction[2].u.operand;
950 int op2 = currentInstruction[3].u.operand;
4e4e5a6f
A
951 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
952
953 Jump overflow = getSlowCase(iter); // overflow check
954 linkSlowCase(iter); // zero result check
955
956 Jump negZero = branchOr32(Signed, regT2, regT3);
14957cd0 957 emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));
4e4e5a6f
A
958
959 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
960
961 negZero.link(this);
6fe7ccc8
A
962 // We only get here if we have a genuine negative zero. Record this,
963 // so that the speculative JIT knows that we failed speculation
964 // because of a negative zero.
965 add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
4e4e5a6f
A
966 overflow.link(this);
967
968 if (!supportsFloatingPoint()) {
969 linkSlowCase(iter); // int32 check
970 linkSlowCase(iter); // int32 check
971 }
972
973 if (supportsFloatingPoint()) {
974 if (!types.first().definitelyIsNumber())
975 linkSlowCase(iter); // double check
976
977 if (!types.second().definitelyIsNumber()) {
978 linkSlowCase(iter); // int32 check
979 linkSlowCase(iter); // double check
980 }
981 }
982
81345200
A
983 JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mul);
984 slowPathCall.call();
4e4e5a6f
A
985}
986
987// Division (/)
988
989void JIT::emit_op_div(Instruction* currentInstruction)
990{
81345200
A
991 int dst = currentInstruction[1].u.operand;
992 int op1 = currentInstruction[2].u.operand;
993 int op2 = currentInstruction[3].u.operand;
4e4e5a6f
A
994 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
995
6fe7ccc8 996 m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
6fe7ccc8 997
4e4e5a6f
A
998 if (!supportsFloatingPoint()) {
999 addSlowCase(jump());
1000 return;
1001 }
1002
1003 // Int32 divide.
1004 JumpList notInt32Op1;
1005 JumpList notInt32Op2;
1006
1007 JumpList end;
1008
1009 emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
1010
14957cd0
A
1011 notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
1012 notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
4e4e5a6f
A
1013
1014 convertInt32ToDouble(regT0, fpRegT0);
1015 convertInt32ToDouble(regT2, fpRegT1);
1016 divDouble(fpRegT1, fpRegT0);
6fe7ccc8
A
1017 // Is the result actually an integer? The DFG JIT would really like to know. If it's
1018 // not an integer, we increment a count. If this together with the slow case counter
1019 // are below threshold then the DFG JIT will compile this division with a specualtion
1020 // that the remainder is zero.
1021
1022 // As well, there are cases where a double result here would cause an important field
1023 // in the heap to sometimes have doubles in it, resulting in double predictions getting
1024 // propagated to a use site where it might cause damage (such as the index to an array
1025 // access). So if we are DFG compiling anything in the program, we want this code to
1026 // ensure that it produces integers whenever possible.
1027
1028 // FIXME: This will fail to convert to integer if the result is zero. We should
1029 // distinguish between positive zero and negative zero here.
1030
1031 JumpList notInteger;
1032 branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
1033 // If we've got an integer, we might as well make that the result of the division.
1034 emitStoreInt32(dst, regT2);
1035 end.append(jump());
1036 notInteger.link(this);
1037 add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
1038 emitStoreDouble(dst, fpRegT0);
4e4e5a6f
A
1039 end.append(jump());
1040
1041 // Double divide.
1042 emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
1043 end.link(this);
1044}
1045
1046void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1047{
4e4e5a6f
A
1048 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1049
1050 if (!supportsFloatingPoint())
1051 linkSlowCase(iter);
1052 else {
1053 if (!types.first().definitelyIsNumber())
1054 linkSlowCase(iter); // double check
1055
1056 if (!types.second().definitelyIsNumber()) {
1057 linkSlowCase(iter); // int32 check
1058 linkSlowCase(iter); // double check
1059 }
1060 }
1061
81345200
A
1062 JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
1063 slowPathCall.call();
4e4e5a6f
A
1064}
1065
1066// Mod (%)
1067
1068/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
1069
4e4e5a6f
A
1070void JIT::emit_op_mod(Instruction* currentInstruction)
1071{
14957cd0 1072#if CPU(X86) || CPU(X86_64)
81345200
A
1073 int dst = currentInstruction[1].u.operand;
1074 int op1 = currentInstruction[2].u.operand;
1075 int op2 = currentInstruction[3].u.operand;
1076
14957cd0
A
1077 // Make sure registers are correct for x86 IDIV instructions.
1078 ASSERT(regT0 == X86Registers::eax);
1079 ASSERT(regT1 == X86Registers::edx);
1080 ASSERT(regT2 == X86Registers::ecx);
1081 ASSERT(regT3 == X86Registers::ebx);
4e4e5a6f 1082
6fe7ccc8
A
1083 emitLoad2(op1, regT0, regT3, op2, regT1, regT2);
1084 addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
1085 addSlowCase(branch32(NotEqual, regT0, TrustedImm32(JSValue::Int32Tag)));
4e4e5a6f 1086
6fe7ccc8
A
1087 move(regT3, regT0);
1088 addSlowCase(branchTest32(Zero, regT2));
1089 Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
1090 addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
1091 denominatorNotNeg1.link(this);
4e4e5a6f 1092 m_assembler.cdq();
14957cd0 1093 m_assembler.idivl_r(regT2);
6fe7ccc8
A
1094 Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
1095 addSlowCase(branchTest32(Zero, regT1));
1096 numeratorPositive.link(this);
14957cd0 1097 emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
4e4e5a6f 1098#else
81345200
A
1099 JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
1100 slowPathCall.call();
4e4e5a6f
A
1101#endif
1102}
1103
1104void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1105{
6fe7ccc8 1106#if CPU(X86) || CPU(X86_64)
4e4e5a6f
A
1107 linkSlowCase(iter);
1108 linkSlowCase(iter);
1109 linkSlowCase(iter);
6fe7ccc8
A
1110 linkSlowCase(iter);
1111 linkSlowCase(iter);
81345200
A
1112 JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
1113 slowPathCall.call();
4e4e5a6f
A
1114#else
1115 UNUSED_PARAM(currentInstruction);
1116 UNUSED_PARAM(iter);
6fe7ccc8
A
1117 // We would have really useful assertions here if it wasn't for the compiler's
1118 // insistence on attribute noreturn.
93a37866 1119 // RELEASE_ASSERT_NOT_REACHED();
4e4e5a6f
A
1120#endif
1121}
1122
4e4e5a6f
A
1123/* ------------------------------ END: OP_MOD ------------------------------ */
1124
14957cd0 1125} // namespace JSC
4e4e5a6f 1126
14957cd0 1127#endif // USE(JSVALUE32_64)
4e4e5a6f 1128#endif // ENABLE(JIT)