[apple/javascriptcore.git] / jit / JITArithmetic.cpp
1 /*
2 * Copyright (C) 2008 Apple Inc. All rights reserved.
3 *
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions
6 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
12 *
13 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
14 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
15 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
16 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
17 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
18 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
19 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
20 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
21 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
22 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
23 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
24 */
25
26 #include "config.h"
27 #include "JIT.h"
28
29 #if ENABLE(JIT)
30
31 #include "CodeBlock.h"
32 #include "JITInlineMethods.h"
33 #include "JITStubCall.h"
34 #include "JITStubs.h"
35 #include "JSArray.h"
36 #include "JSFunction.h"
37 #include "Interpreter.h"
38 #include "ResultType.h"
39 #include "SamplingTool.h"
40
41 #ifndef NDEBUG
42 #include <stdio.h>
43 #endif
44
45 using namespace std;
46
47 namespace JSC {
48
49 #if !USE(JSVALUE32_64)
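// The code below is only compiled for the JSVALUE64 and JSVALUE32 value encodings,
// i.e. when the split JSVALUE32_64 representation is not in use.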
50
51 void JIT::emit_op_lshift(Instruction* currentInstruction)
52 {
53 unsigned result = currentInstruction[1].u.operand;
54 unsigned op1 = currentInstruction[2].u.operand;
55 unsigned op2 = currentInstruction[3].u.operand;
56
57 emitGetVirtualRegisters(op1, regT0, op2, regT2);
58 // FIXME: would we be better off using 'emitJumpSlowCaseIfNotImmediateIntegers'? We *probably* ought to be consistent.
59 emitJumpSlowCaseIfNotImmediateInteger(regT0);
60 emitJumpSlowCaseIfNotImmediateInteger(regT2);
61 emitFastArithImmToInt(regT0);
62 emitFastArithImmToInt(regT2);
63 lshift32(regT2, regT0);
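// The shift operated on the untagged int32 values; the result is re-tagged as an
// immediate int before being stored.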
64 #if USE(JSVALUE32)
65 addSlowCase(branchAdd32(Overflow, regT0, regT0));
66 signExtend32ToPtr(regT0, regT0);
67 #endif
68 emitFastArithReTagImmediate(regT0, regT0);
69 emitPutVirtualRegister(result);
70 }
71
72 void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
73 {
74 unsigned result = currentInstruction[1].u.operand;
75 unsigned op1 = currentInstruction[2].u.operand;
76 unsigned op2 = currentInstruction[3].u.operand;
77
78 #if USE(JSVALUE64)
79 UNUSED_PARAM(op1);
80 UNUSED_PARAM(op2);
81 linkSlowCase(iter);
82 linkSlowCase(iter);
83 #else
84 // If we are limited to 32-bit immediates there is a third slow case, which requires the operands to have been reloaded.
85 Jump notImm1 = getSlowCase(iter);
86 Jump notImm2 = getSlowCase(iter);
87 linkSlowCase(iter);
88 emitGetVirtualRegisters(op1, regT0, op2, regT2);
89 notImm1.link(this);
90 notImm2.link(this);
91 #endif
92 JITStubCall stubCall(this, cti_op_lshift);
93 stubCall.addArgument(regT0);
94 stubCall.addArgument(regT2);
95 stubCall.call(result);
96 }
97
98 void JIT::emit_op_rshift(Instruction* currentInstruction)
99 {
100 unsigned result = currentInstruction[1].u.operand;
101 unsigned op1 = currentInstruction[2].u.operand;
102 unsigned op2 = currentInstruction[3].u.operand;
103
104 if (isOperandConstantImmediateInt(op2)) {
105 // isOperandConstantImmediateInt(op2) => 1 SlowCase
106 emitGetVirtualRegister(op1, regT0);
107 emitJumpSlowCaseIfNotImmediateInteger(regT0);
108 // Mask with 0x1f as per ECMA-262 11.7.2 step 7.
109 rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
110 } else {
111 emitGetVirtualRegisters(op1, regT0, op2, regT2);
112 if (supportsFloatingPointTruncate()) {
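// If op1 is a double, try truncating it to an int32 in place rather than taking the slow path.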
113 Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
114 #if USE(JSVALUE64)
115 // supportsFloatingPointTruncate() && USE(JSVALUE64) => 3 SlowCases
116 addSlowCase(emitJumpIfNotImmediateNumber(regT0));
117 addPtr(tagTypeNumberRegister, regT0);
118 movePtrToDouble(regT0, fpRegT0);
119 addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
120 #else
121 // supportsFloatingPointTruncate() && !USE(JSVALUE64) => 5 SlowCases (of which 1 IfNotJSCell)
122 emitJumpSlowCaseIfNotJSCell(regT0, op1);
123 addSlowCase(checkStructure(regT0, m_globalData->numberStructure.get()));
124 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
125 addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
126 addSlowCase(branchAdd32(Overflow, regT0, regT0));
127 #endif
128 lhsIsInt.link(this);
129 emitJumpSlowCaseIfNotImmediateInteger(regT2);
130 } else {
131 // !supportsFloatingPointTruncate() => 2 SlowCases
132 emitJumpSlowCaseIfNotImmediateInteger(regT0);
133 emitJumpSlowCaseIfNotImmediateInteger(regT2);
134 }
135 emitFastArithImmToInt(regT2);
136 rshift32(regT2, regT0);
137 #if USE(JSVALUE32)
138 signExtend32ToPtr(regT0, regT0);
139 #endif
140 }
141 #if USE(JSVALUE64)
142 emitFastArithIntToImmNoCheck(regT0, regT0);
143 #else
144 orPtr(Imm32(JSImmediate::TagTypeNumber), regT0);
145 #endif
146 emitPutVirtualRegister(result);
147 }
148
149 void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
150 {
151 unsigned result = currentInstruction[1].u.operand;
152 unsigned op1 = currentInstruction[2].u.operand;
153 unsigned op2 = currentInstruction[3].u.operand;
154
155 JITStubCall stubCall(this, cti_op_rshift);
156
157 if (isOperandConstantImmediateInt(op2)) {
158 linkSlowCase(iter);
159 stubCall.addArgument(regT0);
160 stubCall.addArgument(op2, regT2);
161 } else {
162 if (supportsFloatingPointTruncate()) {
163 #if USE(JSVALUE64)
164 linkSlowCase(iter);
165 linkSlowCase(iter);
166 linkSlowCase(iter);
167 #else
168 linkSlowCaseIfNotJSCell(iter, op1);
169 linkSlowCase(iter);
170 linkSlowCase(iter);
171 linkSlowCase(iter);
172 linkSlowCase(iter);
173 #endif
174 // We're reloading op1 to regT0 as we can no longer guarantee that
175 // we have not munged the operand. It may have already been shifted
176 // correctly, but it still will not have been tagged.
177 stubCall.addArgument(op1, regT0);
178 stubCall.addArgument(regT2);
179 } else {
180 linkSlowCase(iter);
181 linkSlowCase(iter);
182 stubCall.addArgument(regT0);
183 stubCall.addArgument(regT2);
184 }
185 }
186
187 stubCall.call(result);
188 }
189
190 void JIT::emit_op_urshift(Instruction* currentInstruction)
191 {
192 unsigned dst = currentInstruction[1].u.operand;
193 unsigned op1 = currentInstruction[2].u.operand;
194 unsigned op2 = currentInstruction[3].u.operand;
195
196 // The slow case of urshift makes assumptions about which registers hold the
197 // shift arguments, so any changes here must be mirrored there.
198 if (isOperandConstantImmediateInt(op2)) {
199 emitGetVirtualRegister(op1, regT0);
200 emitJumpSlowCaseIfNotImmediateInteger(regT0);
201 emitFastArithImmToInt(regT0);
202 int shift = getConstantOperand(op2).asInt32();
203 if (shift)
204 urshift32(Imm32(shift & 0x1f), regT0);
205 // An unsigned shift by a negative amount, or by a multiple of 32, is (essentially)
206 // a toUInt32 conversion, which can result in a value we cannot represent
207 // as an immediate int.
208 if (shift < 0 || !(shift & 31))
209 addSlowCase(branch32(LessThan, regT0, Imm32(0)));
210 #if USE(JSVALUE32)
211 addSlowCase(branchAdd32(Overflow, regT0, regT0));
212 signExtend32ToPtr(regT0, regT0);
213 #endif
214 emitFastArithReTagImmediate(regT0, regT0);
215 emitPutVirtualRegister(dst, regT0);
216 return;
217 }
218 emitGetVirtualRegisters(op1, regT0, op2, regT1);
219 if (!isOperandConstantImmediateInt(op1))
220 emitJumpSlowCaseIfNotImmediateInteger(regT0);
221 emitJumpSlowCaseIfNotImmediateInteger(regT1);
222 emitFastArithImmToInt(regT0);
223 emitFastArithImmToInt(regT1);
224 urshift32(regT1, regT0);
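// A result with the sign bit set is a uint32 too large to represent as an immediate int,
// so it takes the slow case.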
225 addSlowCase(branch32(LessThan, regT0, Imm32(0)));
226 #if USE(JSVALUE32)
227 addSlowCase(branchAdd32(Overflow, regT0, regT0));
228 signExtend32ToPtr(regT0, regT0);
229 #endif
230 emitFastArithReTagImmediate(regT0, regT0);
231 emitPutVirtualRegister(dst, regT0);
232 }
233
234 void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
235 {
236 unsigned dst = currentInstruction[1].u.operand;
237 unsigned op1 = currentInstruction[2].u.operand;
238 unsigned op2 = currentInstruction[3].u.operand;
239 if (isOperandConstantImmediateInt(op2)) {
240 int shift = getConstantOperand(op2).asInt32();
241 // op1 = regT0
242 linkSlowCase(iter); // int32 check
243 #if USE(JSVALUE64)
244 if (supportsFloatingPointTruncate()) {
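// Retry with op1 truncated from a double; on success the result is boxed and control
// jumps straight back to the hot path, otherwise we fall through to the stub call.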
245 JumpList failures;
246 failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
247 addPtr(tagTypeNumberRegister, regT0);
248 movePtrToDouble(regT0, fpRegT0);
249 failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
250 if (shift)
251 urshift32(Imm32(shift & 0x1f), regT0);
252 if (shift < 0 || !(shift & 31))
253 failures.append(branch32(LessThan, regT0, Imm32(0)));
254 emitFastArithReTagImmediate(regT0, regT0);
255 emitPutVirtualRegister(dst, regT0);
256 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
257 failures.link(this);
258 }
259 #endif // JSVALUE64
260 if (shift < 0 || !(shift & 31))
261 linkSlowCase(iter); // failed to box in hot path
262 #if USE(JSVALUE32)
263 linkSlowCase(iter); // Couldn't box result
264 #endif
265 } else {
266 // op1 = regT0
267 // op2 = regT1
268 if (!isOperandConstantImmediateInt(op1)) {
269 linkSlowCase(iter); // int32 check -- op1 is not an int
270 #if USE(JSVALUE64)
271 if (supportsFloatingPointTruncate()) {
272 JumpList failures;
273 failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
274 addPtr(tagTypeNumberRegister, regT0);
275 movePtrToDouble(regT0, fpRegT0);
276 failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
277 failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
278 emitFastArithImmToInt(regT1);
279 urshift32(regT1, regT0);
280 failures.append(branch32(LessThan, regT0, Imm32(0)));
281 emitFastArithReTagImmediate(regT0, regT0);
282 emitPutVirtualRegister(dst, regT0);
283 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
284 failures.link(this);
285 }
286 #endif
287 }
288
289 linkSlowCase(iter); // int32 check - op2 is not an int
290 linkSlowCase(iter); // Can't represent unsigned result as an immediate
291 #if USE(JSVALUE32)
292 linkSlowCase(iter); // Couldn't box result
293 #endif
294 }
295
296 JITStubCall stubCall(this, cti_op_urshift);
297 stubCall.addArgument(op1, regT0);
298 stubCall.addArgument(op2, regT1);
299 stubCall.call(dst);
300 }
301
302 void JIT::emit_op_jnless(Instruction* currentInstruction)
303 {
304 unsigned op1 = currentInstruction[1].u.operand;
305 unsigned op2 = currentInstruction[2].u.operand;
306 unsigned target = currentInstruction[3].u.operand;
307
308 // We generate inline code for the following cases in the fast path:
309 // - int immediate to constant int immediate
310 // - constant int immediate to int immediate
311 // - int immediate to int immediate
312
313 if (isOperandConstantImmediateChar(op1)) {
314 emitGetVirtualRegister(op2, regT0);
315 addSlowCase(emitJumpIfNotJSCell(regT0));
316 JumpList failures;
317 emitLoadCharacterString(regT0, regT0, failures);
318 addSlowCase(failures);
319 addJump(branch32(LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
320 return;
321 }
322 if (isOperandConstantImmediateChar(op2)) {
323 emitGetVirtualRegister(op1, regT0);
324 addSlowCase(emitJumpIfNotJSCell(regT0));
325 JumpList failures;
326 emitLoadCharacterString(regT0, regT0, failures);
327 addSlowCase(failures);
328 addJump(branch32(GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
329 return;
330 }
331 if (isOperandConstantImmediateInt(op2)) {
332 emitGetVirtualRegister(op1, regT0);
333 emitJumpSlowCaseIfNotImmediateInteger(regT0);
334 #if USE(JSVALUE64)
335 int32_t op2imm = getConstantOperandImmediateInt(op2);
336 #else
337 int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
338 #endif
339 addJump(branch32(GreaterThanOrEqual, regT0, Imm32(op2imm)), target);
340 } else if (isOperandConstantImmediateInt(op1)) {
341 emitGetVirtualRegister(op2, regT1);
342 emitJumpSlowCaseIfNotImmediateInteger(regT1);
343 #if USE(JSVALUE64)
344 int32_t op1imm = getConstantOperandImmediateInt(op1);
345 #else
346 int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
347 #endif
348 addJump(branch32(LessThanOrEqual, regT1, Imm32(op1imm)), target);
349 } else {
350 emitGetVirtualRegisters(op1, regT0, op2, regT1);
351 emitJumpSlowCaseIfNotImmediateInteger(regT0);
352 emitJumpSlowCaseIfNotImmediateInteger(regT1);
353
354 addJump(branch32(GreaterThanOrEqual, regT0, regT1), target);
355 }
356 }
357
358 void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
359 {
360 unsigned op1 = currentInstruction[1].u.operand;
361 unsigned op2 = currentInstruction[2].u.operand;
362 unsigned target = currentInstruction[3].u.operand;
363
364 // We generate inline code for the following cases in the slow path:
365 // - floating-point number to constant int immediate
366 // - constant int immediate to floating-point number
367 // - floating-point number to floating-point number.
368 if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
369 linkSlowCase(iter);
370 linkSlowCase(iter);
371 linkSlowCase(iter);
372 linkSlowCase(iter);
373 JITStubCall stubCall(this, cti_op_jless);
374 stubCall.addArgument(op1, regT0);
375 stubCall.addArgument(op2, regT1);
376 stubCall.call();
377 emitJumpSlowToHot(branchTest32(Zero, regT0), target);
378 return;
379 }
380
381 if (isOperandConstantImmediateInt(op2)) {
382 linkSlowCase(iter);
383
384 if (supportsFloatingPoint()) {
385 #if USE(JSVALUE64)
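// JSVALUE64: adding tagTypeNumberRegister (equivalent to subtracting DoubleEncodeOffset)
// recovers the raw IEEE 754 bits of a boxed double so they can be moved into an FP register.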
386 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
387 addPtr(tagTypeNumberRegister, regT0);
388 movePtrToDouble(regT0, fpRegT0);
389 #else
390 Jump fail1;
391 if (!m_codeBlock->isKnownNotImmediate(op1))
392 fail1 = emitJumpIfNotJSCell(regT0);
393
394 Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
395 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
396 #endif
397
398 int32_t op2imm = getConstantOperand(op2).asInt32();
399
400 move(Imm32(op2imm), regT1);
401 convertInt32ToDouble(regT1, fpRegT1);
402
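// jnless branches when !(op1 < op2); since NaN comparisons are false, an unordered
// result must also take the branch (op2 is in fpRegT1, op1 in fpRegT0 here).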
403 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
404
405 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
406
407 #if USE(JSVALUE64)
408 fail1.link(this);
409 #else
410 if (!m_codeBlock->isKnownNotImmediate(op1))
411 fail1.link(this);
412 fail2.link(this);
413 #endif
414 }
415
416 JITStubCall stubCall(this, cti_op_jless);
417 stubCall.addArgument(regT0);
418 stubCall.addArgument(op2, regT2);
419 stubCall.call();
420 emitJumpSlowToHot(branchTest32(Zero, regT0), target);
421
422 } else if (isOperandConstantImmediateInt(op1)) {
423 linkSlowCase(iter);
424
425 if (supportsFloatingPoint()) {
426 #if USE(JSVALUE64)
427 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
428 addPtr(tagTypeNumberRegister, regT1);
429 movePtrToDouble(regT1, fpRegT1);
430 #else
431 Jump fail1;
432 if (!m_codeBlock->isKnownNotImmediate(op2))
433 fail1 = emitJumpIfNotJSCell(regT1);
434
435 Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
436 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
437 #endif
438
439 int32_t op1imm = getConstantOperand(op1).asInt32();
440
441 move(Imm32(op1imm), regT0);
442 convertInt32ToDouble(regT0, fpRegT0);
443
444 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
445
446 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
447
448 #if USE(JSVALUE64)
449 fail1.link(this);
450 #else
451 if (!m_codeBlock->isKnownNotImmediate(op2))
452 fail1.link(this);
453 fail2.link(this);
454 #endif
455 }
456
457 JITStubCall stubCall(this, cti_op_jless);
458 stubCall.addArgument(op1, regT2);
459 stubCall.addArgument(regT1);
460 stubCall.call();
461 emitJumpSlowToHot(branchTest32(Zero, regT0), target);
462
463 } else {
464 linkSlowCase(iter);
465
466 if (supportsFloatingPoint()) {
467 #if USE(JSVALUE64)
468 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
469 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
470 Jump fail3 = emitJumpIfImmediateInteger(regT1);
471 addPtr(tagTypeNumberRegister, regT0);
472 addPtr(tagTypeNumberRegister, regT1);
473 movePtrToDouble(regT0, fpRegT0);
474 movePtrToDouble(regT1, fpRegT1);
475 #else
476 Jump fail1;
477 if (!m_codeBlock->isKnownNotImmediate(op1))
478 fail1 = emitJumpIfNotJSCell(regT0);
479
480 Jump fail2;
481 if (!m_codeBlock->isKnownNotImmediate(op2))
482 fail2 = emitJumpIfNotJSCell(regT1);
483
484 Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
485 Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
486 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
487 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
488 #endif
489
490 emitJumpSlowToHot(branchDouble(DoubleLessThanOrEqualOrUnordered, fpRegT1, fpRegT0), target);
491
492 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
493
494 #if USE(JSVALUE64)
495 fail1.link(this);
496 fail2.link(this);
497 fail3.link(this);
498 #else
499 if (!m_codeBlock->isKnownNotImmediate(op1))
500 fail1.link(this);
501 if (!m_codeBlock->isKnownNotImmediate(op2))
502 fail2.link(this);
503 fail3.link(this);
504 fail4.link(this);
505 #endif
506 }
507
508 linkSlowCase(iter);
509 JITStubCall stubCall(this, cti_op_jless);
510 stubCall.addArgument(regT0);
511 stubCall.addArgument(regT1);
512 stubCall.call();
513 emitJumpSlowToHot(branchTest32(Zero, regT0), target);
514 }
515 }
516
517 void JIT::emit_op_jless(Instruction* currentInstruction)
518 {
519 unsigned op1 = currentInstruction[1].u.operand;
520 unsigned op2 = currentInstruction[2].u.operand;
521 unsigned target = currentInstruction[3].u.operand;
522
523 // We generate inline code for the following cases in the fast path:
524 // - int immediate to constant int immediate
525 // - constant int immediate to int immediate
526 // - int immediate to int immediate
527
528 if (isOperandConstantImmediateChar(op1)) {
529 emitGetVirtualRegister(op2, regT0);
530 addSlowCase(emitJumpIfNotJSCell(regT0));
531 JumpList failures;
532 emitLoadCharacterString(regT0, regT0, failures);
533 addSlowCase(failures);
534 addJump(branch32(GreaterThan, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
535 return;
536 }
537 if (isOperandConstantImmediateChar(op2)) {
538 emitGetVirtualRegister(op1, regT0);
539 addSlowCase(emitJumpIfNotJSCell(regT0));
540 JumpList failures;
541 emitLoadCharacterString(regT0, regT0, failures);
542 addSlowCase(failures);
543 addJump(branch32(LessThan, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
544 return;
545 }
546 if (isOperandConstantImmediateInt(op2)) {
547 emitGetVirtualRegister(op1, regT0);
548 emitJumpSlowCaseIfNotImmediateInteger(regT0);
549 #if USE(JSVALUE64)
550 int32_t op2imm = getConstantOperandImmediateInt(op2);
551 #else
552 int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
553 #endif
554 addJump(branch32(LessThan, regT0, Imm32(op2imm)), target);
555 } else if (isOperandConstantImmediateInt(op1)) {
556 emitGetVirtualRegister(op2, regT1);
557 emitJumpSlowCaseIfNotImmediateInteger(regT1);
558 #if USE(JSVALUE64)
559 int32_t op1imm = getConstantOperandImmediateInt(op1);
560 #else
561 int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
562 #endif
563 addJump(branch32(GreaterThan, regT1, Imm32(op1imm)), target);
564 } else {
565 emitGetVirtualRegisters(op1, regT0, op2, regT1);
566 emitJumpSlowCaseIfNotImmediateInteger(regT0);
567 emitJumpSlowCaseIfNotImmediateInteger(regT1);
568
569 addJump(branch32(LessThan, regT0, regT1), target);
570 }
571 }
572
573 void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
574 {
575 unsigned op1 = currentInstruction[1].u.operand;
576 unsigned op2 = currentInstruction[2].u.operand;
577 unsigned target = currentInstruction[3].u.operand;
578
579 // We generate inline code for the following cases in the slow path:
580 // - floating-point number to constant int immediate
581 // - constant int immediate to floating-point number
582 // - floating-point number to floating-point number.
583 if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
584 linkSlowCase(iter);
585 linkSlowCase(iter);
586 linkSlowCase(iter);
587 linkSlowCase(iter);
588 JITStubCall stubCall(this, cti_op_jless);
589 stubCall.addArgument(op1, regT0);
590 stubCall.addArgument(op2, regT1);
591 stubCall.call();
592 emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
593 return;
594 }
595
596 if (isOperandConstantImmediateInt(op2)) {
597 linkSlowCase(iter);
598
599 if (supportsFloatingPoint()) {
600 #if USE(JSVALUE64)
601 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
602 addPtr(tagTypeNumberRegister, regT0);
603 movePtrToDouble(regT0, fpRegT0);
604 #else
605 Jump fail1;
606 if (!m_codeBlock->isKnownNotImmediate(op1))
607 fail1 = emitJumpIfNotJSCell(regT0);
608
609 Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
610 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
611 #endif
612
613 int32_t op2imm = getConstantOperand(op2).asInt32();
614
615 move(Imm32(op2imm), regT1);
616 convertInt32ToDouble(regT1, fpRegT1);
617
618 emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
619
620 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
621
622 #if USE(JSVALUE64)
623 fail1.link(this);
624 #else
625 if (!m_codeBlock->isKnownNotImmediate(op1))
626 fail1.link(this);
627 fail2.link(this);
628 #endif
629 }
630
631 JITStubCall stubCall(this, cti_op_jless);
632 stubCall.addArgument(regT0);
633 stubCall.addArgument(op2, regT2);
634 stubCall.call();
635 emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
636
637 } else if (isOperandConstantImmediateInt(op1)) {
638 linkSlowCase(iter);
639
640 if (supportsFloatingPoint()) {
641 #if USE(JSVALUE64)
642 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
643 addPtr(tagTypeNumberRegister, regT1);
644 movePtrToDouble(regT1, fpRegT1);
645 #else
646 Jump fail1;
647 if (!m_codeBlock->isKnownNotImmediate(op2))
648 fail1 = emitJumpIfNotJSCell(regT1);
649
650 Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
651 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
652 #endif
653
654 int32_t op1imm = getConstantOperand(op1).asInt32();
655
656 move(Imm32(op1imm), regT0);
657 convertInt32ToDouble(regT0, fpRegT0);
658
659 emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
660
661 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
662
663 #if USE(JSVALUE64)
664 fail1.link(this);
665 #else
666 if (!m_codeBlock->isKnownNotImmediate(op2))
667 fail1.link(this);
668 fail2.link(this);
669 #endif
670 }
671
672 JITStubCall stubCall(this, cti_op_jless);
673 stubCall.addArgument(op1, regT2);
674 stubCall.addArgument(regT1);
675 stubCall.call();
676 emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
677
678 } else {
679 linkSlowCase(iter);
680
681 if (supportsFloatingPoint()) {
682 #if USE(JSVALUE64)
683 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
684 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
685 Jump fail3 = emitJumpIfImmediateInteger(regT1);
686 addPtr(tagTypeNumberRegister, regT0);
687 addPtr(tagTypeNumberRegister, regT1);
688 movePtrToDouble(regT0, fpRegT0);
689 movePtrToDouble(regT1, fpRegT1);
690 #else
691 Jump fail1;
692 if (!m_codeBlock->isKnownNotImmediate(op1))
693 fail1 = emitJumpIfNotJSCell(regT0);
694
695 Jump fail2;
696 if (!m_codeBlock->isKnownNotImmediate(op2))
697 fail2 = emitJumpIfNotJSCell(regT1);
698
699 Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
700 Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
701 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
702 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
703 #endif
704
705 emitJumpSlowToHot(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), target);
706
707 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnless));
708
709 #if USE(JSVALUE64)
710 fail1.link(this);
711 fail2.link(this);
712 fail3.link(this);
713 #else
714 if (!m_codeBlock->isKnownNotImmediate(op1))
715 fail1.link(this);
716 if (!m_codeBlock->isKnownNotImmediate(op2))
717 fail2.link(this);
718 fail3.link(this);
719 fail4.link(this);
720 #endif
721 }
722
723 linkSlowCase(iter);
724 JITStubCall stubCall(this, cti_op_jless);
725 stubCall.addArgument(regT0);
726 stubCall.addArgument(regT1);
727 stubCall.call();
728 emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
729 }
730 }
731
732 void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
733 {
734 unsigned op1 = currentInstruction[1].u.operand;
735 unsigned op2 = currentInstruction[2].u.operand;
736 unsigned target = currentInstruction[3].u.operand;
737
738 // We generate inline code for the following cases in the fast path:
739 // - int immediate to constant int immediate
740 // - constant int immediate to int immediate
741 // - int immediate to int immediate
742
743 if (isOperandConstantImmediateChar(op1)) {
744 emitGetVirtualRegister(op2, regT0);
745 addSlowCase(emitJumpIfNotJSCell(regT0));
746 JumpList failures;
747 emitLoadCharacterString(regT0, regT0, failures);
748 addSlowCase(failures);
749 addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
750 return;
751 }
752 if (isOperandConstantImmediateChar(op2)) {
753 emitGetVirtualRegister(op1, regT0);
754 addSlowCase(emitJumpIfNotJSCell(regT0));
755 JumpList failures;
756 emitLoadCharacterString(regT0, regT0, failures);
757 addSlowCase(failures);
758 addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
759 return;
760 }
761 if (isOperandConstantImmediateInt(op2)) {
762 emitGetVirtualRegister(op1, regT0);
763 emitJumpSlowCaseIfNotImmediateInteger(regT0);
764 #if USE(JSVALUE64)
765 int32_t op2imm = getConstantOperandImmediateInt(op2);
766 #else
767 int32_t op2imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)));
768 #endif
769 addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(op2imm)), target);
770 } else if (isOperandConstantImmediateInt(op1)) {
771 emitGetVirtualRegister(op2, regT1);
772 emitJumpSlowCaseIfNotImmediateInteger(regT1);
773 #if USE(JSVALUE64)
774 int32_t op1imm = getConstantOperandImmediateInt(op1);
775 #else
776 int32_t op1imm = static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)));
777 #endif
778 addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT1, Imm32(op1imm)), target);
779 } else {
780 emitGetVirtualRegisters(op1, regT0, op2, regT1);
781 emitJumpSlowCaseIfNotImmediateInteger(regT0);
782 emitJumpSlowCaseIfNotImmediateInteger(regT1);
783
784 addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT1), target);
785 }
786 }
787
788 void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
789 {
790 unsigned op1 = currentInstruction[1].u.operand;
791 unsigned op2 = currentInstruction[2].u.operand;
792 unsigned target = currentInstruction[3].u.operand;
793
794 // We generate inline code for the following cases in the slow path:
795 // - floating-point number to constant int immediate
796 // - constant int immediate to floating-point number
797 // - floating-point number to floating-point number.
798
799 if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
800 linkSlowCase(iter);
801 linkSlowCase(iter);
802 linkSlowCase(iter);
803 linkSlowCase(iter);
804 JITStubCall stubCall(this, cti_op_jlesseq);
805 stubCall.addArgument(op1, regT0);
806 stubCall.addArgument(op2, regT1);
807 stubCall.call();
808 emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
809 return;
810 }
811
812 if (isOperandConstantImmediateInt(op2)) {
813 linkSlowCase(iter);
814
815 if (supportsFloatingPoint()) {
816 #if USE(JSVALUE64)
817 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
818 addPtr(tagTypeNumberRegister, regT0);
819 movePtrToDouble(regT0, fpRegT0);
820 #else
821 Jump fail1;
822 if (!m_codeBlock->isKnownNotImmediate(op1))
823 fail1 = emitJumpIfNotJSCell(regT0);
824
825 Jump fail2 = checkStructure(regT0, m_globalData->numberStructure.get());
826 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
827 #endif
828
829 int32_t op2imm = getConstantOperand(op2).asInt32();
830
831 move(Imm32(op2imm), regT1);
832 convertInt32ToDouble(regT1, fpRegT1);
833
834 emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
835
836 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
837
838 #if USE(JSVALUE64)
839 fail1.link(this);
840 #else
841 if (!m_codeBlock->isKnownNotImmediate(op1))
842 fail1.link(this);
843 fail2.link(this);
844 #endif
845 }
846
847 JITStubCall stubCall(this, cti_op_jlesseq);
848 stubCall.addArgument(regT0);
849 stubCall.addArgument(op2, regT2);
850 stubCall.call();
851 emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
852
853 } else if (isOperandConstantImmediateInt(op1)) {
854 linkSlowCase(iter);
855
856 if (supportsFloatingPoint()) {
857 #if USE(JSVALUE64)
858 Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
859 addPtr(tagTypeNumberRegister, regT1);
860 movePtrToDouble(regT1, fpRegT1);
861 #else
862 Jump fail1;
863 if (!m_codeBlock->isKnownNotImmediate(op2))
864 fail1 = emitJumpIfNotJSCell(regT1);
865
866 Jump fail2 = checkStructure(regT1, m_globalData->numberStructure.get());
867 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
868 #endif
869
870 int32_t op1imm = getConstantOperand(op1).asInt32();
871
872 move(Imm32(op1imm), regT0);
873 convertInt32ToDouble(regT0, fpRegT0);
874
875 emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
876
877 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
878
879 #if USE(JSVALUE64)
880 fail1.link(this);
881 #else
882 if (!m_codeBlock->isKnownNotImmediate(op2))
883 fail1.link(this);
884 fail2.link(this);
885 #endif
886 }
887
888 JITStubCall stubCall(this, cti_op_jlesseq);
889 stubCall.addArgument(op1, regT2);
890 stubCall.addArgument(regT1);
891 stubCall.call();
892 emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
893
894 } else {
895 linkSlowCase(iter);
896
897 if (supportsFloatingPoint()) {
898 #if USE(JSVALUE64)
899 Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
900 Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
901 Jump fail3 = emitJumpIfImmediateInteger(regT1);
902 addPtr(tagTypeNumberRegister, regT0);
903 addPtr(tagTypeNumberRegister, regT1);
904 movePtrToDouble(regT0, fpRegT0);
905 movePtrToDouble(regT1, fpRegT1);
906 #else
907 Jump fail1;
908 if (!m_codeBlock->isKnownNotImmediate(op1))
909 fail1 = emitJumpIfNotJSCell(regT0);
910
911 Jump fail2;
912 if (!m_codeBlock->isKnownNotImmediate(op2))
913 fail2 = emitJumpIfNotJSCell(regT1);
914
915 Jump fail3 = checkStructure(regT0, m_globalData->numberStructure.get());
916 Jump fail4 = checkStructure(regT1, m_globalData->numberStructure.get());
917 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
918 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
919 #endif
920
921 emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
922
923 emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));
924
925 #if USE(JSVALUE64)
926 fail1.link(this);
927 fail2.link(this);
928 fail3.link(this);
929 #else
930 if (!m_codeBlock->isKnownNotImmediate(op1))
931 fail1.link(this);
932 if (!m_codeBlock->isKnownNotImmediate(op2))
933 fail2.link(this);
934 fail3.link(this);
935 fail4.link(this);
936 #endif
937 }
938
939 linkSlowCase(iter);
940 JITStubCall stubCall(this, cti_op_jlesseq);
941 stubCall.addArgument(regT0);
942 stubCall.addArgument(regT1);
943 stubCall.call();
944 emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
945 }
946 }
947
948 void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
949 {
950 emit_op_jlesseq(currentInstruction, true);
951 }
952
953 void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
954 {
955 emitSlow_op_jlesseq(currentInstruction, iter, true);
956 }
957
958 void JIT::emit_op_bitand(Instruction* currentInstruction)
959 {
960 unsigned result = currentInstruction[1].u.operand;
961 unsigned op1 = currentInstruction[2].u.operand;
962 unsigned op2 = currentInstruction[3].u.operand;
963
964 if (isOperandConstantImmediateInt(op1)) {
965 emitGetVirtualRegister(op2, regT0);
966 emitJumpSlowCaseIfNotImmediateInteger(regT0);
967 #if USE(JSVALUE64)
968 int32_t imm = getConstantOperandImmediateInt(op1);
969 andPtr(Imm32(imm), regT0);
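// andPtr sign-extends the 32-bit immediate: a negative constant leaves the tag bits set,
// but a non-negative one clears them, so the result must be re-tagged.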
970 if (imm >= 0)
971 emitFastArithIntToImmNoCheck(regT0, regT0);
972 #else
973 andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op1)))), regT0);
974 #endif
975 } else if (isOperandConstantImmediateInt(op2)) {
976 emitGetVirtualRegister(op1, regT0);
977 emitJumpSlowCaseIfNotImmediateInteger(regT0);
978 #if USE(JSVALUE64)
979 int32_t imm = getConstantOperandImmediateInt(op2);
980 andPtr(Imm32(imm), regT0);
981 if (imm >= 0)
982 emitFastArithIntToImmNoCheck(regT0, regT0);
983 #else
984 andPtr(Imm32(static_cast<int32_t>(JSImmediate::rawValue(getConstantOperand(op2)))), regT0);
985 #endif
986 } else {
987 emitGetVirtualRegisters(op1, regT0, op2, regT1);
988 andPtr(regT1, regT0);
989 emitJumpSlowCaseIfNotImmediateInteger(regT0);
990 }
991 emitPutVirtualRegister(result);
992 }
993
994 void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
995 {
996 unsigned result = currentInstruction[1].u.operand;
997 unsigned op1 = currentInstruction[2].u.operand;
998 unsigned op2 = currentInstruction[3].u.operand;
999
1000 linkSlowCase(iter);
1001 if (isOperandConstantImmediateInt(op1)) {
1002 JITStubCall stubCall(this, cti_op_bitand);
1003 stubCall.addArgument(op1, regT2);
1004 stubCall.addArgument(regT0);
1005 stubCall.call(result);
1006 } else if (isOperandConstantImmediateInt(op2)) {
1007 JITStubCall stubCall(this, cti_op_bitand);
1008 stubCall.addArgument(regT0);
1009 stubCall.addArgument(op2, regT2);
1010 stubCall.call(result);
1011 } else {
1012 JITStubCall stubCall(this, cti_op_bitand);
1013 stubCall.addArgument(op1, regT2);
1014 stubCall.addArgument(regT1);
1015 stubCall.call(result);
1016 }
1017 }
1018
1019 void JIT::emit_op_post_inc(Instruction* currentInstruction)
1020 {
1021 unsigned result = currentInstruction[1].u.operand;
1022 unsigned srcDst = currentInstruction[2].u.operand;
1023
1024 emitGetVirtualRegister(srcDst, regT0);
1025 move(regT0, regT1);
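// regT0 keeps the original value (the result of the post-increment expression);
// the copy in regT1 is incremented and written back to srcDst.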
1026 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1027 #if USE(JSVALUE64)
1028 addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
1029 emitFastArithIntToImmNoCheck(regT1, regT1);
1030 #else
1031 addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
1032 signExtend32ToPtr(regT1, regT1);
1033 #endif
1034 emitPutVirtualRegister(srcDst, regT1);
1035 emitPutVirtualRegister(result);
1036 }
1037
1038 void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1039 {
1040 unsigned result = currentInstruction[1].u.operand;
1041 unsigned srcDst = currentInstruction[2].u.operand;
1042
1043 linkSlowCase(iter);
1044 linkSlowCase(iter);
1045 JITStubCall stubCall(this, cti_op_post_inc);
1046 stubCall.addArgument(regT0);
1047 stubCall.addArgument(Imm32(srcDst));
1048 stubCall.call(result);
1049 }
1050
1051 void JIT::emit_op_post_dec(Instruction* currentInstruction)
1052 {
1053 unsigned result = currentInstruction[1].u.operand;
1054 unsigned srcDst = currentInstruction[2].u.operand;
1055
1056 emitGetVirtualRegister(srcDst, regT0);
1057 move(regT0, regT1);
1058 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1059 #if USE(JSVALUE64)
1060 addSlowCase(branchSub32(Zero, Imm32(1), regT1));
1061 emitFastArithIntToImmNoCheck(regT1, regT1);
1062 #else
1063 addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT1));
1064 signExtend32ToPtr(regT1, regT1);
1065 #endif
1066 emitPutVirtualRegister(srcDst, regT1);
1067 emitPutVirtualRegister(result);
1068 }
1069
1070 void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1071 {
1072 unsigned result = currentInstruction[1].u.operand;
1073 unsigned srcDst = currentInstruction[2].u.operand;
1074
1075 linkSlowCase(iter);
1076 linkSlowCase(iter);
1077 JITStubCall stubCall(this, cti_op_post_dec);
1078 stubCall.addArgument(regT0);
1079 stubCall.addArgument(Imm32(srcDst));
1080 stubCall.call(result);
1081 }
1082
1083 void JIT::emit_op_pre_inc(Instruction* currentInstruction)
1084 {
1085 unsigned srcDst = currentInstruction[1].u.operand;
1086
1087 emitGetVirtualRegister(srcDst, regT0);
1088 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1089 #if USE(JSVALUE64)
1090 addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
1091 emitFastArithIntToImmNoCheck(regT0, regT0);
1092 #else
1093 addSlowCase(branchAdd32(Overflow, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
1094 signExtend32ToPtr(regT0, regT0);
1095 #endif
1096 emitPutVirtualRegister(srcDst);
1097 }
1098
1099 void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1100 {
1101 unsigned srcDst = currentInstruction[1].u.operand;
1102
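// The overflow slow case clobbered regT0 with the failed add, so reload the operand;
// the not-an-int case skips the reload since regT0 still holds the original value.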
1103 Jump notImm = getSlowCase(iter);
1104 linkSlowCase(iter);
1105 emitGetVirtualRegister(srcDst, regT0);
1106 notImm.link(this);
1107 JITStubCall stubCall(this, cti_op_pre_inc);
1108 stubCall.addArgument(regT0);
1109 stubCall.call(srcDst);
1110 }
1111
1112 void JIT::emit_op_pre_dec(Instruction* currentInstruction)
1113 {
1114 unsigned srcDst = currentInstruction[1].u.operand;
1115
1116 emitGetVirtualRegister(srcDst, regT0);
1117 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1118 #if USE(JSVALUE64)
1119 addSlowCase(branchSub32(Zero, Imm32(1), regT0));
1120 emitFastArithIntToImmNoCheck(regT0, regT0);
1121 #else
1122 addSlowCase(branchSub32(Zero, Imm32(1 << JSImmediate::IntegerPayloadShift), regT0));
1123 signExtend32ToPtr(regT0, regT0);
1124 #endif
1125 emitPutVirtualRegister(srcDst);
1126 }
1127
1128 void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1129 {
1130 unsigned srcDst = currentInstruction[1].u.operand;
1131
1132 Jump notImm = getSlowCase(iter);
1133 linkSlowCase(iter);
1134 emitGetVirtualRegister(srcDst, regT0);
1135 notImm.link(this);
1136 JITStubCall stubCall(this, cti_op_pre_dec);
1137 stubCall.addArgument(regT0);
1138 stubCall.call(srcDst);
1139 }
1140
1141 /* ------------------------------ BEGIN: OP_MOD ------------------------------ */
1142
1143 #if CPU(X86) || CPU(X86_64)
1144
1145 void JIT::emit_op_mod(Instruction* currentInstruction)
1146 {
1147 unsigned result = currentInstruction[1].u.operand;
1148 unsigned op1 = currentInstruction[2].u.operand;
1149 unsigned op2 = currentInstruction[3].u.operand;
1150
1151 emitGetVirtualRegisters(op1, X86Registers::eax, op2, X86Registers::ecx);
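// idivl divides edx:eax by its operand, leaving the quotient in eax and the remainder in edx,
// so the operands are pinned to eax and ecx here (cdq below sign-extends eax into edx).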
1152 emitJumpSlowCaseIfNotImmediateInteger(X86Registers::eax);
1153 emitJumpSlowCaseIfNotImmediateInteger(X86Registers::ecx);
1154 #if USE(JSVALUE64)
1155 addSlowCase(branchPtr(Equal, X86Registers::ecx, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
1156 m_assembler.cdq();
1157 m_assembler.idivl_r(X86Registers::ecx);
1158 #else
1159 emitFastArithDeTagImmediate(X86Registers::eax);
1160 addSlowCase(emitFastArithDeTagImmediateJumpIfZero(X86Registers::ecx));
1161 m_assembler.cdq();
1162 m_assembler.idivl_r(X86Registers::ecx);
1163 signExtend32ToPtr(X86Registers::edx, X86Registers::edx);
1164 #endif
1165 emitFastArithReTagImmediate(X86Registers::edx, X86Registers::eax);
1166 emitPutVirtualRegister(result);
1167 }
1168
1169 void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1170 {
1171 unsigned result = currentInstruction[1].u.operand;
1172
1173 #if USE(JSVALUE64)
1174 linkSlowCase(iter);
1175 linkSlowCase(iter);
1176 linkSlowCase(iter);
1177 #else
1178 Jump notImm1 = getSlowCase(iter);
1179 Jump notImm2 = getSlowCase(iter);
1180 linkSlowCase(iter);
1181 emitFastArithReTagImmediate(X86Registers::eax, X86Registers::eax);
1182 emitFastArithReTagImmediate(X86Registers::ecx, X86Registers::ecx);
1183 notImm1.link(this);
1184 notImm2.link(this);
1185 #endif
1186 JITStubCall stubCall(this, cti_op_mod);
1187 stubCall.addArgument(X86Registers::eax);
1188 stubCall.addArgument(X86Registers::ecx);
1189 stubCall.call(result);
1190 }
1191
1192 #else // CPU(X86) || CPU(X86_64)
1193
1194 void JIT::emit_op_mod(Instruction* currentInstruction)
1195 {
1196 unsigned result = currentInstruction[1].u.operand;
1197 unsigned op1 = currentInstruction[2].u.operand;
1198 unsigned op2 = currentInstruction[3].u.operand;
1199
1200 #if ENABLE(JIT_OPTIMIZE_MOD)
1201 emitGetVirtualRegisters(op1, regT0, op2, regT2);
1202 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1203 emitJumpSlowCaseIfNotImmediateInteger(regT2);
1204
1205 addSlowCase(branch32(Equal, regT2, Imm32(1)));
1206
1207 emitNakedCall(m_globalData->jitStubs.ctiSoftModulo());
1208
1209 emitPutVirtualRegister(result, regT0);
1210 #else
1211 JITStubCall stubCall(this, cti_op_mod);
1212 stubCall.addArgument(op1, regT2);
1213 stubCall.addArgument(op2, regT2);
1214 stubCall.call(result);
1215 #endif
1216 }
1217
1218 void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1219 {
1220 #if ENABLE(JIT_OPTIMIZE_MOD)
1221 unsigned result = currentInstruction[1].u.operand;
1222 unsigned op1 = currentInstruction[2].u.operand;
1223 unsigned op2 = currentInstruction[3].u.operand;
1224 linkSlowCase(iter);
1225 linkSlowCase(iter);
1226 linkSlowCase(iter);
1227 JITStubCall stubCall(this, cti_op_mod);
1228 stubCall.addArgument(op1, regT2);
1229 stubCall.addArgument(op2, regT2);
1230 stubCall.call(result);
1231 #else
1232 ASSERT_NOT_REACHED();
1233 #endif
1234 }
1235
1236 #endif // CPU(X86) || CPU(X86_64)
1237
1238 /* ------------------------------ END: OP_MOD ------------------------------ */
1239
1240 #if USE(JSVALUE64)
1241
1242 /* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
1243
1244 void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
1245 {
1246 emitGetVirtualRegisters(op1, regT0, op2, regT1);
1247 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1248 emitJumpSlowCaseIfNotImmediateInteger(regT1);
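// The arithmetic is done on the int32 payloads. Overflow takes the slow case, as does a zero
// result for multiply (it might really be -0); the surviving result is re-tagged below.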
1249 if (opcodeID == op_add)
1250 addSlowCase(branchAdd32(Overflow, regT1, regT0));
1251 else if (opcodeID == op_sub)
1252 addSlowCase(branchSub32(Overflow, regT1, regT0));
1253 else {
1254 ASSERT(opcodeID == op_mul);
1255 addSlowCase(branchMul32(Overflow, regT1, regT0));
1256 addSlowCase(branchTest32(Zero, regT0));
1257 }
1258 emitFastArithIntToImmNoCheck(regT0, regT0);
1259 }
1260
1261 void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
1262 {
1263 // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
1264 COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
1265
1266 Jump notImm1;
1267 Jump notImm2;
1268 if (op1HasImmediateIntFastCase) {
1269 notImm2 = getSlowCase(iter);
1270 } else if (op2HasImmediateIntFastCase) {
1271 notImm1 = getSlowCase(iter);
1272 } else {
1273 notImm1 = getSlowCase(iter);
1274 notImm2 = getSlowCase(iter);
1275 }
1276
1277 linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
1278 if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
1279 linkSlowCase(iter);
1280 emitGetVirtualRegister(op1, regT0);
1281
1282 Label stubFunctionCall(this);
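// The number checks in the double path below jump back to stubFunctionCall whenever
// an operand turns out not to be a number at all.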
1283 JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
1284 if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
1285 emitGetVirtualRegister(op1, regT0);
1286 emitGetVirtualRegister(op2, regT1);
1287 }
1288 stubCall.addArgument(regT0);
1289 stubCall.addArgument(regT1);
1290 stubCall.call(result);
1291 Jump end = jump();
1292
1293 if (op1HasImmediateIntFastCase) {
1294 notImm2.link(this);
1295 if (!types.second().definitelyIsNumber())
1296 emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
1297 emitGetVirtualRegister(op1, regT1);
1298 convertInt32ToDouble(regT1, fpRegT1);
1299 addPtr(tagTypeNumberRegister, regT0);
1300 movePtrToDouble(regT0, fpRegT2);
1301 } else if (op2HasImmediateIntFastCase) {
1302 notImm1.link(this);
1303 if (!types.first().definitelyIsNumber())
1304 emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
1305 emitGetVirtualRegister(op2, regT1);
1306 convertInt32ToDouble(regT1, fpRegT1);
1307 addPtr(tagTypeNumberRegister, regT0);
1308 movePtrToDouble(regT0, fpRegT2);
1309 } else {
1310 // if we get here, regT0 is not an int32; regT1 has not yet been checked.
1311 notImm1.link(this);
1312 if (!types.first().definitelyIsNumber())
1313 emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
1314 if (!types.second().definitelyIsNumber())
1315 emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
1316 addPtr(tagTypeNumberRegister, regT0);
1317 movePtrToDouble(regT0, fpRegT1);
1318 Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
1319 convertInt32ToDouble(regT1, fpRegT2);
1320 Jump op2wasInteger = jump();
1321
1322 // if we get here, regT0 IS an int32, regT1 is not.
1323 notImm2.link(this);
1324 if (!types.second().definitelyIsNumber())
1325 emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
1326 convertInt32ToDouble(regT0, fpRegT1);
1327 op2isDouble.link(this);
1328 addPtr(tagTypeNumberRegister, regT1);
1329 movePtrToDouble(regT1, fpRegT2);
1330 op2wasInteger.link(this);
1331 }
1332
1333 if (opcodeID == op_add)
1334 addDouble(fpRegT2, fpRegT1);
1335 else if (opcodeID == op_sub)
1336 subDouble(fpRegT2, fpRegT1);
1337 else if (opcodeID == op_mul)
1338 mulDouble(fpRegT2, fpRegT1);
1339 else {
1340 ASSERT(opcodeID == op_div);
1341 divDouble(fpRegT2, fpRegT1);
1342 }
1343 moveDoubleToPtr(fpRegT1, regT0);
1344 subPtr(tagTypeNumberRegister, regT0);
1345 emitPutVirtualRegister(result, regT0);
1346
1347 end.link(this);
1348 }
1349
1350 void JIT::emit_op_add(Instruction* currentInstruction)
1351 {
1352 unsigned result = currentInstruction[1].u.operand;
1353 unsigned op1 = currentInstruction[2].u.operand;
1354 unsigned op2 = currentInstruction[3].u.operand;
1355 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1356
1357 if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
1358 JITStubCall stubCall(this, cti_op_add);
1359 stubCall.addArgument(op1, regT2);
1360 stubCall.addArgument(op2, regT2);
1361 stubCall.call(result);
1362 return;
1363 }
1364
1365 if (isOperandConstantImmediateInt(op1)) {
1366 emitGetVirtualRegister(op2, regT0);
1367 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1368 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
1369 emitFastArithIntToImmNoCheck(regT0, regT0);
1370 } else if (isOperandConstantImmediateInt(op2)) {
1371 emitGetVirtualRegister(op1, regT0);
1372 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1373 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
1374 emitFastArithIntToImmNoCheck(regT0, regT0);
1375 } else
1376 compileBinaryArithOp(op_add, result, op1, op2, types);
1377
1378 emitPutVirtualRegister(result);
1379 }
1380
1381 void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1382 {
1383 unsigned result = currentInstruction[1].u.operand;
1384 unsigned op1 = currentInstruction[2].u.operand;
1385 unsigned op2 = currentInstruction[3].u.operand;
1386 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1387
1388 if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
1389 return;
1390
1391 bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
1392 bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
1393 compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand), op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
1394 }
1395
1396 void JIT::emit_op_mul(Instruction* currentInstruction)
1397 {
1398 unsigned result = currentInstruction[1].u.operand;
1399 unsigned op1 = currentInstruction[2].u.operand;
1400 unsigned op2 = currentInstruction[3].u.operand;
1401 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1402
1403 // For now, only plant a fast int case if the constant operand is greater than zero.
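// (Presumably because a zero or negative constant could produce a -0 result, which
// cannot be represented as an immediate int.)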
1404 int32_t value;
1405 if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
1406 emitGetVirtualRegister(op2, regT0);
1407 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1408 addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
1409 emitFastArithReTagImmediate(regT0, regT0);
1410 } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
1411 emitGetVirtualRegister(op1, regT0);
1412 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1413 addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
1414 emitFastArithReTagImmediate(regT0, regT0);
1415 } else
1416 compileBinaryArithOp(op_mul, result, op1, op2, types);
1417
1418 emitPutVirtualRegister(result);
1419 }
1420
1421 void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1422 {
1423 unsigned result = currentInstruction[1].u.operand;
1424 unsigned op1 = currentInstruction[2].u.operand;
1425 unsigned op2 = currentInstruction[3].u.operand;
1426 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1427
1428 bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
1429 bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
1430 compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand), op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
1431 }
1432
1433 void JIT::emit_op_div(Instruction* currentInstruction)
1434 {
1435 unsigned dst = currentInstruction[1].u.operand;
1436 unsigned op1 = currentInstruction[2].u.operand;
1437 unsigned op2 = currentInstruction[3].u.operand;
1438 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1439
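// Load op1 into fpRegT0: constant doubles are unboxed directly, constant ints are loaded
// with emitLoadInt32ToDouble, and anything else is type-checked and converted at run time.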
1440 if (isOperandConstantImmediateDouble(op1)) {
1441 emitGetVirtualRegister(op1, regT0);
1442 addPtr(tagTypeNumberRegister, regT0);
1443 movePtrToDouble(regT0, fpRegT0);
1444 } else if (isOperandConstantImmediateInt(op1)) {
1445 emitLoadInt32ToDouble(op1, fpRegT0);
1446 } else {
1447 emitGetVirtualRegister(op1, regT0);
1448 if (!types.first().definitelyIsNumber())
1449 emitJumpSlowCaseIfNotImmediateNumber(regT0);
1450 Jump notInt = emitJumpIfNotImmediateInteger(regT0);
1451 convertInt32ToDouble(regT0, fpRegT0);
1452 Jump skipDoubleLoad = jump();
1453 notInt.link(this);
1454 addPtr(tagTypeNumberRegister, regT0);
1455 movePtrToDouble(regT0, fpRegT0);
1456 skipDoubleLoad.link(this);
1457 }
1458
1459 if (isOperandConstantImmediateDouble(op2)) {
1460 emitGetVirtualRegister(op2, regT1);
1461 addPtr(tagTypeNumberRegister, regT1);
1462 movePtrToDouble(regT1, fpRegT1);
1463 } else if (isOperandConstantImmediateInt(op2)) {
1464 emitLoadInt32ToDouble(op2, fpRegT1);
1465 } else {
1466 emitGetVirtualRegister(op2, regT1);
1467 if (!types.second().definitelyIsNumber())
1468 emitJumpSlowCaseIfNotImmediateNumber(regT1);
1469 Jump notInt = emitJumpIfNotImmediateInteger(regT1);
1470 convertInt32ToDouble(regT1, fpRegT1);
1471 Jump skipDoubleLoad = jump();
1472 notInt.link(this);
1473 addPtr(tagTypeNumberRegister, regT1);
1474 movePtrToDouble(regT1, fpRegT1);
1475 skipDoubleLoad.link(this);
1476 }
1477 divDouble(fpRegT1, fpRegT0);
1478
1479 // Double result.
1480 moveDoubleToPtr(fpRegT0, regT0);
1481 subPtr(tagTypeNumberRegister, regT0);
1482
1483 emitPutVirtualRegister(dst, regT0);
1484 }
1485
1486 void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1487 {
1488 unsigned result = currentInstruction[1].u.operand;
1489 unsigned op1 = currentInstruction[2].u.operand;
1490 unsigned op2 = currentInstruction[3].u.operand;
1491 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1492 if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
1493 #ifndef NDEBUG
1494 breakpoint();
1495 #endif
1496 return;
1497 }
1498 if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
1499 if (!types.first().definitelyIsNumber())
1500 linkSlowCase(iter);
1501 }
1502 if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
1503 if (!types.second().definitelyIsNumber())
1504 linkSlowCase(iter);
1505 }
1506
1507 JITStubCall stubCall(this, cti_op_div);
1508 stubCall.addArgument(op1, regT2);
1509 stubCall.addArgument(op2, regT2);
1510 stubCall.call(result);
1511 }
1512
1513 void JIT::emit_op_sub(Instruction* currentInstruction)
1514 {
1515 unsigned result = currentInstruction[1].u.operand;
1516 unsigned op1 = currentInstruction[2].u.operand;
1517 unsigned op2 = currentInstruction[3].u.operand;
1518 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1519
1520 compileBinaryArithOp(op_sub, result, op1, op2, types);
1521 emitPutVirtualRegister(result);
1522 }
1523
1524 void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1525 {
1526 unsigned result = currentInstruction[1].u.operand;
1527 unsigned op1 = currentInstruction[2].u.operand;
1528 unsigned op2 = currentInstruction[3].u.operand;
1529 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1530
1531 compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types, false, false);
1532 }
1533
1534 #else // USE(JSVALUE64)
1535
1536 /* ------------------------------ BEGIN: !USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
1537
1538 void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
1539 {
1540 Structure* numberStructure = m_globalData->numberStructure.get();
1541 Jump wasJSNumberCell1;
1542 Jump wasJSNumberCell2;
1543
1544 emitGetVirtualRegisters(src1, regT0, src2, regT1);
1545
1546 if (types.second().isReusable() && supportsFloatingPoint()) {
1547 ASSERT(types.second().mightBeNumber());
1548
1549 // Check op2 is a number
1550 Jump op2imm = emitJumpIfImmediateInteger(regT1);
1551 if (!types.second().definitelyIsNumber()) {
1552 emitJumpSlowCaseIfNotJSCell(regT1, src2);
1553 addSlowCase(checkStructure(regT1, numberStructure));
1554 }
1555
1556 // (1) In this case src2 is a reusable number cell.
1557 // Slow case if src1 is not a number type.
1558 Jump op1imm = emitJumpIfImmediateInteger(regT0);
1559 if (!types.first().definitelyIsNumber()) {
1560 emitJumpSlowCaseIfNotJSCell(regT0, src1);
1561 addSlowCase(checkStructure(regT0, numberStructure));
1562 }
1563
1564 // (1a) if we get here, src1 is also a number cell
1565 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1566 Jump loadedDouble = jump();
1567 // (1b) if we get here, src1 is an immediate
1568 op1imm.link(this);
1569 emitFastArithImmToInt(regT0);
1570 convertInt32ToDouble(regT0, fpRegT0);
1571 // (1c)
1572 loadedDouble.link(this);
1573 if (opcodeID == op_add)
1574 addDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1575 else if (opcodeID == op_sub)
1576 subDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1577 else {
1578 ASSERT(opcodeID == op_mul);
1579 mulDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1580 }
1581
1582 // Store the result to the JSNumberCell and jump.
1583 storeDouble(fpRegT0, Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)));
1584 move(regT1, regT0);
1585 emitPutVirtualRegister(dst);
1586 wasJSNumberCell2 = jump();
1587
1588 // (2) This handles cases where src2 is an immediate integer.
1589 // Two slow cases - either src1 isn't an immediate integer, or the arithmetic below overflows.
1590 op2imm.link(this);
1591 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1592 } else if (types.first().isReusable() && supportsFloatingPoint()) {
1593 ASSERT(types.first().mightBeNumber());
1594
1595 // Check op1 is a number
1596 Jump op1imm = emitJumpIfImmediateInteger(regT0);
1597 if (!types.first().definitelyIsNumber()) {
1598 emitJumpSlowCaseIfNotJSCell(regT0, src1);
1599 addSlowCase(checkStructure(regT0, numberStructure));
1600 }
1601
1602 // (1) In this case src1 is a reusable number cell.
1603 // Slow case if src2 is not a number type.
1604 Jump op2imm = emitJumpIfImmediateInteger(regT1);
1605 if (!types.second().definitelyIsNumber()) {
1606 emitJumpSlowCaseIfNotJSCell(regT1, src2);
1607 addSlowCase(checkStructure(regT1, numberStructure));
1608 }
1609
1610 // (1a) if we get here, src2 is also a number cell
1611 loadDouble(Address(regT1, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT1);
1612 Jump loadedDouble = jump();
1613 // (1b) if we get here, src2 is an immediate
1614 op2imm.link(this);
1615 emitFastArithImmToInt(regT1);
1616 convertInt32ToDouble(regT1, fpRegT1);
1617 // (1c)
1618 loadedDouble.link(this);
1619 loadDouble(Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)), fpRegT0);
1620 if (opcodeID == op_add)
1621 addDouble(fpRegT1, fpRegT0);
1622 else if (opcodeID == op_sub)
1623 subDouble(fpRegT1, fpRegT0);
1624 else {
1625 ASSERT(opcodeID == op_mul);
1626 mulDouble(fpRegT1, fpRegT0);
1627 }
1630
1631 // Store the result to the JSNumberCell and jump.
1632 storeDouble(fpRegT0, Address(regT0, OBJECT_OFFSETOF(JSNumberCell, m_value)));
1633 emitPutVirtualRegister(dst);
1634 wasJSNumberCell1 = jump();
1635
1636 // (2) This handles cases where src1 is an immediate integer.
1637 // Two slow cases - either src2 isn't an immediate integer, or the arithmetic below overflows.
1638 op1imm.link(this);
1639 emitJumpSlowCaseIfNotImmediateInteger(regT1);
1640 } else
1641 emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
1642
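// Tagged-integer arithmetic. For op_add, detagging one operand first means adding the still-tagged
// other operand yields a correctly tagged sum. For op_sub the tags cancel in the subtraction, so the
// result is re-tagged afterwards. For op_mul, regT1 is converted to a raw int and regT0 is detagged,
// and the product is re-tagged once the -0 and overflow checks pass.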
1643 if (opcodeID == op_add) {
1644 emitFastArithDeTagImmediate(regT0);
1645 addSlowCase(branchAdd32(Overflow, regT1, regT0));
1646 } else if (opcodeID == op_sub) {
1647 addSlowCase(branchSub32(Overflow, regT1, regT0));
1648 signExtend32ToPtr(regT0, regT0);
1649 emitFastArithReTagImmediate(regT0, regT0);
1650 } else {
1651 ASSERT(opcodeID == op_mul);
1652 // convert regT0 & regT1 from JSImmediates to ints, and check if either is zero
1653 emitFastArithImmToInt(regT1);
1654 Jump op1Zero = emitFastArithDeTagImmediateJumpIfZero(regT0);
1655 Jump op2NonZero = branchTest32(NonZero, regT1);
1656 op1Zero.link(this);
1657 // If either input is zero, add the two together, and check if the result is < 0.
1658 // If it is, we have a problem: (-N * 0) == -0, which is not representable as a JSImmediate.
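// For example (illustrative values): 0 * (-3) - the sum of the operands is negative, so we take the
// slow case and let cti_op_mul produce the double -0.0; 0 * 3 stays on the fast path and correctly
// yields +0.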
1659 move(regT0, regT2);
1660 addSlowCase(branchAdd32(Signed, regT1, regT2));
1661 // Skip the above check if neither input is zero
1662 op2NonZero.link(this);
1663 addSlowCase(branchMul32(Overflow, regT1, regT0));
1664 signExtend32ToPtr(regT0, regT0);
1665 emitFastArithReTagImmediate(regT0, regT0);
1666 }
1667 emitPutVirtualRegister(dst);
1668
1669 if (types.second().isReusable() && supportsFloatingPoint())
1670 wasJSNumberCell2.link(this);
1671 else if (types.first().isReusable() && supportsFloatingPoint())
1672 wasJSNumberCell1.link(this);
1673 }
1674
1675 void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned dst, unsigned src1, unsigned src2, OperandTypes types)
1676 {
1677 linkSlowCase(iter);
1678 if (types.second().isReusable() && supportsFloatingPoint()) {
1679 if (!types.first().definitelyIsNumber()) {
1680 linkSlowCaseIfNotJSCell(iter, src1);
1681 linkSlowCase(iter);
1682 }
1683 if (!types.second().definitelyIsNumber()) {
1684 linkSlowCaseIfNotJSCell(iter, src2);
1685 linkSlowCase(iter);
1686 }
1687 } else if (types.first().isReusable() && supportsFloatingPoint()) {
1688 if (!types.first().definitelyIsNumber()) {
1689 linkSlowCaseIfNotJSCell(iter, src1);
1690 linkSlowCase(iter);
1691 }
1692 if (!types.second().definitelyIsNumber()) {
1693 linkSlowCaseIfNotJSCell(iter, src2);
1694 linkSlowCase(iter);
1695 }
1696 }
1697 linkSlowCase(iter);
1698
1699 // additional entry point to handle -0 cases.
1700 if (opcodeID == op_mul)
1701 linkSlowCase(iter);
1702
1703 JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
1704 stubCall.addArgument(src1, regT2);
1705 stubCall.addArgument(src2, regT2);
1706 stubCall.call(dst);
1707 }
1708
1709 void JIT::emit_op_add(Instruction* currentInstruction)
1710 {
1711 unsigned result = currentInstruction[1].u.operand;
1712 unsigned op1 = currentInstruction[2].u.operand;
1713 unsigned op2 = currentInstruction[3].u.operand;
1714 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1715
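// If either operand is statically known not to be a number there is no point planting a fast path,
// so call the stub unconditionally. (emitSlow_op_add below returns early for the same condition,
// since no slow cases were planted.)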
1716 if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
1717 JITStubCall stubCall(this, cti_op_add);
1718 stubCall.addArgument(op1, regT2);
1719 stubCall.addArgument(op2, regT2);
1720 stubCall.call(result);
1721 return;
1722 }
1723
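// Constant-integer fast path: the constant is pre-shifted by JSImmediate::IntegerPayloadShift so it
// can be added directly to the still-tagged operand in regT0 - the low tag bits are untouched by the
// add, so the result is already a valid immediate and only overflow needs a slow case.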
1724 if (isOperandConstantImmediateInt(op1)) {
1725 emitGetVirtualRegister(op2, regT0);
1726 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1727 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0));
1728 signExtend32ToPtr(regT0, regT0);
1729 emitPutVirtualRegister(result);
1730 } else if (isOperandConstantImmediateInt(op2)) {
1731 emitGetVirtualRegister(op1, regT0);
1732 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1733 addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0));
1734 signExtend32ToPtr(regT0, regT0);
1735 emitPutVirtualRegister(result);
1736 } else {
1737 compileBinaryArithOp(op_add, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
1738 }
1739 }
1740
1741 void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1742 {
1743 unsigned result = currentInstruction[1].u.operand;
1744 unsigned op1 = currentInstruction[2].u.operand;
1745 unsigned op2 = currentInstruction[3].u.operand;
1746
1747 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1748 if (!types.first().mightBeNumber() || !types.second().mightBeNumber())
1749 return;
1750
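// Two slow cases were planted on each constant fast path above: operand-not-an-int (notImm, taken
// before the add executed) and overflow. On overflow regT0 already holds the wrapped sum, so the
// pre-shifted constant is subtracted back out before regT0 is passed to the stub.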
1751 if (isOperandConstantImmediateInt(op1)) {
1752 Jump notImm = getSlowCase(iter);
1753 linkSlowCase(iter);
1754 sub32(Imm32(getConstantOperandImmediateInt(op1) << JSImmediate::IntegerPayloadShift), regT0);
1755 notImm.link(this);
1756 JITStubCall stubCall(this, cti_op_add);
1757 stubCall.addArgument(op1, regT2);
1758 stubCall.addArgument(regT0);
1759 stubCall.call(result);
1760 } else if (isOperandConstantImmediateInt(op2)) {
1761 Jump notImm = getSlowCase(iter);
1762 linkSlowCase(iter);
1763 sub32(Imm32(getConstantOperandImmediateInt(op2) << JSImmediate::IntegerPayloadShift), regT0);
1764 notImm.link(this);
1765 JITStubCall stubCall(this, cti_op_add);
1766 stubCall.addArgument(regT0);
1767 stubCall.addArgument(op2, regT2);
1768 stubCall.call(result);
1769 } else {
1770 OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
1771 ASSERT(types.first().mightBeNumber() && types.second().mightBeNumber());
1772 compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types);
1773 }
1774 }
1775
1776 void JIT::emit_op_mul(Instruction* currentInstruction)
1777 {
1778 unsigned result = currentInstruction[1].u.operand;
1779 unsigned op1 = currentInstruction[2].u.operand;
1780 unsigned op2 = currentInstruction[3].u.operand;
1781
1782 // For now, only plant a fast int case if the constant operand is greater than zero.
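// (With a strictly positive constant, a zero result can only come from the other operand being zero,
// so the result is +0 and no -0 check is needed on this fast path; zero or negative constants go
// through compileBinaryArithOp or the stub instead.)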
1783 int32_t value;
1784 if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
1785 emitGetVirtualRegister(op2, regT0);
1786 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1787 emitFastArithDeTagImmediate(regT0);
1788 addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
1789 signExtend32ToPtr(regT0, regT0);
1790 emitFastArithReTagImmediate(regT0, regT0);
1791 emitPutVirtualRegister(result);
1792 } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
1793 emitGetVirtualRegister(op1, regT0);
1794 emitJumpSlowCaseIfNotImmediateInteger(regT0);
1795 emitFastArithDeTagImmediate(regT0);
1796 addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
1797 signExtend32ToPtr(regT0, regT0);
1798 emitFastArithReTagImmediate(regT0, regT0);
1799 emitPutVirtualRegister(result);
1800 } else
1801 compileBinaryArithOp(op_mul, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
1802 }
1803
1804 void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1805 {
1806 unsigned result = currentInstruction[1].u.operand;
1807 unsigned op1 = currentInstruction[2].u.operand;
1808 unsigned op2 = currentInstruction[3].u.operand;
1809
1810 if ((isOperandConstantImmediateInt(op1) && (getConstantOperandImmediateInt(op1) > 0))
1811 || (isOperandConstantImmediateInt(op2) && (getConstantOperandImmediateInt(op2) > 0))) {
1812 linkSlowCase(iter);
1813 linkSlowCase(iter);
1814 // The extra slow case for a -0 result (op1 * -N or -N * op2) only exists on the non-constant path, which is handled by compileBinaryArithOpSlowCase below.
1815 JITStubCall stubCall(this, cti_op_mul);
1816 stubCall.addArgument(op1, regT2);
1817 stubCall.addArgument(op2, regT2);
1818 stubCall.call(result);
1819 } else
1820 compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, OperandTypes::fromInt(currentInstruction[4].u.operand));
1821 }
1822
1823 void JIT::emit_op_sub(Instruction* currentInstruction)
1824 {
1825 compileBinaryArithOp(op_sub, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
1826 }
1827
1828 void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
1829 {
1830 compileBinaryArithOpSlowCase(op_sub, iter, currentInstruction[1].u.operand, currentInstruction[2].u.operand, currentInstruction[3].u.operand, OperandTypes::fromInt(currentInstruction[4].u.operand));
1831 }
1832
1833 #endif // USE(JSVALUE64)
1834
1835 /* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
1836
1837 #endif // !USE(JSVALUE32_64)
1838
1839 } // namespace JSC
1840
1841 #endif // ENABLE(JIT)