/* NOTE: stripped scraping residue (git-blame table header) that preceded the source. */
1 | /* | |
2 | * Copyright (C) 2008 Apple Inc. All rights reserved. | |
3 | * | |
4 | * Redistribution and use in source and binary forms, with or without | |
5 | * modification, are permitted provided that the following conditions | |
6 | * are met: | |
7 | * 1. Redistributions of source code must retain the above copyright | |
8 | * notice, this list of conditions and the following disclaimer. | |
9 | * 2. Redistributions in binary form must reproduce the above copyright | |
10 | * notice, this list of conditions and the following disclaimer in the | |
11 | * documentation and/or other materials provided with the distribution. | |
12 | * | |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY | |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR | |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR | |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, | |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, | |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR | |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY | |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
24 | */ | |
25 | ||
26 | #include "config.h" | |
27 | ||
28 | #if ENABLE(JIT) | |
29 | #include "JIT.h" | |
30 | ||
31 | #include "CodeBlock.h" | |
32 | #include "JITInlineMethods.h" | |
33 | #include "JITStubCall.h" | |
34 | #include "JITStubs.h" | |
35 | #include "JSArray.h" | |
36 | #include "JSFunction.h" | |
37 | #include "Interpreter.h" | |
38 | #include "ResultType.h" | |
39 | #include "SamplingTool.h" | |
40 | ||
41 | #ifndef NDEBUG | |
42 | #include <stdio.h> | |
43 | #endif | |
44 | ||
45 | using namespace std; | |
46 | ||
47 | namespace JSC { | |
48 | ||
49 | void JIT::emit_op_jless(Instruction* currentInstruction) | |
50 | { | |
51 | unsigned op1 = currentInstruction[1].u.operand; | |
52 | unsigned op2 = currentInstruction[2].u.operand; | |
53 | unsigned target = currentInstruction[3].u.operand; | |
54 | ||
55 | emit_compareAndJump(op_jless, op1, op2, target, LessThan); | |
56 | } | |
57 | ||
58 | void JIT::emit_op_jlesseq(Instruction* currentInstruction) | |
59 | { | |
60 | unsigned op1 = currentInstruction[1].u.operand; | |
61 | unsigned op2 = currentInstruction[2].u.operand; | |
62 | unsigned target = currentInstruction[3].u.operand; | |
63 | ||
64 | emit_compareAndJump(op_jlesseq, op1, op2, target, LessThanOrEqual); | |
65 | } | |
66 | ||
67 | void JIT::emit_op_jgreater(Instruction* currentInstruction) | |
68 | { | |
69 | unsigned op1 = currentInstruction[1].u.operand; | |
70 | unsigned op2 = currentInstruction[2].u.operand; | |
71 | unsigned target = currentInstruction[3].u.operand; | |
72 | ||
73 | emit_compareAndJump(op_jgreater, op1, op2, target, GreaterThan); | |
74 | } | |
75 | ||
76 | void JIT::emit_op_jgreatereq(Instruction* currentInstruction) | |
77 | { | |
78 | unsigned op1 = currentInstruction[1].u.operand; | |
79 | unsigned op2 = currentInstruction[2].u.operand; | |
80 | unsigned target = currentInstruction[3].u.operand; | |
81 | ||
82 | emit_compareAndJump(op_jgreatereq, op1, op2, target, GreaterThanOrEqual); | |
83 | } | |
84 | ||
85 | void JIT::emit_op_jnless(Instruction* currentInstruction) | |
86 | { | |
87 | unsigned op1 = currentInstruction[1].u.operand; | |
88 | unsigned op2 = currentInstruction[2].u.operand; | |
89 | unsigned target = currentInstruction[3].u.operand; | |
90 | ||
91 | emit_compareAndJump(op_jnless, op1, op2, target, GreaterThanOrEqual); | |
92 | } | |
93 | ||
94 | void JIT::emit_op_jnlesseq(Instruction* currentInstruction) | |
95 | { | |
96 | unsigned op1 = currentInstruction[1].u.operand; | |
97 | unsigned op2 = currentInstruction[2].u.operand; | |
98 | unsigned target = currentInstruction[3].u.operand; | |
99 | ||
100 | emit_compareAndJump(op_jnlesseq, op1, op2, target, GreaterThan); | |
101 | } | |
102 | ||
103 | void JIT::emit_op_jngreater(Instruction* currentInstruction) | |
104 | { | |
105 | unsigned op1 = currentInstruction[1].u.operand; | |
106 | unsigned op2 = currentInstruction[2].u.operand; | |
107 | unsigned target = currentInstruction[3].u.operand; | |
108 | ||
109 | emit_compareAndJump(op_jngreater, op1, op2, target, LessThanOrEqual); | |
110 | } | |
111 | ||
112 | void JIT::emit_op_jngreatereq(Instruction* currentInstruction) | |
113 | { | |
114 | unsigned op1 = currentInstruction[1].u.operand; | |
115 | unsigned op2 = currentInstruction[2].u.operand; | |
116 | unsigned target = currentInstruction[3].u.operand; | |
117 | ||
118 | emit_compareAndJump(op_jngreatereq, op1, op2, target, LessThan); | |
119 | } | |
120 | ||
121 | void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) | |
122 | { | |
123 | unsigned op1 = currentInstruction[1].u.operand; | |
124 | unsigned op2 = currentInstruction[2].u.operand; | |
125 | unsigned target = currentInstruction[3].u.operand; | |
126 | ||
127 | emit_compareAndJumpSlow(op1, op2, target, DoubleLessThan, cti_op_jless, false, iter); | |
128 | } | |
129 | ||
130 | void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) | |
131 | { | |
132 | unsigned op1 = currentInstruction[1].u.operand; | |
133 | unsigned op2 = currentInstruction[2].u.operand; | |
134 | unsigned target = currentInstruction[3].u.operand; | |
135 | ||
136 | emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqual, cti_op_jlesseq, false, iter); | |
137 | } | |
138 | ||
139 | void JIT::emitSlow_op_jgreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) | |
140 | { | |
141 | unsigned op1 = currentInstruction[1].u.operand; | |
142 | unsigned op2 = currentInstruction[2].u.operand; | |
143 | unsigned target = currentInstruction[3].u.operand; | |
144 | ||
145 | emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThan, cti_op_jgreater, false, iter); | |
146 | } | |
147 | ||
148 | void JIT::emitSlow_op_jgreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) | |
149 | { | |
150 | unsigned op1 = currentInstruction[1].u.operand; | |
151 | unsigned op2 = currentInstruction[2].u.operand; | |
152 | unsigned target = currentInstruction[3].u.operand; | |
153 | ||
154 | emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqual, cti_op_jgreatereq, false, iter); | |
155 | } | |
156 | ||
157 | void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) | |
158 | { | |
159 | unsigned op1 = currentInstruction[1].u.operand; | |
160 | unsigned op2 = currentInstruction[2].u.operand; | |
161 | unsigned target = currentInstruction[3].u.operand; | |
162 | ||
163 | emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqualOrUnordered, cti_op_jless, true, iter); | |
164 | } | |
165 | ||
166 | void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) | |
167 | { | |
168 | unsigned op1 = currentInstruction[1].u.operand; | |
169 | unsigned op2 = currentInstruction[2].u.operand; | |
170 | unsigned target = currentInstruction[3].u.operand; | |
171 | ||
172 | emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrUnordered, cti_op_jlesseq, true, iter); | |
173 | } | |
174 | ||
175 | void JIT::emitSlow_op_jngreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) | |
176 | { | |
177 | unsigned op1 = currentInstruction[1].u.operand; | |
178 | unsigned op2 = currentInstruction[2].u.operand; | |
179 | unsigned target = currentInstruction[3].u.operand; | |
180 | ||
181 | emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqualOrUnordered, cti_op_jgreater, true, iter); | |
182 | } | |
183 | ||
184 | void JIT::emitSlow_op_jngreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) | |
185 | { | |
186 | unsigned op1 = currentInstruction[1].u.operand; | |
187 | unsigned op2 = currentInstruction[2].u.operand; | |
188 | unsigned target = currentInstruction[3].u.operand; | |
189 | ||
190 | emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrUnordered, cti_op_jgreatereq, true, iter); | |
191 | } | |
192 | ||
193 | #if USE(JSVALUE64) | |
194 | ||
// op_negate: dst = -src.
// Fast path negates an immediate int in place. Payloads 0 and 0x80000000
// (i.e. src & 0x7fffffff == 0) go to the slow case: -0 is a double and
// -INT_MIN overflows, so neither result fits an immediate int. Boxed
// doubles are negated by flipping the IEEE 754 sign bit.
void JIT::emit_op_negate(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned src = currentInstruction[2].u.operand;

    emitGetVirtualRegister(src, regT0);

    Jump srcNotInt = emitJumpIfNotImmediateInteger(regT0);
    // Slow case when the low 31 bits are all zero (value is 0 or INT_MIN).
    addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
    neg32(regT0);
    emitFastArithReTagImmediate(regT0, regT0);

    Jump end = jump();

    srcNotInt.link(this);
    emitJumpSlowCaseIfNotImmediateNumber(regT0);

    // XOR with the sign-bit mask negates the double without unboxing it.
    move(TrustedImmPtr(reinterpret_cast<void*>(0x8000000000000000ull)), regT1);
    xorPtr(regT1, regT0);

    end.link(this);
    emitPutVirtualRegister(dst);
}
218 | ||
// Slow path for op_negate: links both fast-path bails (int not negatable
// inline; source not a number) and falls back to the cti_op_negate stub.
void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;

    linkSlowCase(iter); // 0x7fffffff check
    linkSlowCase(iter); // double check

    JITStubCall stubCall(this, cti_op_negate);
    stubCall.addArgument(regT1, regT0);
    stubCall.call(dst);
}
230 | ||
// op_lshift: result = op1 << op2.
// Fast path requires both operands to be immediate ints; any other type
// bails to the slow case. (No explicit & 0x1f masking of the shift count
// here — presumably the macro assembler/hardware masks it; confirm
// against the lshift32 implementation.)
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
    lshift32(regT2, regT0);
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}
247 | ||
// Slow path for op_lshift: links the two not-an-int bails registered by
// the fast path and calls the cti_op_lshift stub with the operand values
// still sitting in regT0/regT2.
void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    UNUSED_PARAM(op1);
    UNUSED_PARAM(op2);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT2);
    stubCall.call(result);
}
263 | ||
// op_rshift: result = op1 >> op2 (signed shift).
// Three fast-path shapes, each registering a different number of slow
// cases; emitSlow_op_rshift must link exactly the same count in the same
// order, as the comments below record.
void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        // isOperandConstantImmediateInt(op2) => 1 SlowCase
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (supportsFloatingPointTruncate()) {
            // A double lhs is truncated to an int32 inline when the CPU can.
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
            // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
            lhsIsInt.link(this);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        } else {
            // !supportsFloatingPoint() => 2 SlowCases
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        }
        emitFastArithImmToInt(regT2);
        rshift32(regT2, regT0);
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(result);
}
298 | ||
// Slow path for op_rshift. Each arm links exactly as many slow cases as
// the matching fast-path shape registered (1, 3, or 2 — see
// emit_op_rshift) before falling back to the cti_op_rshift stub.
void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_rshift);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
    } else {
        if (supportsFloatingPointTruncate()) {
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
            // We're reloading op1 to regT0 as we can no longer guarantee that
            // we have not munged the operand. It may have already been shifted
            // correctly, but it still will not have been tagged.
            stubCall.addArgument(op1, regT0);
            stubCall.addArgument(regT2);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
            stubCall.addArgument(regT0);
            stubCall.addArgument(regT2);
        }
    }

    stubCall.call(result);
}
331 | ||
// op_urshift: dst = op1 >>> op2 (unsigned shift).
// An unsigned result with the sign bit set cannot be represented as an
// immediate int, so those results bail to the slow case (the
// branch32(LessThan, ..., 0) checks). Shift counts are masked to 5 bits
// per ECMA-262.
void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Slow case of urshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithImmToInt(regT0);
        int shift = getConstantOperand(op2).asInt32();
        if (shift)
            urshift32(Imm32(shift & 0x1f), regT0);
        // unsigned shift < 0 or shift = k*2^32 may result in (essentially)
        // a toUint conversion, which can result in a value we can represent
        // as an immediate int.
        if (shift < 0 || !(shift & 31))
            addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(dst, regT0);
        return;
    }
    // Variable shift: op1 in regT0, op2 in regT1 (the slow path relies on this).
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    if (!isOperandConstantImmediateInt(op1))
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT1);
    urshift32(regT1, regT0);
    addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(dst, regT0);
}
367 | ||
// Slow path for op_urshift. When floating-point truncation is available
// the shift is retried inline on a truncated double lhs before falling
// all the way back to the cti_op_urshift stub.
// NOTE(review): the inline retries jump past OPCODE_LENGTH(op_rshift);
// this assumes op_urshift has the same opcode length — confirm.
void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32();
        // op1 = regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            // Retry with op1 truncated from a double; any failure falls
            // through to the stub call below.
            JumpList failures;
            failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift)
                urshift32(Imm32(shift & 0x1f), regT0);
            if (shift < 0 || !(shift & 31))
                failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
            emitFastArithReTagImmediate(regT0, regT0);
            emitPutVirtualRegister(dst, regT0);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
        if (shift < 0 || !(shift & 31))
            linkSlowCase(iter); // failed to box in hot path
    } else {
        // op1 = regT0
        // op2 = regT1
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
                addPtr(tagTypeNumberRegister, regT0);
                movePtrToDouble(regT0, fpRegT0);
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
                emitFastArithImmToInt(regT1);
                urshift32(regT1, regT0);
                failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
                emitFastArithReTagImmediate(regT0, regT0);
                emitPutVirtualRegister(dst, regT0);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
        }

        linkSlowCase(iter); // int32 check - op2 is not an int
        linkSlowCase(iter); // Can't represent unsigned result as an immediate
    }

    // Full fallback: operands are reloaded from their virtual registers.
    JITStubCall stubCall(this, cti_op_urshift);
    stubCall.addArgument(op1, regT0);
    stubCall.addArgument(op2, regT1);
    stubCall.call(dst);
}
425 | ||
// Shared fast-path emitter for the jless/jnless relational-branch family.
// Inline code is generated when one side is a constant single-character
// string or a constant int, or when both sides are immediate ints; every
// other combination registers slow cases that emit_compareAndJumpSlow
// must link in the same order.
void JIT::emit_compareAndJump(OpcodeID, unsigned op1, unsigned op2, unsigned target, RelationalCondition condition)
{
    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateChar(op1)) {
        // op2 ends up in the register and constant op1 is the immediate,
        // so the condition is commuted (a < b  <=>  b > a).
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t op2imm = getConstantOperandImmediateInt(op2);
        addJump(branch32(condition, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        // Constant lhs: load rhs into regT1 and commute the condition.
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
        int32_t op1imm = getConstantOperandImmediateInt(op1);
        addJump(branch32(commute(condition), regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);

        addJump(branch32(condition, regT0, regT1), target);
    }
}
469 | ||
// Shared slow-path emitter for the relational branches. The
// COMPILE_ASSERTs pin that every opcode in the family has the same
// length, so OPCODE_LENGTH(op_jless) is valid for all the
// emitJumpSlowToHot calls below. The linkSlowCase calls in each arm must
// match, in order, the slow cases added by emit_compareAndJump.
// 'invert' flips the stub's boolean result (for the jn* opcodes, which
// reuse the non-negated stubs).
void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition condition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jlesseq), OPCODE_LENGTH_op_jlesseq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnless), OPCODE_LENGTH_op_jnless_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnlesseq), OPCODE_LENGTH_op_jnlesseq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreater), OPCODE_LENGTH_op_jgreater_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jgreatereq), OPCODE_LENGTH_op_jgreatereq_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreater), OPCODE_LENGTH_op_jngreater_equals_op_jless);
    COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jngreatereq), OPCODE_LENGTH_op_jngreatereq_equals_op_jless);

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        // Char-constant fast path added 4 slow cases (not-a-cell + the
        // 3-entry emitLoadCharacterString failure list).
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, stub);
        stubCall.addArgument(op1, regT0);
        stubCall.addArgument(op2, regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        // Inline double-vs-constant compare when the lhs is a boxed double.
        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);

            int32_t op2imm = getConstantOperand(op2).asInt32();

            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
        }

        JITStubCall stubCall(this, stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);

    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        // Mirror image: rhs is in regT1, constant lhs converted to double.
        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT1, fpRegT1);

            int32_t op1imm = getConstantOperand(op1).asInt32();

            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
        }

        JITStubCall stubCall(this, stub);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
    } else {
        // General case: two slow cases were registered (each operand's
        // not-an-int check). The first is linked here.
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            // Inline compare only when both operands are boxed doubles;
            // a double-vs-int mix (fail3) goes to the stub instead.
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            Jump fail3 = emitJumpIfImmediateInteger(regT1);
            addPtr(tagTypeNumberRegister, regT0);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT0, fpRegT0);
            movePtrToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(condition, fpRegT0, fpRegT1), target);

            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jless));

            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
        }

        linkSlowCase(iter);
        JITStubCall stubCall(this, stub);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
    }
}
577 | ||
// op_bitand: result = op1 & op2.
// With a constant operand the AND is applied to the whole tagged word:
// a negative imm presumably sign-extends to a mask whose high (tag) bits
// are set, leaving the tag intact, so re-tagging is only needed when
// imm >= 0 — confirm against the andPtr/Imm32 extension behavior.
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t imm = getConstantOperandImmediateInt(op1);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t imm = getConstantOperandImmediateInt(op2);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
    } else {
        // ANDing two tagged ints keeps the tag, so only one int check is
        // needed — on the result.
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        andPtr(regT1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}
605 | ||
// Slow path for op_bitand: every fast-path shape registered exactly one
// slow case. In the non-constant arm regT0 was clobbered by the andPtr,
// so op1 is reloaded from its virtual register into regT2.
void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    if (isOperandConstantImmediateInt(op1)) {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT0);
        stubCall.call(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call(result);
    }
}
630 | ||
// op_post_inc: result = srcDst; srcDst = srcDst + 1.
// regT0 keeps the original (returned) value; regT1 holds the incremented
// copy. Non-int sources and int overflow bail to the slow case.
void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
644 | ||
// Slow path for op_post_inc: links the not-an-int and overflow bails,
// then calls the stub with the original value (still in regT0) and the
// srcDst virtual-register index so the stub can write back the result.
void JIT::emitSlow_op_post_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_post_inc);
    stubCall.addArgument(regT0);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(result);
}
657 | ||
// op_post_dec: result = srcDst; srcDst = srcDst - 1.
// Mirrors emit_op_post_inc with a subtract: regT0 keeps the returned
// original, regT1 the decremented copy; non-int / overflow go slow.
void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
671 | ||
// Slow path for op_post_dec: same shape as emitSlow_op_post_inc — links
// the not-an-int and overflow bails and passes the original value plus
// the srcDst register index to the stub.
void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_post_dec);
    stubCall.addArgument(regT0);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(result);
}
684 | ||
// op_pre_inc: srcDst = srcDst + 1, in place.
// Non-int sources and int overflow bail to the slow case.
void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(srcDst);
}
695 | ||
// Slow path for op_pre_inc. On the overflow bail regT0 was clobbered by
// the failed add, so srcDst is reloaded before the stub call; on the
// not-an-int bail (notImm) regT0 still holds the original value, so it
// joins after the reload.
void JIT::emitSlow_op_pre_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_inc);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}
708 | ||
// op_pre_dec: srcDst = srcDst - 1, in place.
// Mirrors emit_op_pre_inc with a subtract; non-int / overflow go slow.
void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(srcDst);
}
719 | ||
// Slow path for op_pre_dec: same structure as emitSlow_op_pre_inc — the
// overflow bail reloads srcDst (regT0 was clobbered by the failed sub);
// the not-an-int bail joins after the reload with regT0 intact.
void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}
732 | ||
733 | /* ------------------------------ BEGIN: OP_MOD ------------------------------ */ | |
734 | ||
735 | #if CPU(X86) || CPU(X86_64) | |
736 | ||
// op_mod (x86/x86_64): result = op1 % op2 via cdq + idiv.
// idiv takes the dividend in eax (regT0) and leaves the remainder in edx
// (regT1); the divisor lives in ecx (regT2) — the ASSERTs pin that
// mapping. Slow cases, in registration order: op1 not int, op2 not int,
// zero divisor, INT_MIN dividend with -1 divisor (idiv quotient
// overflow), and a zero remainder with a negative numerator (the result
// must be -0, which is not representable as an immediate int).
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);

    emitGetVirtualRegisters(op1, regT3, op2, regT2);
    emitJumpSlowCaseIfNotImmediateInteger(regT3);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);

    move(regT3, regT0);
    addSlowCase(branchTest32(Zero, regT2));
    Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
    addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
    denominatorNotNeg1.link(this);
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
    // Negative numerator with zero remainder => -0; bail to the stub.
    Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
    addSlowCase(branchTest32(Zero, regT1));
    numeratorPositive.link(this);
    emitFastArithReTagImmediate(regT1, regT0);
    emitPutVirtualRegister(result);
}
765 | ||
766 | void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) | |
767 | { | |
768 | unsigned result = currentInstruction[1].u.operand; | |
769 | ||
770 | linkSlowCase(iter); | |
771 | linkSlowCase(iter); | |
772 | linkSlowCase(iter); | |
773 | linkSlowCase(iter); | |
774 | linkSlowCase(iter); | |
775 | JITStubCall stubCall(this, cti_op_mod); | |
776 | stubCall.addArgument(regT3); | |
777 | stubCall.addArgument(regT2); | |
778 | stubCall.call(result); | |
779 | } | |
780 | ||
781 | #else // CPU(X86) || CPU(X86_64) | |
782 | ||
783 | void JIT::emit_op_mod(Instruction* currentInstruction) | |
784 | { | |
785 | unsigned result = currentInstruction[1].u.operand; | |
786 | unsigned op1 = currentInstruction[2].u.operand; | |
787 | unsigned op2 = currentInstruction[3].u.operand; | |
788 | ||
789 | JITStubCall stubCall(this, cti_op_mod); | |
790 | stubCall.addArgument(op1, regT2); | |
791 | stubCall.addArgument(op2, regT2); | |
792 | stubCall.call(result); | |
793 | } | |
794 | ||
795 | void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) | |
796 | { | |
797 | ASSERT_NOT_REACHED(); | |
798 | } | |
799 | ||
800 | #endif // CPU(X86) || CPU(X86_64) | |
801 | ||
802 | /* ------------------------------ END: OP_MOD ------------------------------ */ | |
803 | ||
804 | /* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */ | |
805 | ||
// Generic integer fast path shared by op_add, op_sub and op_mul (JSVALUE64).
// Loads both operands, bails to the slow path unless both are immediate
// integers, performs the 32-bit operation with an overflow check, and leaves
// the re-tagged result in regT0. The caller writes regT0 back to the result
// virtual register.
void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
{
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
#if ENABLE(VALUE_PROFILER)
    RareCaseProfile* profile = m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif
    if (opcodeID == op_add)
        addSlowCase(branchAdd32(Overflow, regT1, regT0));
    else if (opcodeID == op_sub)
        addSlowCase(branchSub32(Overflow, regT1, regT0));
    else {
        ASSERT(opcodeID == op_mul);
#if ENABLE(VALUE_PROFILER)
        if (m_canBeOptimized) {
            // We want to be able to measure if this is taking the slow case just
            // because of negative zero. If this produces positive zero, then we
            // don't want the slow case to be taken because that will throw off
            // speculative compilation.
            // Multiply into regT2 so regT0 still holds op1's original value
            // for the sign test below.
            move(regT0, regT2);
            addSlowCase(branchMul32(Overflow, regT1, regT2));
            JumpList done;
            // Non-zero product: negative zero is impossible; we're done.
            done.append(branchTest32(NonZero, regT2));
            // Zero product: it is -0 only if exactly one operand was negative.
            Jump negativeZero = branch32(LessThan, regT0, TrustedImm32(0));
            done.append(branch32(GreaterThanOrEqual, regT1, TrustedImm32(0)));
            negativeZero.link(this);
            // We only get here if we have a genuine negative zero. Record this,
            // so that the speculative JIT knows that we failed speculation
            // because of a negative zero.
            add32(TrustedImm32(1), AbsoluteAddress(&profile->m_counter));
            addSlowCase(jump());
            done.link(this);
            move(regT2, regT0);
        } else {
            addSlowCase(branchMul32(Overflow, regT1, regT0));
            // A zero product may be negative zero (e.g. 0 * -1), which an
            // immediate int cannot represent, so zero always takes the slow case.
            addSlowCase(branchTest32(Zero, regT0));
        }
#else
        addSlowCase(branchMul32(Overflow, regT1, regT0));
        addSlowCase(branchTest32(Zero, regT0));
#endif
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
}
851 | ||
// Slow path shared by op_add, op_sub and op_mul. Links the slow cases
// recorded by the matching fast path (the iterator order here must mirror
// the fast path's addSlowCase order exactly), then either calls the C stub
// (operand is not a number at all) or redoes the operation in double
// arithmetic (operand is a number that is not an immediate int).
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
{
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((TagTypeNumber + DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);

    // A constant-int fast case checks only the non-constant operand (which
    // it loaded into regT0), so only one not-an-int slow case exists; the
    // generic fast case records one per operand.
    Jump notImm1;
    Jump notImm2;
    if (op1HasImmediateIntFastCase) {
        notImm2 = getSlowCase(iter);
    } else if (op2HasImmediateIntFastCase) {
        notImm1 = getSlowCase(iter);
    } else {
        notImm1 = getSlowCase(iter);
        notImm2 = getSlowCase(iter);
    }

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);
    emitGetVirtualRegister(op1, regT0);

    // Full fallback: call the C stub with both boxed operands. The constant
    // fast cases must reload both operands first, since their fast path only
    // materialized one of them in a register.
    Label stubFunctionCall(this);
    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
    if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
        emitGetVirtualRegister(op1, regT0);
        emitGetVirtualRegister(op2, regT1);
    }
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(result);
    Jump end = jump();

    // Double paths: unbox op1 into fpRegT1 and op2 into fpRegT2, jumping to
    // stubFunctionCall if a value turns out not to be a number. Adding
    // tagTypeNumberRegister (== subtracting DoubleEncodeOffset, per the
    // COMPILE_ASSERT above) converts a boxed double to its raw bits.
    if (op1HasImmediateIntFastCase) {
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        // op1 is a constant int: convert it directly.
        emitGetVirtualRegister(op1, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT2);
    } else if (op2HasImmediateIntFastCase) {
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        // op2 is a constant int: convert it directly.
        emitGetVirtualRegister(op2, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT2);
    } else {
        // if we get here, eax is not an int32, edx not yet checked.
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT1);
        Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT2);
        Jump op2wasInteger = jump();

        // if we get here, eax IS an int32, edx is not.
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        convertInt32ToDouble(regT0, fpRegT1);
        op2isDouble.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT2);
        op2wasInteger.link(this);
    }

    // fpRegT1 = op1 OP op2, then re-box and store.
    if (opcodeID == op_add)
        addDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_sub)
        subDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_mul)
        mulDouble(fpRegT2, fpRegT1);
    else {
        ASSERT(opcodeID == op_div);
        divDouble(fpRegT2, fpRegT1);
    }
    moveDoubleToPtr(fpRegT1, regT0);
    subPtr(tagTypeNumberRegister, regT0);
    emitPutVirtualRegister(result, regT0);

    end.link(this);
}
940 | ||
// Fast path for op_add. A constant-int operand gets a dedicated
// immediate-add fast case; otherwise the generic integer path is used.
// Operands that can never be numbers always go straight to the stub.
void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        // Record a dummy slow case so emitSlow_op_add's iterator stays in
        // step; it consumes this with linkDummySlowCase.
        addSlowCase();
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        // Slow cases (in order): op2 not an immediate int; add overflow.
        // The add targets regT1 so regT0 keeps the boxed operand for the
        // slow path.
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op1)), regT1));
        emitFastArithIntToImmNoCheck(regT1, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        // Mirror image of the case above, with op2 as the constant.
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op2)), regT1));
        emitFastArithIntToImmNoCheck(regT1, regT0);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
972 | ||
973 | void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) | |
974 | { | |
975 | unsigned result = currentInstruction[1].u.operand; | |
976 | unsigned op1 = currentInstruction[2].u.operand; | |
977 | unsigned op2 = currentInstruction[3].u.operand; | |
978 | OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand); | |
979 | ||
980 | if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) { | |
981 | linkDummySlowCase(iter); | |
982 | return; | |
983 | } | |
984 | ||
985 | bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1); | |
986 | bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2); | |
987 | compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase); | |
988 | } | |
989 | ||
// Fast path for op_mul. A strictly positive constant-int operand gets an
// immediate-multiply fast case; with a positive constant the product is zero
// only when the other operand is zero, so negative zero cannot occur on the
// fast path.
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
#if ENABLE(VALUE_PROFILER)
        // Add a special fast case profile because the DFG JIT will expect one.
        m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif
        // Slow cases (in order): op2 not an immediate int; multiply overflow.
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
        emitFastArithReTagImmediate(regT1, regT0);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
#if ENABLE(VALUE_PROFILER)
        // Add a special fast case profile because the DFG JIT will expect one.
        m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
#endif
        // Mirror image of the case above, with op2 as the constant.
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
        emitFastArithReTagImmediate(regT1, regT0);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
1022 | ||
1023 | void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) | |
1024 | { | |
1025 | unsigned result = currentInstruction[1].u.operand; | |
1026 | unsigned op1 = currentInstruction[2].u.operand; | |
1027 | unsigned op2 = currentInstruction[3].u.operand; | |
1028 | OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand); | |
1029 | ||
1030 | bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0; | |
1031 | bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0; | |
1032 | compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase); | |
1033 | } | |
1034 | ||
// Fast path for op_div (JSVALUE64): unboxes op1 into fpRegT0 and op2 into
// fpRegT1 as doubles and divides in floating point. Constant operands are
// loaded directly; variable operands are number-checked (slow case if the
// static type does not guarantee a number) and converted from int32 when
// needed. Adding tagTypeNumberRegister unboxes a double; subtracting it
// re-boxes one.
void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateDouble(op1)) {
        emitGetVirtualRegister(op1, regT0);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitLoadInt32ToDouble(op1, fpRegT0);
    } else {
        emitGetVirtualRegister(op1, regT0);
        if (!types.first().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT0);
        // Number is either an immediate int (convert) or a boxed double (unbox).
        Jump notInt = emitJumpIfNotImmediateInteger(regT0);
        convertInt32ToDouble(regT0, fpRegT0);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
        skipDoubleLoad.link(this);
    }

    if (isOperandConstantImmediateDouble(op2)) {
        emitGetVirtualRegister(op2, regT1);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoadInt32ToDouble(op2, fpRegT1);
    } else {
        emitGetVirtualRegister(op2, regT1);
        if (!types.second().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT1);
        Jump notInt = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
        skipDoubleLoad.link(this);
    }
    divDouble(fpRegT1, fpRegT0);

#if ENABLE(VALUE_PROFILER)
    // Is the result actually an integer? The DFG JIT would really like to know. If it's
    // not an integer, we increment a count. If this together with the slow case counter
    // are below threshold then the DFG JIT will compile this division with a speculation
    // that the remainder is zero.

    // As well, there are cases where a double result here would cause an important field
    // in the heap to sometimes have doubles in it, resulting in double predictions getting
    // propagated to a use site where it might cause damage (such as the index to an array
    // access). So if we are DFG compiling anything in the program, we want this code to
    // ensure that it produces integers whenever possible.

    // FIXME: This will fail to convert to integer if the result is zero. We should
    // distinguish between positive zero and negative zero here.

    JumpList notInteger;
    branchConvertDoubleToInt32(fpRegT0, regT0, notInteger, fpRegT1);
    // If we've got an integer, we might as well make that the result of the division.
    emitFastArithReTagImmediate(regT0, regT0);
    Jump isInteger = jump();
    notInteger.link(this);
    add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));
    moveDoubleToPtr(fpRegT0, regT0);
    subPtr(tagTypeNumberRegister, regT0);
    isInteger.link(this);
#else
    // Double result.
    moveDoubleToPtr(fpRegT0, regT0);
    subPtr(tagTypeNumberRegister, regT0);
#endif

    emitPutVirtualRegister(dst, regT0);
}
1114 | ||
// Slow path for op_div. Only reached when a number check planted by
// emit_op_div fails; when both operand types are statically known to be
// numbers, no slow cases exist, so reaching this path indicates a bug
// (hence the debug breakpoint).
void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
    if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
#ifndef NDEBUG
        breakpoint();
#endif
        return;
    }
    // Link the not-a-number checks; emit_op_div plants one only for a
    // non-constant operand whose type is not definitely a number, so the
    // conditions here must match exactly.
    if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter);
    }
    if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
        if (!types.second().definitelyIsNumber())
            linkSlowCase(iter);
    }
    // Fall back to the C stub for the full division.
    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
}
1141 | ||
1142 | void JIT::emit_op_sub(Instruction* currentInstruction) | |
1143 | { | |
1144 | unsigned result = currentInstruction[1].u.operand; | |
1145 | unsigned op1 = currentInstruction[2].u.operand; | |
1146 | unsigned op2 = currentInstruction[3].u.operand; | |
1147 | OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand); | |
1148 | ||
1149 | compileBinaryArithOp(op_sub, result, op1, op2, types); | |
1150 | emitPutVirtualRegister(result); | |
1151 | } | |
1152 | ||
1153 | void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) | |
1154 | { | |
1155 | unsigned result = currentInstruction[1].u.operand; | |
1156 | unsigned op1 = currentInstruction[2].u.operand; | |
1157 | unsigned op2 = currentInstruction[3].u.operand; | |
1158 | OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand); | |
1159 | ||
1160 | compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types, false, false); | |
1161 | } | |
1162 | ||
1163 | /* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */ | |
1164 | ||
1165 | #endif // USE(JSVALUE64) | |
1166 | ||
1167 | } // namespace JSC | |
1168 | ||
1169 | #endif // ENABLE(JIT) |